diff --git a/layers/drawdispatch/descriptor_validator.cpp b/layers/drawdispatch/descriptor_validator.cpp index 4dd9157667e..b7b4fd9c84e 100644 --- a/layers/drawdispatch/descriptor_validator.cpp +++ b/layers/drawdispatch/descriptor_validator.cpp @@ -1,6 +1,6 @@ -/* Copyright (c) 2023-2024 The Khronos Group Inc. - * Copyright (c) 2023-2024 Valve Corporation - * Copyright (c) 2023-2024 LunarG, Inc. +/* Copyright (c) 2023-2025 The Khronos Group Inc. + * Copyright (c) 2023-2025 Valve Corporation + * Copyright (c) 2023-2025 LunarG, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -323,12 +323,7 @@ bool DescriptorValidator::ValidateDescriptor(const spirv::ResourceInterfaceVaria string_SpvDim(dim), is_image_array); } - // Because you can have a runtime array with different types in it, without extensive GPU-AV tracking, we have no way to - // detect if the types match up in a given index - const bool is_descriptor_potentially_aliased = - resource_variable.array_length == spirv::kRuntimeArray || (is_gpu_av && resource_variable.array_length > 1); - if (!is_descriptor_potentially_aliased && - ((resource_variable.info.image_format_type & image_view_state->descriptor_format_bits) == 0)) { + if ((resource_variable.info.image_format_type & image_view_state->descriptor_format_bits) == 0) { const bool signed_override = ((resource_variable.info.image_format_type & spirv::NumericTypeUint) && resource_variable.info.is_sign_extended); const bool unsigned_override = diff --git a/tests/unit/gpu_av_descriptor_post_process.cpp b/tests/unit/gpu_av_descriptor_post_process.cpp index 24eee4409a1..dbfeabc621c 100644 --- a/tests/unit/gpu_av_descriptor_post_process.cpp +++ b/tests/unit/gpu_av_descriptor_post_process.cpp @@ -1,8 +1,8 @@ /* - * Copyright (c) 2020-2024 The Khronos Group Inc. - * Copyright (c) 2020-2024 Valve Corporation - * Copyright (c) 2020-2024 LunarG, Inc. 
- * Copyright (c) 2020-2023 Google, Inc. + * Copyright (c) 2020-2025 The Khronos Group Inc. + * Copyright (c) 2020-2025 Valve Corporation + * Copyright (c) 2020-2025 LunarG, Inc. + * Copyright (c) 2020-2025 Google, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -1210,6 +1210,151 @@ TEST_F(NegativeGpuAVDescriptorPostProcess, MultipleCommandBuffersSameDescriptorS m_default_queue->Wait(); } +TEST_F(NegativeGpuAVDescriptorPostProcess, AliasImageBindingRuntimeArray) { + TEST_DESCRIPTION("https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/7677"); + SetTargetApiVersion(VK_API_VERSION_1_2); + AddRequiredExtensions(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); + AddRequiredFeature(vkt::Feature::runtimeDescriptorArray); + RETURN_IF_SKIP(InitGpuAvFramework()); + RETURN_IF_SKIP(InitState()); + + char const *csSource = R"glsl( + #version 460 + #extension GL_EXT_samplerless_texture_functions : require + #extension GL_EXT_nonuniform_qualifier : require + + // The 3 images in this binding are [float, uint, float] + layout(set = 0, binding = 0) uniform texture2D float_textures[]; + layout(set = 0, binding = 0) uniform utexture2D uint_textures[]; + layout(set = 0, binding = 1) buffer output_buffer { + uint index; // is just zero, but forces runtime array + vec4 data; + }; + + void main() { + const vec4 value = texelFetch(float_textures[index + 0], ivec2(0), 0); // good + const uint mask_a = texelFetch(uint_textures[index + 1], ivec2(0), 0).x; // good + const uint mask_b = texelFetch(uint_textures[index + 2], ivec2(0), 0).x; // bad + data = (mask_a + mask_b) > 0 ? 
value : vec4(0.0);
+        }
+    )glsl";
+
+    OneOffDescriptorSet descriptor_set(m_device, {
+                                                     {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 3, VK_SHADER_STAGE_ALL, nullptr},
+                                                     {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
+                                                 });
+    const vkt::PipelineLayout pipeline_layout(*m_device, {&descriptor_set.layout_});
+
+    CreateComputePipelineHelper pipe(*this);
+    pipe.cp_ci_.layout = pipeline_layout.handle();
+    pipe.cs_ = std::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
+    pipe.CreateComputePipeline();
+
+    auto image_ci = vkt::Image::ImageCreateInfo2D(64, 64, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);
+    vkt::Image float_image(*m_device, image_ci, vkt::set_layout);
+    vkt::ImageView float_image_view = float_image.CreateView();
+
+    image_ci.format = VK_FORMAT_R8G8B8A8_UINT;
+    vkt::Image uint_image(*m_device, image_ci, vkt::set_layout);
+    vkt::ImageView uint_image_view = uint_image.CreateView();
+
+    vkt::Buffer buffer(*m_device, 64, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, kHostVisibleMemProps);
+    uint32_t *buffer_ptr = (uint32_t *)buffer.Memory().Map();
+    *buffer_ptr = 0;
+    buffer.Memory().Unmap();
+
+    descriptor_set.WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+                                            VK_IMAGE_LAYOUT_GENERAL, 0);
+    descriptor_set.WriteDescriptorImageInfo(0, uint_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+                                            VK_IMAGE_LAYOUT_GENERAL, 1);
+    descriptor_set.WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+                                            VK_IMAGE_LAYOUT_GENERAL, 2);
+    descriptor_set.WriteDescriptorBufferInfo(1, buffer, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+    descriptor_set.UpdateDescriptorSets();
+
+    m_command_buffer.Begin();
+    vk::CmdBindDescriptorSets(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout, 0, 1,
+                              &descriptor_set.set_, 0, nullptr);
+    vk::CmdBindPipeline(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.Handle());
+    
vk::CmdDispatch(m_command_buffer.handle(), 1, 1, 1); + m_command_buffer.End(); + + m_errorMonitor->SetDesiredError("VUID-vkCmdDraw-format-07753"); + m_default_queue->Submit(m_command_buffer); + m_default_queue->Wait(); + m_errorMonitor->VerifyFound(); +} + +TEST_F(NegativeGpuAVDescriptorPostProcess, AliasImageBindingPartiallyBound) { + TEST_DESCRIPTION("https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/7677"); + SetTargetApiVersion(VK_API_VERSION_1_2); + AddRequiredExtensions(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); + AddRequiredFeature(vkt::Feature::descriptorBindingPartiallyBound); + RETURN_IF_SKIP(InitGpuAvFramework()); + RETURN_IF_SKIP(InitState()); + + char const *csSource = R"glsl( + #version 460 + #extension GL_EXT_samplerless_texture_functions : require + + // The 3 images in this binding are [float, uint, float] + layout(set = 0, binding = 0) uniform texture2D float_textures[3]; + layout(set = 0, binding = 0) uniform utexture2D uint_textures[3]; + layout(set = 0, binding = 1) buffer output_buffer { vec4 data; }; + + void main() { + const vec4 value = texelFetch(float_textures[0], ivec2(0), 0); // good + const uint mask_a = texelFetch(uint_textures[1], ivec2(0), 0).x; // good + const uint mask_b = texelFetch(uint_textures[2], ivec2(0), 0).x; // bad + data = (mask_a + mask_b) > 0 ? 
value : vec4(0.0);
+        }
+    )glsl";
+
+    OneOffDescriptorIndexingSet descriptor_set(
+        m_device,
+        {
+            {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 3, VK_SHADER_STAGE_ALL, nullptr, VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT},
+            {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr, 0},
+        });
+    const vkt::PipelineLayout pipeline_layout(*m_device, {&descriptor_set.layout_});
+
+    CreateComputePipelineHelper pipe(*this);
+    pipe.cp_ci_.layout = pipeline_layout.handle();
+    pipe.cs_ = std::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
+    pipe.CreateComputePipeline();
+
+    auto image_ci = vkt::Image::ImageCreateInfo2D(64, 64, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);
+    vkt::Image float_image(*m_device, image_ci, vkt::set_layout);
+    vkt::ImageView float_image_view = float_image.CreateView();
+
+    image_ci.format = VK_FORMAT_R8G8B8A8_UINT;
+    vkt::Image uint_image(*m_device, image_ci, vkt::set_layout);
+    vkt::ImageView uint_image_view = uint_image.CreateView();
+
+    vkt::Buffer buffer(*m_device, 64, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
+
+    descriptor_set.WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+                                            VK_IMAGE_LAYOUT_GENERAL, 0);
+    descriptor_set.WriteDescriptorImageInfo(0, uint_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+                                            VK_IMAGE_LAYOUT_GENERAL, 1);
+    descriptor_set.WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+                                            VK_IMAGE_LAYOUT_GENERAL, 2);
+    descriptor_set.WriteDescriptorBufferInfo(1, buffer, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+    descriptor_set.UpdateDescriptorSets();
+
+    m_command_buffer.Begin();
+    vk::CmdBindDescriptorSets(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout, 0, 1,
+                              &descriptor_set.set_, 0, nullptr);
+    vk::CmdBindPipeline(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.Handle());
+    vk::CmdDispatch(m_command_buffer.handle(), 1, 1, 1);
+    
m_command_buffer.End(); + + m_errorMonitor->SetDesiredError("VUID-vkCmdDraw-format-07753"); + m_default_queue->Submit(m_command_buffer); + m_default_queue->Wait(); + m_errorMonitor->VerifyFound(); +} + TEST_F(NegativeGpuAVDescriptorPostProcess, DescriptorIndexingSlang) { SetTargetApiVersion(VK_API_VERSION_1_2); AddRequiredExtensions(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); diff --git a/tests/unit/gpu_av_descriptor_post_process_positive.cpp b/tests/unit/gpu_av_descriptor_post_process_positive.cpp index ca73e2cffed..0aa3bf0e17f 100644 --- a/tests/unit/gpu_av_descriptor_post_process_positive.cpp +++ b/tests/unit/gpu_av_descriptor_post_process_positive.cpp @@ -1,6 +1,6 @@ -/* Copyright (c) 2023-2024 The Khronos Group Inc. - * Copyright (c) 2023-2024 Valve Corporation - * Copyright (c) 2023-2024 LunarG, Inc. +/* Copyright (c) 2023-2025 The Khronos Group Inc. + * Copyright (c) 2023-2025 Valve Corporation + * Copyright (c) 2023-2025 LunarG, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/tests/unit/shader_image_access.cpp b/tests/unit/shader_image_access.cpp index d489d3f0c59..877dd50ee1e 100644 --- a/tests/unit/shader_image_access.cpp +++ b/tests/unit/shader_image_access.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2023-2024 LunarG, Inc. - * Copyright (c) 2023-2024 Valve Corporation + * Copyright (c) 2023-2025 LunarG, Inc. + * Copyright (c) 2023-2025 Valve Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -519,20 +519,20 @@ TEST_F(NegativeShaderImageAccess, AliasImageBinding) {
         #version 460
         #extension GL_EXT_samplerless_texture_functions : require
 
-        layout(set = 0, binding = 0) uniform texture2D float_textures[3];
-        layout(set = 0, binding = 0) uniform utexture2D uint_textures[3];
+        // This should be a GLSL error, if both of these are accessed (without PARTIALLY_BOUND), you need to satisfy both, which is impossible
+        layout(set = 0, binding = 0) uniform texture2D float_textures[2];
+        layout(set = 0, binding = 0) uniform utexture2D uint_textures[2];
         layout(set = 0, binding = 1) buffer output_buffer { vec4 data; }; // avoid optimization
 
         void main() {
-            const vec4 value = texelFetch(float_textures[0], ivec2(0), 0); // good
-            const uint mask_a = texelFetch(uint_textures[1], ivec2(0), 0).x; // good
-            const uint mask_b = texelFetch(uint_textures[2], ivec2(0), 0).x; // bad
-            data = (mask_a + mask_b) > 0 ? value : vec4(0.0);
+            const vec4 value = texelFetch(float_textures[0], ivec2(0), 0);
+            const uint mask_a = texelFetch(uint_textures[1], ivec2(0), 0).x;
+            data = mask_a > 0 ? 
value : vec4(0.0);
         }
     )glsl";
 
     CreateComputePipelineHelper pipe(*this);
-    pipe.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 3, VK_SHADER_STAGE_ALL, nullptr},
+    pipe.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 2, VK_SHADER_STAGE_ALL, nullptr},
                           {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
     pipe.cs_ = std::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
     pipe.CreateComputePipeline();
@@ -551,8 +551,6 @@ TEST_F(NegativeShaderImageAccess, AliasImageBinding) {
                                                    VK_IMAGE_LAYOUT_GENERAL, 0);
     pipe.descriptor_set_->WriteDescriptorImageInfo(0, uint_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
                                                    VK_IMAGE_LAYOUT_GENERAL, 1);
-    pipe.descriptor_set_->WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
-                                                   VK_IMAGE_LAYOUT_GENERAL, 2);
     pipe.descriptor_set_->WriteDescriptorBufferInfo(1, buffer, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
     pipe.descriptor_set_->UpdateDescriptorSets();
diff --git a/tests/unit/shader_image_access_positive.cpp b/tests/unit/shader_image_access_positive.cpp
index 60da5a1a5f0..c7be7159b6c 100644
--- a/tests/unit/shader_image_access_positive.cpp
+++ b/tests/unit/shader_image_access_positive.cpp
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2023-2024 LunarG, Inc.
- * Copyright (c) 2023-2024 Valve Corporation
+ * Copyright (c) 2023-2025 LunarG, Inc.
+ * Copyright (c) 2023-2025 Valve Corporation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -597,4 +597,50 @@ TEST_F(PositiveShaderImageAccess, FunctionDescriptorIndexing) {
     vk::CmdDraw(m_command_buffer.handle(), 3, 1, 0, 0);
     m_command_buffer.EndRenderPass();
     m_command_buffer.End();
-}
\ No newline at end of file
+}
+
+TEST_F(PositiveShaderImageAccess, AliasImageBinding) {
+    TEST_DESCRIPTION("https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/7677");
+    RETURN_IF_SKIP(Init());
+
+    char const *csSource = R"glsl(
+        #version 460
+        #extension GL_EXT_samplerless_texture_functions : require
+
+        // Because the UINT is never accessed, it will be ignored
+        layout(set = 0, binding = 0) uniform texture2D float_textures[2];
+        layout(set = 0, binding = 0) uniform utexture2D uint_textures[2];
+        layout(set = 0, binding = 1) buffer output_buffer { vec4 data; }; // avoid optimization
+
+        void main() {
+            const vec4 value_0 = texelFetch(float_textures[0], ivec2(0), 0);
+            const vec4 value_1 = texelFetch(float_textures[1], ivec2(0), 0);
+            data = value_0 + value_1;
+        }
+    )glsl";
+
+    CreateComputePipelineHelper pipe(*this);
+    pipe.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 2, VK_SHADER_STAGE_ALL, nullptr},
+                          {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
+    pipe.cs_ = std::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
+    pipe.CreateComputePipeline();
+
+    auto image_ci = vkt::Image::ImageCreateInfo2D(64, 64, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);
+    vkt::Image float_image(*m_device, image_ci, vkt::set_layout);
+    vkt::ImageView float_image_view = float_image.CreateView();
+    vkt::Buffer buffer(*m_device, 64, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
+
+    pipe.descriptor_set_->WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+                                                   VK_IMAGE_LAYOUT_GENERAL, 0);
+    pipe.descriptor_set_->WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
+                                                   VK_IMAGE_LAYOUT_GENERAL, 1);
+    pipe.descriptor_set_->WriteDescriptorBufferInfo(1, 
buffer, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER); + pipe.descriptor_set_->UpdateDescriptorSets(); + + m_command_buffer.Begin(); + vk::CmdBindDescriptorSets(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_layout_.handle(), 0, 1, + &pipe.descriptor_set_->set_, 0, nullptr); + vk::CmdBindPipeline(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.Handle()); + vk::CmdDispatch(m_command_buffer.handle(), 1, 1, 1); + m_command_buffer.End(); +}