gpuav: Remove legacy Alias check
spencer-lunarg committed Jan 17, 2025
1 parent d209920 commit f28a508
Showing 5 changed files with 214 additions and 30 deletions.
13 changes: 4 additions & 9 deletions layers/drawdispatch/descriptor_validator.cpp
@@ -1,6 +1,6 @@
/* Copyright (c) 2023-2024 The Khronos Group Inc.
* Copyright (c) 2023-2024 Valve Corporation
* Copyright (c) 2023-2024 LunarG, Inc.
/* Copyright (c) 2023-2025 The Khronos Group Inc.
* Copyright (c) 2023-2025 Valve Corporation
* Copyright (c) 2023-2025 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -323,12 +323,7 @@ bool DescriptorValidator::ValidateDescriptor(const spirv::ResourceInterfaceVaria
string_SpvDim(dim), is_image_array);
}

// Because you can have a runtime array with different types in it, without extensive GPU-AV tracking, we have no way to
// detect if the types match up in a given index
const bool is_descriptor_potentially_aliased =
resource_variable.array_length == spirv::kRuntimeArray || (is_gpu_av && resource_variable.array_length > 1);
if (!is_descriptor_potentially_aliased &&
((resource_variable.info.image_format_type & image_view_state->descriptor_format_bits) == 0)) {
if ((resource_variable.info.image_format_type & image_view_state->descriptor_format_bits) == 0) {
const bool signed_override =
((resource_variable.info.image_format_type & spirv::NumericTypeUint) && resource_variable.info.is_sign_extended);
const bool unsigned_override =
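
With the legacy guard gone, the numeric format-type check above now also applies to descriptors reached through runtime arrays, which the new GPU-AV post-process tests below exercise. As a minimal sketch of what that check compares (the constants and function here are simplified stand-ins, not the layer's real spirv helpers):

    #include <cstdint>

    // Simplified stand-ins for the layer's numeric-type bitmasks.
    constexpr uint32_t kNumericTypeFloat = 1u << 0;
    constexpr uint32_t kNumericTypeSint  = 1u << 1;
    constexpr uint32_t kNumericTypeUint  = 1u << 2;

    // The shader variable records which numeric class it reads (e.g. utexture2D -> Uint);
    // the bound image view records which classes its VkFormat provides (e.g. R8G8B8A8_UNORM -> Float).
    // No overlap between the two is the mismatch the tests below expect as VUID-vkCmdDraw-format-07753.
    bool FormatTypeMismatch(uint32_t shader_format_type, uint32_t view_format_bits) {
        return (shader_format_type & view_format_bits) == 0;
    }
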
153 changes: 149 additions & 4 deletions tests/unit/gpu_av_descriptor_post_process.cpp
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2020-2024 The Khronos Group Inc.
* Copyright (c) 2020-2024 Valve Corporation
* Copyright (c) 2020-2024 LunarG, Inc.
* Copyright (c) 2020-2023 Google, Inc.
* Copyright (c) 2020-2025 The Khronos Group Inc.
* Copyright (c) 2020-2025 Valve Corporation
* Copyright (c) 2020-2025 LunarG, Inc.
* Copyright (c) 2020-2025 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1210,6 +1210,151 @@ TEST_F(NegativeGpuAVDescriptorPostProcess, MultipleCommandBuffersSameDescriptorS
m_default_queue->Wait();
}

TEST_F(NegativeGpuAVDescriptorPostProcess, AliasImageBindingRuntimeArray) {
TEST_DESCRIPTION("https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/7677");
SetTargetApiVersion(VK_API_VERSION_1_2);
AddRequiredExtensions(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
AddRequiredFeature(vkt::Feature::runtimeDescriptorArray);
RETURN_IF_SKIP(InitGpuAvFramework());
RETURN_IF_SKIP(InitState());

char const *csSource = R"glsl(
#version 460
#extension GL_EXT_samplerless_texture_functions : require
#extension GL_EXT_nonuniform_qualifier : require
// The 3 images in this binding are [float, uint, float]
layout(set = 0, binding = 0) uniform texture2D float_textures[];
layout(set = 0, binding = 0) uniform utexture2D uint_textures[];
layout(set = 0, binding = 1) buffer output_buffer {
uint index; // is just zero, but forces a runtime array
vec4 data;
};
void main() {
const vec4 value = texelFetch(float_textures[index + 0], ivec2(0), 0); // good
const uint mask_a = texelFetch(uint_textures[index + 1], ivec2(0), 0).x; // good
const uint mask_b = texelFetch(uint_textures[index + 2], ivec2(0), 0).x; // bad
data = (mask_a + mask_b) > 0 ? value : vec4(0.0);
}
)glsl";

OneOffDescriptorSet descriptor_set(m_device, {
{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 3, VK_SHADER_STAGE_ALL, nullptr},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
const vkt::PipelineLayout pipeline_layout(*m_device, {&descriptor_set.layout_});

CreateComputePipelineHelper pipe(*this);
pipe.cp_ci_.layout = pipeline_layout.handle();
pipe.cs_ = std::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
pipe.CreateComputePipeline();

auto image_ci = vkt::Image::ImageCreateInfo2D(64, 64, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);
vkt::Image float_image(*m_device, image_ci, vkt::set_layout);
vkt::ImageView float_image_view = float_image.CreateView();

image_ci.format = VK_FORMAT_R8G8B8A8_UINT;
vkt::Image uint_image(*m_device, image_ci, vkt::set_layout);
vkt::ImageView uint_image_view = uint_image.CreateView();

vkt::Buffer buffer(*m_device, 64, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, kHostVisibleMemProps);
uint32_t *buffer_ptr = (uint32_t *)buffer.Memory().Map();
*buffer_ptr = 0;
buffer.Memory().Unmap();

descriptor_set.WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 0);
descriptor_set.WriteDescriptorImageInfo(0, uint_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 1);
descriptor_set.WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 2);
descriptor_set.WriteDescriptorBufferInfo(1, buffer, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
descriptor_set.UpdateDescriptorSets();

m_command_buffer.Begin();
vk::CmdBindDescriptorSets(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout, 0, 1,
&descriptor_set.set_, 0, nullptr);
vk::CmdBindPipeline(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.Handle());
vk::CmdDispatch(m_command_buffer.handle(), 1, 1, 1);
m_command_buffer.End();

m_errorMonitor->SetDesiredError("VUID-vkCmdDraw-format-07753");
m_default_queue->Submit(m_command_buffer);
m_default_queue->Wait();
m_errorMonitor->VerifyFound();
}
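
For contrast, if this test were turned into a positive case, element 2 of the binding would need the UINT-format view so that the utexture2D access matches the bound format class; a sketch reusing uint_image_view from above:

    // Binding the UINT view at element 2 means every access in the shader sees a
    // view whose format class matches the declared sampled type, so no error fires.
    descriptor_set.WriteDescriptorImageInfo(0, uint_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
                                            VK_IMAGE_LAYOUT_GENERAL, 2);
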

TEST_F(NegativeGpuAVDescriptorPostProcess, AliasImageBindingPartiallyBound) {
TEST_DESCRIPTION("https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/7677");
SetTargetApiVersion(VK_API_VERSION_1_2);
AddRequiredExtensions(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
AddRequiredFeature(vkt::Feature::descriptorBindingPartiallyBound);
RETURN_IF_SKIP(InitGpuAvFramework());
RETURN_IF_SKIP(InitState());

char const *csSource = R"glsl(
#version 460
#extension GL_EXT_samplerless_texture_functions : require
// The 3 images in this binding are [float, uint, float]
layout(set = 0, binding = 0) uniform texture2D float_textures[3];
layout(set = 0, binding = 0) uniform utexture2D uint_textures[3];
layout(set = 0, binding = 1) buffer output_buffer { vec4 data; };
void main() {
const vec4 value = texelFetch(float_textures[0], ivec2(0), 0); // good
const uint mask_a = texelFetch(uint_textures[1], ivec2(0), 0).x; // good
const uint mask_b = texelFetch(uint_textures[2], ivec2(0), 0).x; // bad
data = (mask_a + mask_b) > 0 ? value : vec4(0.0);
}
)glsl";

OneOffDescriptorIndexingSet descriptor_set(
m_device,
{
{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 3, VK_SHADER_STAGE_ALL, nullptr, VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr, 0},
});
const vkt::PipelineLayout pipeline_layout(*m_device, {&descriptor_set.layout_});

CreateComputePipelineHelper pipe(*this);
pipe.cp_ci_.layout = pipeline_layout.handle();
pipe.cs_ = std::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
pipe.CreateComputePipeline();

auto image_ci = vkt::Image::ImageCreateInfo2D(64, 64, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);
vkt::Image float_image(*m_device, image_ci, vkt::set_layout);
vkt::ImageView float_image_view = float_image.CreateView();

image_ci.format = VK_FORMAT_R8G8B8A8_UINT;
vkt::Image uint_image(*m_device, image_ci, vkt::set_layout);
vkt::ImageView uint_image_view = uint_image.CreateView();

vkt::Buffer buffer(*m_device, 64, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);

descriptor_set.WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 0);
descriptor_set.WriteDescriptorImageInfo(0, uint_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 1);
descriptor_set.WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 2);
descriptor_set.WriteDescriptorBufferInfo(1, buffer, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
descriptor_set.UpdateDescriptorSets();

m_command_buffer.Begin();
vk::CmdBindDescriptorSets(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout, 0, 1,
&descriptor_set.set_, 0, nullptr);
vk::CmdBindPipeline(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.Handle());
vk::CmdDispatch(m_command_buffer.handle(), 1, 1, 1);
m_command_buffer.End();

m_errorMonitor->SetDesiredError("VUID-vkCmdDraw-format-07753");
m_default_queue->Submit(m_command_buffer);
m_default_queue->Wait();
m_errorMonitor->VerifyFound();
}

TEST_F(NegativeGpuAVDescriptorPostProcess, DescriptorIndexingSlang) {
SetTargetApiVersion(VK_API_VERSION_1_2);
AddRequiredExtensions(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
6 changes: 3 additions & 3 deletions tests/unit/gpu_av_descriptor_post_process_positive.cpp
@@ -1,6 +1,6 @@
/* Copyright (c) 2023-2024 The Khronos Group Inc.
* Copyright (c) 2023-2024 Valve Corporation
* Copyright (c) 2023-2024 LunarG, Inc.
/* Copyright (c) 2023-2025 The Khronos Group Inc.
* Copyright (c) 2023-2025 Valve Corporation
* Copyright (c) 2023-2025 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
20 changes: 9 additions & 11 deletions tests/unit/shader_image_access.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2023-2024 LunarG, Inc.
* Copyright (c) 2023-2024 Valve Corporation
* Copyright (c) 2023-2025 LunarG, Inc.
* Copyright (c) 2023-2025 Valve Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -519,20 +519,20 @@ TEST_F(NegativeShaderImageAccess, AliasImageBinding) {
#version 460
#extension GL_EXT_samplerless_texture_functions : require
layout(set = 0, binding = 0) uniform texture2D float_textures[3];
layout(set = 0, binding = 0) uniform utexture2D uint_textures[3];
// This should be a GLSL error; if both of these are accessed (without PARTIALLY_BOUND), you need to satisfy both, which is impossible
layout(set = 0, binding = 0) uniform texture2D float_textures[2];
layout(set = 0, binding = 0) uniform utexture2D uint_textures[2];
layout(set = 0, binding = 1) buffer output_buffer { vec4 data; }; // avoid optimization
void main() {
const vec4 value = texelFetch(float_textures[0], ivec2(0), 0); // good
const uint mask_a = texelFetch(uint_textures[1], ivec2(0), 0).x; // good
const uint mask_b = texelFetch(uint_textures[2], ivec2(0), 0).x; // bad
data = (mask_a + mask_b) > 0 ? value : vec4(0.0);
const vec4 value = texelFetch(float_textures[0], ivec2(0), 0);
const uint mask_a = texelFetch(uint_textures[1], ivec2(0), 0).x;
data = mask_a > 0 ? value : vec4(0.0);
}
)glsl";

CreateComputePipelineHelper pipe(*this);
pipe.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 3, VK_SHADER_STAGE_ALL, nullptr},
pipe.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 2, VK_SHADER_STAGE_ALL, nullptr},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
pipe.cs_ = std::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
pipe.CreateComputePipeline();
@@ -551,8 +551,6 @@ TEST_F(NegativeShaderImageAccess, AliasImageBinding) {
VK_IMAGE_LAYOUT_GENERAL, 0);
pipe.descriptor_set_->WriteDescriptorImageInfo(0, uint_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 1);
pipe.descriptor_set_->WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 2);
pipe.descriptor_set_->WriteDescriptorBufferInfo(1, buffer, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
pipe.descriptor_set_->UpdateDescriptorSets();

52 changes: 49 additions & 3 deletions tests/unit/shader_image_access_positive.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2023-2024 LunarG, Inc.
* Copyright (c) 2023-2024 Valve Corporation
* Copyright (c) 2023-2025 LunarG, Inc.
* Copyright (c) 2023-2025 Valve Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -597,4 +597,50 @@ TEST_F(PositiveShaderImageAccess, FunctionDescriptorIndexing) {
vk::CmdDraw(m_command_buffer.handle(), 3, 1, 0, 0);
m_command_buffer.EndRenderPass();
m_command_buffer.End();
}
}

TEST_F(PositiveShaderImageAccess, AliasImageBinding) {
TEST_DESCRIPTION("https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/7677");
RETURN_IF_SKIP(Init());

char const *csSource = R"glsl(
#version 460
#extension GL_EXT_samplerless_texture_functions : require
// Because the UINT is never accessed, it will be ignored
layout(set = 0, binding = 0) uniform texture2D float_textures[2];
layout(set = 0, binding = 0) uniform utexture2D uint_textures[2];
layout(set = 0, binding = 1) buffer output_buffer { vec4 data; }; // avoid optimization
void main() {
const vec4 value_0 = texelFetch(float_textures[0], ivec2(0), 0);
const vec4 value_1 = texelFetch(float_textures[1], ivec2(0), 0);
data = value_0 + value_1;
}
)glsl";

CreateComputePipelineHelper pipe(*this);
pipe.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 2, VK_SHADER_STAGE_ALL, nullptr},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
pipe.cs_ = std::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
pipe.CreateComputePipeline();

auto image_ci = vkt::Image::ImageCreateInfo2D(64, 64, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);
vkt::Image float_image(*m_device, image_ci, vkt::set_layout);
vkt::ImageView float_image_view = float_image.CreateView();
vkt::Buffer buffer(*m_device, 64, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);

pipe.descriptor_set_->WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 0);
pipe.descriptor_set_->WriteDescriptorImageInfo(0, float_image_view, VK_NULL_HANDLE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
VK_IMAGE_LAYOUT_GENERAL, 1);
pipe.descriptor_set_->WriteDescriptorBufferInfo(1, buffer, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
pipe.descriptor_set_->UpdateDescriptorSets();

m_command_buffer.Begin();
vk::CmdBindDescriptorSets(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_layout_.handle(), 0, 1,
&pipe.descriptor_set_->set_, 0, nullptr);
vk::CmdBindPipeline(m_command_buffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.Handle());
vk::CmdDispatch(m_command_buffer.handle(), 1, 1, 1);
m_command_buffer.End();
}
