diff --git a/tests/unit/sync_val.cpp b/tests/unit/sync_val.cpp
index 1e31d0b1695..821d8560361 100644
--- a/tests/unit/sync_val.cpp
+++ b/tests/unit/sync_val.cpp
@@ -5676,7 +5676,7 @@ TEST_F(NegativeSyncVal, ExpandedMetaStage) {
     m_command_buffer.Begin();
     m_command_buffer.Copy(buffer_a, buffer_b);
     vk::CmdPipelineBarrier2(m_command_buffer.handle(), &dep_info);
-    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE", "SYNC_ALL_COMMANDS_SHADER_READ");
+    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE", "VK_ACCESS_2_SHADER_READ_BIT accesses on VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT stage");
     m_command_buffer.Copy(buffer_a, buffer_b);
     m_errorMonitor->VerifyFound();
     m_command_buffer.End();
@@ -5703,7 +5703,7 @@ TEST_F(NegativeSyncVal, ExpandedMetaStage2) {
     m_command_buffer.Begin();
     m_command_buffer.Copy(buffer_a, buffer_b);
     vk::CmdPipelineBarrier2(m_command_buffer.handle(), &dep_info);
-    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE", "SYNC_ALL_COMMANDS_MEMORY_READ");
+    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE", "VK_ACCESS_2_MEMORY_READ_BIT accesses on VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT stage");
     m_command_buffer.Copy(buffer_a, buffer_b);
     m_errorMonitor->VerifyFound();
     m_command_buffer.End();
@@ -5730,7 +5730,7 @@ TEST_F(NegativeSyncVal, ExpandedMetaStage3) {
     m_command_buffer.Begin();
     m_command_buffer.Copy(buffer_a, buffer_b);
     vk::CmdPipelineBarrier2(m_command_buffer.handle(), &dep_info);
-    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE", "SYNC_ALL_COMMANDS_SHADER_WRITE");
+    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE", "VK_ACCESS_2_SHADER_WRITE_BIT accesses on VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT stage");
     m_command_buffer.Copy(buffer_a, buffer_b);
     m_errorMonitor->VerifyFound();
     m_command_buffer.End();
diff --git a/tests/unit/sync_val_reporting.cpp b/tests/unit/sync_val_reporting.cpp
index 05db010bc34..a65a031a9de 100644
--- a/tests/unit/sync_val_reporting.cpp
+++ b/tests/unit/sync_val_reporting.cpp
@@ -1,6 +1,6 @@
-/* Copyright (c) 2024 The Khronos Group Inc.
- * Copyright (c) 2024 Valve Corporation
- * Copyright (c) 2024 LunarG, Inc.
+/* Copyright (c) 2025 The Khronos Group Inc.
+ * Copyright (c) 2025 Valve Corporation
+ * Copyright (c) 2025 LunarG, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -995,3 +995,245 @@ TEST_F(NegativeSyncValReporting, DebugLabelRegionsFromSecondaryCommandBuffers) {
     m_errorMonitor->VerifyFound();
     m_default_queue->Wait();
 }
+
+TEST_F(NegativeSyncValReporting, ReportAllTransferMetaStage) {
+    TEST_DESCRIPTION("Check that the error message reports accesses on all transfer stages in compact form (uses meta stage)");
+    SetTargetApiVersion(VK_API_VERSION_1_3);
+    AddRequiredFeature(vkt::Feature::synchronization2);
+    RETURN_IF_SKIP(InitSyncVal());
+
+    vkt::Buffer buffer_a(*m_device, 128, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+    vkt::Buffer buffer_b(*m_device, 128, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+    OneOffDescriptorSet descriptor_set(m_device,
+                                       {
+                                           {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
+                                           {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
+                                       });
+    descriptor_set.WriteDescriptorBufferInfo(0, buffer_a, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+    descriptor_set.WriteDescriptorBufferInfo(1, buffer_b, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+    descriptor_set.UpdateDescriptorSets();
+    const char* cs_source = R"glsl(
+        #version 450
+        layout(set=0, binding=0) buffer buf_a { uint values_a[]; };
+        layout(set=0, binding=1) buffer buf_b { uint values_b[]; };
+        void main(){
+            values_b[0] = values_a[0];
+        }
+    )glsl";
+    CreateComputePipelineHelper pipe(*this);
+    pipe.cs_ = std::make_unique<VkShaderObj>(this, cs_source, VK_SHADER_STAGE_COMPUTE_BIT);
+    pipe.pipeline_layout_ = vkt::PipelineLayout(*m_device, {&descriptor_set.layout_});
+    pipe.CreateComputePipeline();
+
+    VkBufferMemoryBarrier2 barrier = vku::InitStructHelper();
+    barrier.srcStageMask = VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT;
+    barrier.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT;
+    barrier.dstStageMask = VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT;
+    barrier.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT;
+    barrier.buffer = buffer_b;
+    barrier.size = 128;
+
+    VkDependencyInfo dep_info = vku::InitStructHelper();
+    dep_info.bufferMemoryBarrierCount = 1;
+    dep_info.pBufferMemoryBarriers = &barrier;
+
+    m_command_buffer.Begin();
+    vk::CmdBindPipeline(m_command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipe.Handle());
+    vk::CmdBindDescriptorSets(m_command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_layout_, 0, 1, &descriptor_set.set_,
+                              0, nullptr);
+
+    m_command_buffer.Copy(buffer_a, buffer_b);
+    // This barrier makes copy accesses visible to the transfer stage but not to the compute stage
+    vk::CmdPipelineBarrier2(m_command_buffer, &dep_info);
+
+    // Check that error reporting merged the internal representation of transfer stage accesses into a compact form
+    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE",
+                                         "VK_ACCESS_2_TRANSFER_WRITE_BIT accesses on VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT stage");
+    vk::CmdDispatch(m_command_buffer, 1, 1, 1);
+    m_errorMonitor->VerifyFound();
+    m_command_buffer.End();
+}
+
+TEST_F(NegativeSyncValReporting, DoNotReportUnsupportedStage) {
+    TEST_DESCRIPTION("Check that an unsupported stage does not add information to the error message");
+    SetTargetApiVersion(VK_API_VERSION_1_3);
+    AddRequiredFeature(vkt::Feature::synchronization2);
+    RETURN_IF_SKIP(InitSyncVal());
+
+    vkt::Buffer buffer_a(*m_device, 128, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+    vkt::Buffer buffer_b(*m_device, 128, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+    OneOffDescriptorSet descriptor_set(m_device,
+                                       {
+                                           {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
+                                           {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
+                                       });
+    descriptor_set.WriteDescriptorBufferInfo(0, buffer_a, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+    descriptor_set.WriteDescriptorBufferInfo(1, buffer_b, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+    descriptor_set.UpdateDescriptorSets();
+    const char* cs_source = R"glsl(
+        #version 450
+        layout(set=0, binding=0) buffer buf_a { uint values_a[]; };
+        layout(set=0, binding=1) buffer buf_b { uint values_b[]; };
+        void main(){
+            values_b[0] = values_a[0];
+        }
+    )glsl";
+    CreateComputePipelineHelper pipe(*this);
+    pipe.cs_ = std::make_unique<VkShaderObj>(this, cs_source, VK_SHADER_STAGE_COMPUTE_BIT);
+    pipe.pipeline_layout_ = vkt::PipelineLayout(*m_device, {&descriptor_set.layout_});
+    pipe.CreateComputePipeline();
+
+    // TRANSFER READ and/or WRITE are the only accesses supported by the COPY/RESOLVE/BLIT/CLEAR stages.
+    // ACCELERATION_STRUCTURE_COPY_BIT_KHR supports more accesses, but because the ray tracing
+    // extension is not enabled we don't need to take them into account and can report the result
+    // in the short "all accesses" form.
+    VkBufferMemoryBarrier2 barrier = vku::InitStructHelper();
+    barrier.srcStageMask = VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT;
+    barrier.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT | VK_ACCESS_2_TRANSFER_READ_BIT;
+    barrier.dstStageMask = VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT;
+    barrier.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT | VK_ACCESS_2_TRANSFER_READ_BIT;
+    barrier.buffer = buffer_b;
+    barrier.size = 128;
+
+    VkDependencyInfo dep_info = vku::InitStructHelper();
+    dep_info.bufferMemoryBarrierCount = 1;
+    dep_info.pBufferMemoryBarriers = &barrier;
+
+    m_command_buffer.Begin();
+    vk::CmdBindPipeline(m_command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipe.Handle());
+    vk::CmdBindDescriptorSets(m_command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_layout_, 0, 1, &descriptor_set.set_,
+                              0, nullptr);
+
+    m_command_buffer.Copy(buffer_a, buffer_b);
+    // This barrier makes previous writes visible to the COPY stage but not to the following COMPUTE stage
+    vk::CmdPipelineBarrier2(m_command_buffer, &dep_info);
+
+    // If error reporting does not skip the unsupported ACCELERATION_STRUCTURE_COPY_BIT_KHR then the following
+    // error message won't be able to use the short form (TRANSFER_WRITE+TRANSFER_READ != "all accesses" in that case)
+    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE",
+                                         "all accesses on VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT stage");
+
+    vk::CmdDispatch(m_command_buffer, 1, 1, 1);
+    m_errorMonitor->VerifyFound();
+    m_command_buffer.End();
+}
+
+TEST_F(NegativeSyncValReporting, ReportAccelerationStructureCopyAccesses) {
+    TEST_DESCRIPTION("Check that TRANSFER_READ+WRITE is not replaced with ALL accesses for the ACCELERATION_STRUCTURE_COPY stage");
+    SetTargetApiVersion(VK_API_VERSION_1_3);
+    AddRequiredExtensions(VK_KHR_RAY_TRACING_MAINTENANCE_1_EXTENSION_NAME);
+    AddRequiredFeature(vkt::Feature::synchronization2);
+    AddRequiredFeature(vkt::Feature::rayTracingMaintenance1);
+    RETURN_IF_SKIP(InitSyncVal());
+
+    vkt::Buffer buffer_a(*m_device, 128, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+    vkt::Buffer buffer_b(*m_device, 128, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+    OneOffDescriptorSet descriptor_set(m_device,
+                                       {
+                                           {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
+                                           {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
+                                       });
+    descriptor_set.WriteDescriptorBufferInfo(0, buffer_a, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+    descriptor_set.WriteDescriptorBufferInfo(1, buffer_b, 0, VK_WHOLE_SIZE, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+    descriptor_set.UpdateDescriptorSets();
+    const char* cs_source = R"glsl(
+        #version 450
+        layout(set=0, binding=0) buffer buf_a { uint values_a[]; };
+        layout(set=0, binding=1) buffer buf_b { uint values_b[]; };
+        void main(){
+            values_b[0] = values_a[0];
+        }
+    )glsl";
+    CreateComputePipelineHelper pipe(*this);
+    pipe.cs_ = std::make_unique<VkShaderObj>(this, cs_source, VK_SHADER_STAGE_COMPUTE_BIT);
+    pipe.pipeline_layout_ = vkt::PipelineLayout(*m_device, {&descriptor_set.layout_});
+    pipe.CreateComputePipeline();
+
+    // Protect TRANSFER_READ+WRITE accesses
+    VkBufferMemoryBarrier2 barrier = vku::InitStructHelper();
+    barrier.srcStageMask = VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT;
+    barrier.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT | VK_ACCESS_2_TRANSFER_READ_BIT;
+    barrier.dstStageMask = VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT;
+    barrier.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT | VK_ACCESS_2_TRANSFER_READ_BIT;
+    barrier.buffer = buffer_b;
+    barrier.size = 128;
+
+    VkDependencyInfo dep_info = vku::InitStructHelper();
+    dep_info.bufferMemoryBarrierCount = 1;
+    dep_info.pBufferMemoryBarriers = &barrier;
+
+    m_command_buffer.Begin();
+    vk::CmdBindPipeline(m_command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipe.Handle());
+    vk::CmdBindDescriptorSets(m_command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_layout_, 0, 1, &descriptor_set.set_,
+                              0, nullptr);
+
+    m_command_buffer.Copy(buffer_a, buffer_b);
+    // This barrier makes previous writes visible to the COPY stage but not to the following COMPUTE stage
+    vk::CmdPipelineBarrier2(m_command_buffer, &dep_info);
+
+    // ACCELERATION_STRUCTURE_COPY_BIT_KHR supports more accesses than TRANSFER_READ+WRITE,
+    // so the latter combination can't be replaced with "all accesses"
+    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE",
+                                         "VK_ACCESS_2_TRANSFER_READ_BIT\\|VK_ACCESS_2_TRANSFER_WRITE_BIT");
+
+    vk::CmdDispatch(m_command_buffer, 1, 1, 1);
+    m_errorMonitor->VerifyFound();
+    m_command_buffer.End();
+}
+
+TEST_F(NegativeSyncValReporting, DoNotUseShortcutForSimpleAccessMask) {
+    TEST_DESCRIPTION("Check that for an access mask with at most 2 bits set we don't use the ALL accesses shortcut");
+    SetTargetApiVersion(VK_API_VERSION_1_3);
+    AddRequiredFeature(vkt::Feature::synchronization2);
+    AddRequiredFeature(vkt::Feature::dynamicRendering);
+    RETURN_IF_SKIP(InitSyncVal());
+
+    vkt::Image image(*m_device, 32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM,
+                     VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
+    vkt::Buffer buffer(*m_device, 32 * 32 * 4, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
+    image.SetLayout(VK_IMAGE_LAYOUT_GENERAL);
+    vkt::ImageView image_view = image.CreateView();
+
+    VkRenderingAttachmentInfo attachment = vku::InitStructHelper();
+    attachment.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
+    attachment.imageView = image_view;
+    attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+    attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+
+    VkRenderingInfo rendering_info = vku::InitStructHelper();
+    rendering_info.renderArea = {{0, 0}, {32, 32}};
+    rendering_info.layerCount = 1;
+    rendering_info.colorAttachmentCount = 1;
+    rendering_info.pColorAttachments = &attachment;
+
+    VkBufferImageCopy region{};
+    region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
+    region.imageExtent = {32, 32, 1};
+
+    VkImageMemoryBarrier2 barrier = vku::InitStructHelper();
+    barrier.srcStageMask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
+    barrier.srcAccessMask = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
+    barrier.dstStageMask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
+    barrier.dstAccessMask = VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
+    barrier.image = image;
+    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
+
+    VkDependencyInfo dep_info = vku::InitStructHelper();
+    dep_info.imageMemoryBarrierCount = 1;
+    dep_info.pImageMemoryBarriers = &barrier;
+
+    m_command_buffer.Begin();
+    // Generate accesses on the COLOR_ATTACHMENT_OUTPUT stage that the barrier will connect with
+    m_command_buffer.BeginRendering(rendering_info);
+    m_command_buffer.EndRendering();
+
+    // This barrier does not protect writes on the transfer stage. The following copy generates a WAW hazard
+    vk::CmdPipelineBarrier2(m_command_buffer, &dep_info);
+
+    // Test that the access mask is printed directly and is not replaced with the "all accesses" shortcut
+    m_errorMonitor->SetDesiredErrorRegex("SYNC-HAZARD-WRITE-AFTER-WRITE",
+                                         "VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT\\|VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT");
+    vk::CmdCopyBufferToImage(m_command_buffer, buffer, image, VK_IMAGE_LAYOUT_GENERAL, 1, &region);
+    m_errorMonitor->VerifyFound();
+    m_command_buffer.End();
+}
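
Note on the reporting behavior these tests pin down (not part of the diff itself): SyncVal records prior accesses against the expanded per-stage bits that a meta stage like ALL_TRANSFER stands for, and the error formatter collapses them back into the meta-stage / "all accesses" shortcut only when every supported stage-access combination is covered. Below is a minimal standalone C++ sketch of that collapse idea, not the validation layer's actual implementation; the helper names ExpandAllTransfer and StageText are hypothetical, and the only real names assumed are the VK_PIPELINE_STAGE_2_* enums from the Vulkan headers.

#include <cstdio>
#include <vulkan/vulkan_core.h>  // assumes the Vulkan SDK headers are on the include path

// Hypothetical expansion of the ALL_TRANSFER meta stage into the concrete
// stages it stands for. ACCELERATION_STRUCTURE_COPY is included only when the
// ray tracing extension is available, mirroring DoNotReportUnsupportedStage /
// ReportAccelerationStructureCopyAccesses above.
static VkPipelineStageFlags2 ExpandAllTransfer(bool ray_tracing_enabled) {
    VkPipelineStageFlags2 stages = VK_PIPELINE_STAGE_2_COPY_BIT | VK_PIPELINE_STAGE_2_RESOLVE_BIT |
                                   VK_PIPELINE_STAGE_2_BLIT_BIT | VK_PIPELINE_STAGE_2_CLEAR_BIT;
    if (ray_tracing_enabled) {
        stages |= VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR;
    }
    return stages;
}

// Hypothetical formatter: report the compact meta stage name only when all
// expanded transfer stages recorded accesses; otherwise list stages one by one.
static const char* StageText(VkPipelineStageFlags2 recorded, bool ray_tracing_enabled) {
    const VkPipelineStageFlags2 all = ExpandAllTransfer(ray_tracing_enabled);
    return ((recorded & all) == all) ? "VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT stage"
                                     : "<individual stage list>";
}

int main() {
    // Accesses recorded on all expanded transfer stages collapse back to the
    // meta stage in the message, as asserted by ReportAllTransferMetaStage.
    std::printf("%s\n", StageText(ExpandAllTransfer(false), false));
}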