diff --git a/docs/fine_grained_locking.md b/docs/fine_grained_locking.md index 4c2d95ae5e3..0813fa749ce 100644 --- a/docs/fine_grained_locking.md +++ b/docs/fine_grained_locking.md @@ -1,4 +1,4 @@ - + [![Khronos Vulkan][1]][2] [1]: https://vulkan.lunarg.com/img/Vulkan_100px_Dec16.png "https://www.khronos.org/vulkan/" @@ -541,12 +541,12 @@ The `p_driver_data` pointer is only used by Best Practices validation, but it is ``` - struct MEM_BINDING { - std::shared_ptr mem_state; + struct MemoryBinding { + std::shared_ptr memory_state; VkDeviceSize offset; VkDeviceSize size; }; - using BoundMemoryMap = small_unordered_map; + using BoundMemoryMap = small_unordered_map; BoundMemoryMap bound_memory_; ``` diff --git a/layers/best_practices/bp_device_memory.cpp b/layers/best_practices/bp_device_memory.cpp index 9ca910de99b..bf6b135eb32 100644 --- a/layers/best_practices/bp_device_memory.cpp +++ b/layers/best_practices/bp_device_memory.cpp @@ -158,17 +158,17 @@ bool BestPractices::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory me bool BestPractices::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory memory, const Location& loc) const { bool skip = false; auto buffer_state = Get(buffer); - auto mem_state = Get(memory); - ASSERT_AND_RETURN_SKIP(mem_state && buffer_state); + auto memory_state = Get(memory); + ASSERT_AND_RETURN_SKIP(memory_state && buffer_state); - if (mem_state->allocate_info.allocationSize == buffer_state->create_info.size && - mem_state->allocate_info.allocationSize < kMinDedicatedAllocationSize) { + if (memory_state->allocate_info.allocationSize == buffer_state->create_info.size && + memory_state->allocate_info.allocationSize < kMinDedicatedAllocationSize) { skip |= LogPerformanceWarning("BestPractices-vkBindBufferMemory-small-dedicated-allocation", device, loc, "Trying to bind %s to a memory block which is fully consumed by the buffer. 
" "The required size of the allocation is %" PRIu64 ", but smaller buffers like this should be sub-allocated from " "larger memory blocks. (Current threshold is %" PRIu64 " bytes.)", - FormatHandle(buffer).c_str(), mem_state->allocate_info.allocationSize, + FormatHandle(buffer).c_str(), memory_state->allocate_info.allocationSize, kMinDedicatedAllocationSize); } @@ -206,17 +206,17 @@ bool BestPractices::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_ bool BestPractices::ValidateBindImageMemory(VkImage image, VkDeviceMemory memory, const Location& loc) const { bool skip = false; auto image_state = Get(image); - auto mem_state = Get(memory); - ASSERT_AND_RETURN_SKIP(mem_state && image_state); + auto memory_state = Get(memory); + ASSERT_AND_RETURN_SKIP(memory_state && image_state); - if (mem_state->allocate_info.allocationSize == image_state->requirements[0].size && - mem_state->allocate_info.allocationSize < kMinDedicatedAllocationSize) { + if (memory_state->allocate_info.allocationSize == image_state->requirements[0].size && + memory_state->allocate_info.allocationSize < kMinDedicatedAllocationSize) { skip |= LogPerformanceWarning("BestPractices-vkBindImageMemory-small-dedicated-allocation", device, loc, "Trying to bind %s to a memory block which is fully consumed by the image. " "The required size of the allocation is %" PRIu64 ", but smaller images like this should be sub-allocated from " "larger memory blocks. 
(Current threshold is %" PRIu64 " bytes.)", - FormatHandle(image).c_str(), mem_state->allocate_info.allocationSize, + FormatHandle(image).c_str(), memory_state->allocate_info.allocationSize, kMinDedicatedAllocationSize); } @@ -238,7 +238,7 @@ bool BestPractices::ValidateBindImageMemory(VkImage image, VkDeviceMemory memory } } - uint32_t allocated_properties = phys_dev_mem_props.memoryTypes[mem_state->allocate_info.memoryTypeIndex].propertyFlags; + uint32_t allocated_properties = phys_dev_mem_props.memoryTypes[memory_state->allocate_info.memoryTypeIndex].propertyFlags; if (supports_lazy && (allocated_properties & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) { skip |= LogPerformanceWarning( @@ -246,7 +246,7 @@ bool BestPractices::ValidateBindImageMemory(VkImage image, VkDeviceMemory memory "ttempting to bind memory type %u to VkImage which was created with TRANSIENT_ATTACHMENT_BIT," "but this memory type is not LAZILY_ALLOCATED_BIT. You should use memory type %u here instead to save " "%" PRIu64 " bytes of physical memory.", - mem_state->allocate_info.memoryTypeIndex, suggested_type, image_state->requirements[0].size); + memory_state->allocate_info.memoryTypeIndex, suggested_type, image_state->requirements[0].size); } } diff --git a/layers/best_practices/bp_drawdispatch.cpp b/layers/best_practices/bp_drawdispatch.cpp index d7267d6d621..4219484bdef 100644 --- a/layers/best_practices/bp_drawdispatch.cpp +++ b/layers/best_practices/bp_drawdispatch.cpp @@ -221,10 +221,10 @@ bool BestPractices::ValidateIndexBufferArm(const bp_state::CommandBuffer& cb_sta } const VkIndexType ib_type = cb_state.index_buffer_binding.index_type; - const auto ib_mem_state = ib_state->MemState(); - if (!ib_mem_state) return skip; + const auto ib_memory_state = ib_state->MemoryState(); + if (!ib_memory_state) return skip; - const void* ib_mem = ib_mem_state->p_driver_data; + const void* ib_mem = ib_memory_state->p_driver_data; const auto& last_bound_state = 
cb_state.lastBound[ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS)]; const bool primitive_restart_enable = last_bound_state.IsPrimitiveRestartEnable(); @@ -233,7 +233,7 @@ bool BestPractices::ValidateIndexBufferArm(const bp_state::CommandBuffer& cb_sta if (ib_mem) { const uint32_t scan_stride = GetIndexAlignment(ib_type); // Check if all indices are within the memory allocation size, if robustness is enabled they might not be - if ((firstIndex + indexCount) * scan_stride > ib_mem_state->allocate_info.allocationSize) { + if ((firstIndex + indexCount) * scan_stride > ib_memory_state->allocate_info.allocationSize) { return skip; } const uint8_t* scan_begin = static_cast(ib_mem) + firstIndex * scan_stride; diff --git a/layers/core_checks/cc_buffer_address.h b/layers/core_checks/cc_buffer_address.h index c2112503637..ec8fe6deb0b 100644 --- a/layers/core_checks/cc_buffer_address.h +++ b/layers/core_checks/cc_buffer_address.h @@ -1,6 +1,6 @@ -/* Copyright (c) 2015-2024 The Khronos Group Inc. - * Copyright (c) 2015-2024 Valve Corporation - * Copyright (c) 2015-2024 LunarG, Inc. +/* Copyright (c) 2015-2025 The Khronos Group Inc. + * Copyright (c) 2015-2025 Valve Corporation + * Copyright (c) 2015-2025 LunarG, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -113,9 +113,10 @@ class BufferAddressValidation { std::string* out_error_msg) { if (!buffer_state->sparse && !buffer_state->IsMemoryBound()) { if (out_error_msg) { - if (const auto mem_state = buffer_state->MemState(); mem_state && mem_state->Destroyed()) { + const auto memory_state = buffer_state->MemoryState(); + if (memory_state && memory_state->Destroyed()) { *out_error_msg += - "buffer is bound to memory (" + validator.FormatHandle(mem_state->Handle()) + ") but it has been freed"; + "buffer is bound to memory (" + validator.FormatHandle(memory_state->Handle()) + ") but it has been freed"; } else { *out_error_msg += "buffer has not been bound to memory"; } diff --git a/layers/core_checks/cc_descriptor.cpp b/layers/core_checks/cc_descriptor.cpp index d1b7a884fb7..0aff4c91f16 100644 --- a/layers/core_checks/cc_descriptor.cpp +++ b/layers/core_checks/cc_descriptor.cpp @@ -2188,7 +2188,7 @@ bool CoreChecks::VerifyWriteUpdateContents(const vvl::DescriptorSet &dst_set, co // nullDescriptor feature allows this to be VK_NULL_HANDLE if (auto as_state = Get(as)) { skip |= VerifyBoundMemoryIsValid( - as_state->MemState(), LogObjectList(as), as_state->Handle(), + as_state->MemoryState(), LogObjectList(as), as_state->Handle(), write_loc.pNext(Struct::VkWriteDescriptorSetAccelerationStructureNV, Field::pAccelerationStructures, di), kVUIDUndefined); } diff --git a/layers/core_checks/cc_device_memory.cpp b/layers/core_checks/cc_device_memory.cpp index 0caeb8c5275..5ae5d872f12 100644 --- a/layers/core_checks/cc_device_memory.cpp +++ b/layers/core_checks/cc_device_memory.cpp @@ -28,15 +28,15 @@ #include "state_tracker/ray_tracing_state.h" // For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value. 
-bool CoreChecks::VerifyBoundMemoryIsValid(const vvl::DeviceMemory *mem_state, const LogObjectList &objlist, +bool CoreChecks::VerifyBoundMemoryIsValid(const vvl::DeviceMemory *memory_state, const LogObjectList &objlist, const VulkanTypedHandle &typed_handle, const Location &loc, const char *vuid) const { bool skip = false; - if (!mem_state) { + if (!memory_state) { const char *type_name = string_VulkanObjectType(typed_handle.type); skip |= LogError(vuid, objlist, loc, "(%s) is used with no memory bound. Memory should be bound by calling vkBind%sMemory().", FormatHandle(typed_handle).c_str(), type_name + 2); - } else if (mem_state->Destroyed()) { + } else if (memory_state->Destroyed()) { skip |= LogError(vuid, objlist, loc, "(%s) is used, but bound memory was freed. Memory must not be freed prior to this operation.", FormatHandle(typed_handle).c_str()); @@ -44,12 +44,12 @@ bool CoreChecks::VerifyBoundMemoryIsValid(const vvl::DeviceMemory *mem_state, co return skip; } -bool CoreChecks::VerifyBoundMemoryIsDeviceVisible(const vvl::DeviceMemory *mem_state, const LogObjectList &objlist, +bool CoreChecks::VerifyBoundMemoryIsDeviceVisible(const vvl::DeviceMemory *memory_state, const LogObjectList &objlist, const VulkanTypedHandle &typed_handle, const Location &loc, const char *vuid) const { bool result = false; - if (mem_state) { - if ((phys_dev_mem_props.memoryTypes[mem_state->allocate_info.memoryTypeIndex].propertyFlags & + if (memory_state) { + if ((phys_dev_mem_props.memoryTypes[memory_state->allocate_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) == 0) { result |= LogError(vuid, objlist, loc, "(%s) used with memory that is not device visible.", FormatHandle(typed_handle).c_str()); @@ -248,11 +248,10 @@ bool CoreChecks::ValidateAccelStructBufferMemoryIsHostVisible(const vvl::Acceler bool result = false; result |= ValidateMemoryIsBoundToBuffer(device, *accel_struct.buffer_state, buffer_loc, vuid); if (!result) { - const auto mem_state = 
accel_struct.buffer_state->MemState(); - if (mem_state) { - if ((phys_dev_mem_props.memoryTypes[mem_state->allocate_info.memoryTypeIndex].propertyFlags & + if (const auto memory_state = accel_struct.buffer_state->MemoryState()) { + if ((phys_dev_mem_props.memoryTypes[memory_state->allocate_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { - const LogObjectList objlist(accel_struct.Handle(), accel_struct.buffer_state->Handle(), mem_state->Handle()); + const LogObjectList objlist(accel_struct.Handle(), accel_struct.buffer_state->Handle(), memory_state->Handle()); result |= LogError(vuid, objlist, buffer_loc, "has been created with a buffer whose bound memory is not host visible."); } @@ -264,12 +263,12 @@ bool CoreChecks::ValidateAccelStructBufferMemoryIsHostVisible(const vvl::Acceler bool CoreChecks::ValidateAccelStructBufferMemoryIsNotMultiInstance(const vvl::AccelerationStructureKHR &accel_struct, const Location &accel_struct_loc, const char *vuid) const { bool skip = false; - if (const vvl::DeviceMemory *buffer_mem = accel_struct.buffer_state->MemState()) { - if (buffer_mem->multi_instance) { - const LogObjectList objlist(accel_struct.Handle(), accel_struct.buffer_state->Handle(), buffer_mem->Handle()); + if (const vvl::DeviceMemory *memory_state = accel_struct.buffer_state->MemoryState()) { + if (memory_state->multi_instance) { + const LogObjectList objlist(accel_struct.Handle(), accel_struct.buffer_state->Handle(), memory_state->Handle()); skip |= LogError(vuid, objlist, accel_struct_loc, "has been created with a buffer bound to memory (%s) that was allocated with multiple instances.", - FormatHandle(buffer_mem->Handle()).c_str()); + FormatHandle(memory_state->Handle()).c_str()); } } return skip; @@ -305,7 +304,7 @@ bool CoreChecks::ValidateSetMemBinding(const vvl::DeviceMemory &memory_state, co FormatHandle(memory_state.Handle()).c_str(), FormatHandle(typed_handle).c_str(), handle_type); } - const auto *prev_binding = 
mem_binding.MemState(); + const auto *prev_binding = mem_binding.MemoryState(); if (prev_binding || mem_binding.indeterminate_state) { const char *vuid = kVUIDUndefined; if (is_buffer) { @@ -2175,13 +2174,13 @@ bool CoreChecks::ValidateSparseMemoryBind(const VkSparseMemoryBind &bind, const VkDeviceSize resource_size, VkExternalMemoryHandleTypeFlags external_handle_types, const VulkanTypedHandle &resource_handle, const Location &loc) const { bool skip = false; - if (auto mem_state = Get(bind.memory)) { - if (!((uint32_t(1) << mem_state->allocate_info.memoryTypeIndex) & requirements.memoryTypeBits)) { + if (auto memory_state = Get(bind.memory)) { + if (!((uint32_t(1) << memory_state->allocate_info.memoryTypeIndex) & requirements.memoryTypeBits)) { const LogObjectList objlist(bind.memory, resource_handle); skip |= LogError("VUID-VkSparseMemoryBind-memory-01096", objlist, loc.dot(Field::memory), "has a type index (%" PRIu32 ") that is not among the allowed types mask (0x%" PRIX32 ") for this resource.", - mem_state->allocate_info.memoryTypeIndex, requirements.memoryTypeBits); + memory_state->allocate_info.memoryTypeIndex, requirements.memoryTypeBits); } if (SafeModulo(bind.memoryOffset, requirements.alignment) != 0) { @@ -2191,46 +2190,46 @@ bool CoreChecks::ValidateSparseMemoryBind(const VkSparseMemoryBind &bind, const requirements.alignment); } - if (phys_dev_mem_props.memoryTypes[mem_state->allocate_info.memoryTypeIndex].propertyFlags & + if (phys_dev_mem_props.memoryTypes[memory_state->allocate_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) { const LogObjectList objlist(bind.memory, resource_handle); skip |= LogError("VUID-VkSparseMemoryBind-memory-01097", objlist, loc.dot(Field::memory), "type has VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT bit set."); } - if (bind.memoryOffset >= mem_state->allocate_info.allocationSize) { + if (bind.memoryOffset >= memory_state->allocate_info.allocationSize) { const LogObjectList objlist(bind.memory, 
resource_handle); skip |= LogError("VUID-VkSparseMemoryBind-memoryOffset-01101", objlist, loc.dot(Field::memoryOffset), "(%" PRIu64 ") must be less than the size of memory (%" PRIu64 ")", bind.memoryOffset, - mem_state->allocate_info.allocationSize); + memory_state->allocate_info.allocationSize); } - if ((mem_state->allocate_info.allocationSize - bind.memoryOffset) < bind.size) { + if ((memory_state->allocate_info.allocationSize - bind.memoryOffset) < bind.size) { const LogObjectList objlist(bind.memory, resource_handle); skip |= LogError("VUID-VkSparseMemoryBind-size-01102", objlist, loc.dot(Field::size), "(%" PRIu64 ") must be less than or equal to the size of memory (%" PRIu64 ") minus memoryOffset (%" PRIu64 ").", - bind.size, mem_state->allocate_info.allocationSize, bind.memoryOffset); + bind.size, memory_state->allocate_info.allocationSize, bind.memoryOffset); } - if (mem_state->IsExport()) { - if (!(mem_state->export_handle_types & external_handle_types)) { + if (memory_state->IsExport()) { + if (!(memory_state->export_handle_types & external_handle_types)) { const LogObjectList objlist(bind.memory, resource_handle); skip |= LogError("VUID-VkSparseMemoryBind-memory-02730", objlist, loc.dot(Field::memory).pNext(Struct::VkExportMemoryAllocateInfo).dot(Field::handleTypes), "is %s, but the external handle types specified in resource are %s.", - string_VkExternalMemoryHandleTypeFlags(mem_state->export_handle_types).c_str(), + string_VkExternalMemoryHandleTypeFlags(memory_state->export_handle_types).c_str(), string_VkExternalMemoryHandleTypeFlags(external_handle_types).c_str()); } } - if (mem_state->IsImport()) { - if (!(*mem_state->import_handle_type & external_handle_types)) { + if (memory_state->IsImport()) { + if (!(*memory_state->import_handle_type & external_handle_types)) { const LogObjectList objlist(bind.memory, resource_handle); skip |= LogError("VUID-VkSparseMemoryBind-memory-02731", objlist, loc.dot(Field::memory), "was created with memory import 
operation, with handle type %s, but the external handle types " "specified in resource are %s.", - string_VkExternalMemoryHandleTypeFlagBits(*mem_state->import_handle_type), + string_VkExternalMemoryHandleTypeFlagBits(*memory_state->import_handle_type), string_VkExternalMemoryHandleTypeFlags(external_handle_types).c_str()); } } @@ -2288,13 +2287,13 @@ bool CoreChecks::ValidateSparseImageMemoryBind(vvl::Image const *image_state, Vk const Location &bind_loc, const Location &memory_loc) const { bool skip = false; - if (auto const mem_state = Get(bind.memory)) { + if (auto const memory_state = Get(bind.memory)) { // TODO: The closest one should be VUID-VkSparseImageMemoryBind-memory-01105 instead of the mentioned // one. We also need to check memory_bind.memory - if (bind.memoryOffset >= mem_state->allocate_info.allocationSize) { + if (bind.memoryOffset >= memory_state->allocate_info.allocationSize) { skip |= LogError("VUID-VkSparseMemoryBind-memoryOffset-01101", bind.memory, bind_loc.dot(Field::memoryOffset), "(%" PRIu64 ") is not less than the size (%" PRIu64 ") of memory.", bind.memoryOffset, - mem_state->allocate_info.allocationSize); + memory_state->allocate_info.allocationSize); } // TODO: We cannot validate the requirement size since there is no way @@ -2307,27 +2306,27 @@ bool CoreChecks::ValidateSparseImageMemoryBind(vvl::Image const *image_state, Vk requirement.alignment); } - skip |= ValidateMemoryTypes(*mem_state.get(), requirement.memoryTypeBits, memory_loc.dot(Field::memory), + skip |= ValidateMemoryTypes(*memory_state.get(), requirement.memoryTypeBits, memory_loc.dot(Field::memory), "VUID-VkSparseImageMemoryBind-memory-01105"); - if (mem_state->IsExport()) { - if (!(mem_state->export_handle_types & image_state->external_memory_handle_types)) { + if (memory_state->IsExport()) { + if (!(memory_state->export_handle_types & image_state->external_memory_handle_types)) { const LogObjectList objlist(bind.memory, image_state->Handle()); skip |= 
LogError("VUID-VkSparseImageMemoryBind-memory-02732", objlist, memory_loc.dot(Field::memory).pNext(Struct::VkExportMemoryAllocateInfo).dot(Field::handleTypes), "is %s, but the external handle types specified in resource are %s.", - string_VkExternalMemoryHandleTypeFlags(mem_state->export_handle_types).c_str(), + string_VkExternalMemoryHandleTypeFlags(memory_state->export_handle_types).c_str(), string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle_types).c_str()); } } - if (mem_state->IsImport()) { - if (!(*mem_state->import_handle_type & image_state->external_memory_handle_types)) { + if (memory_state->IsImport()) { + if (!(*memory_state->import_handle_type & image_state->external_memory_handle_types)) { const LogObjectList objlist(bind.memory, image_state->Handle()); skip |= LogError("VUID-VkSparseImageMemoryBind-memory-02733", objlist, memory_loc.dot(Field::memory), "was created with memory import operation, with handle type %s, but the external handle types " "specified in resource are %s.", - string_VkExternalMemoryHandleTypeFlagBits(*mem_state->import_handle_type), + string_VkExternalMemoryHandleTypeFlagBits(*memory_state->import_handle_type), string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle_types).c_str()); } } @@ -2511,7 +2510,7 @@ bool CoreChecks::ValidateMemoryIsBoundToBuffer(LogObjectList objlist, const vvl: bool skip = false; if (!buffer_state.sparse) { objlist.add(buffer_state.Handle()); - skip |= VerifyBoundMemoryIsValid(buffer_state.MemState(), objlist, buffer_state.Handle(), buffer_loc, vuid); + skip |= VerifyBoundMemoryIsValid(buffer_state.MemoryState(), objlist, buffer_state.Handle(), buffer_loc, vuid); } return skip; } diff --git a/layers/core_checks/cc_ray_tracing.cpp b/layers/core_checks/cc_ray_tracing.cpp index f70caf87f96..f7d5d692ed2 100644 --- a/layers/core_checks/cc_ray_tracing.cpp +++ b/layers/core_checks/cc_ray_tracing.cpp @@ -136,7 +136,7 @@ bool 
CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device bool skip = false; if (auto as_state = Get(accelerationStructure)) { - skip |= VerifyBoundMemoryIsValid(as_state->MemState(), LogObjectList(accelerationStructure), as_state->Handle(), + skip |= VerifyBoundMemoryIsValid(as_state->MemoryState(), LogObjectList(accelerationStructure), as_state->Handle(), error_obj.location.dot(Field::accelerationStructure), "VUID-vkGetAccelerationStructureHandleNV-accelerationStructure-02787"); } @@ -1248,7 +1248,7 @@ bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer } if (dst_as_state) { - skip |= VerifyBoundMemoryIsValid(dst_as_state->MemState(), LogObjectList(commandBuffer, dst), dst_as_state->Handle(), + skip |= VerifyBoundMemoryIsValid(dst_as_state->MemoryState(), LogObjectList(commandBuffer, dst), dst_as_state->Handle(), error_obj.location.dot(Field::dst), "VUID-vkCmdBuildAccelerationStructureNV-dst-07787"); } @@ -1332,15 +1332,15 @@ bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer c if (dst_as_state) { const LogObjectList objlist(commandBuffer, dst); - skip |= VerifyBoundMemoryIsValid(dst_as_state->MemState(), objlist, dst_as_state->Handle(), + skip |= VerifyBoundMemoryIsValid(dst_as_state->MemoryState(), objlist, dst_as_state->Handle(), error_obj.location.dot(Field::dst), "VUID-vkCmdCopyAccelerationStructureNV-dst-07792"); - skip |= VerifyBoundMemoryIsDeviceVisible(dst_as_state->MemState(), objlist, dst_as_state->Handle(), + skip |= VerifyBoundMemoryIsDeviceVisible(dst_as_state->MemoryState(), objlist, dst_as_state->Handle(), error_obj.location.dot(Field::dst), "VUID-vkCmdCopyAccelerationStructureNV-buffer-03719"); } if (src_as_state) { const LogObjectList objlist(commandBuffer, src); - skip |= VerifyBoundMemoryIsDeviceVisible(src_as_state->MemState(), objlist, src_as_state->Handle(), + skip |= VerifyBoundMemoryIsDeviceVisible(src_as_state->MemoryState(), objlist, src_as_state->Handle(), 
error_obj.location.dot(Field::src), "VUID-vkCmdCopyAccelerationStructureNV-buffer-03718"); if (!src_as_state->built) { diff --git a/layers/core_checks/cc_video.cpp b/layers/core_checks/cc_video.cpp index 09ee786b9ea..3c59fde61f6 100644 --- a/layers/core_checks/cc_video.cpp +++ b/layers/core_checks/cc_video.cpp @@ -3565,34 +3565,34 @@ bool CoreChecks::PreCallValidateBindVideoSessionMemoryKHR(VkDevice device, VkVid const auto &bind_info = pBindSessionMemoryInfos[i]; const auto &mem_binding_info = vs_state->GetMemoryBindingInfo(bind_info.memoryBindIndex); if (mem_binding_info != nullptr) { - if (auto mem_state = Get(bind_info.memory)) { - if (((1 << mem_state->allocate_info.memoryTypeIndex) & mem_binding_info->requirements.memoryTypeBits) == 0) { - const LogObjectList objlist(videoSession, mem_state->Handle()); + if (auto memory_state = Get(bind_info.memory)) { + if (((1 << memory_state->allocate_info.memoryTypeIndex) & mem_binding_info->requirements.memoryTypeBits) == 0) { + const LogObjectList objlist(videoSession, memory_state->Handle()); skip |= LogError("VUID-vkBindVideoSessionMemoryKHR-pBindSessionMemoryInfos-07198", objlist, error_obj.location, "memoryTypeBits (0x%x) for memory binding " "with index %u of %s are not compatible with the memory type index (%u) of " "%s specified in pBindSessionMemoryInfos[%u].memory.", mem_binding_info->requirements.memoryTypeBits, bind_info.memoryBindIndex, - FormatHandle(videoSession).c_str(), mem_state->allocate_info.memoryTypeIndex, - FormatHandle(*mem_state).c_str(), i); + FormatHandle(videoSession).c_str(), memory_state->allocate_info.memoryTypeIndex, + FormatHandle(*memory_state).c_str(), i); } - if (bind_info.memoryOffset >= mem_state->allocate_info.allocationSize) { - const LogObjectList objlist(videoSession, mem_state->Handle()); + if (bind_info.memoryOffset >= memory_state->allocate_info.allocationSize) { + const LogObjectList objlist(videoSession, memory_state->Handle()); skip |= 
LogError("VUID-VkBindVideoSessionMemoryInfoKHR-memoryOffset-07201", objlist, error_obj.location.dot(Field::pBindSessionMemoryInfos, i).dot(Field::memoryOffset), "(%" PRIuLEAST64 ") must be less than the size (%" PRIuLEAST64 ") of %s.", - bind_info.memoryOffset, mem_state->allocate_info.allocationSize, - FormatHandle(*mem_state).c_str()); - } else if (bind_info.memoryOffset + bind_info.memorySize > mem_state->allocate_info.allocationSize) { - const LogObjectList objlist(videoSession, mem_state->Handle()); + bind_info.memoryOffset, memory_state->allocate_info.allocationSize, + FormatHandle(*memory_state).c_str()); + } else if (bind_info.memoryOffset + bind_info.memorySize > memory_state->allocate_info.allocationSize) { + const LogObjectList objlist(videoSession, memory_state->Handle()); skip |= LogError("VUID-VkBindVideoSessionMemoryInfoKHR-memorySize-07202", objlist, error_obj.location.dot(Field::pBindSessionMemoryInfos, i).dot(Field::memoryOffset), "(%" PRIuLEAST64 ") + memory size (%" PRIuLEAST64 ") must be less than or equal to the size (%" PRIuLEAST64 ") of %s.", - bind_info.memoryOffset, bind_info.memorySize, mem_state->allocate_info.allocationSize, - FormatHandle(*mem_state).c_str()); + bind_info.memoryOffset, bind_info.memorySize, memory_state->allocate_info.allocationSize, + FormatHandle(*memory_state).c_str()); } } diff --git a/layers/core_checks/core_validation.h b/layers/core_checks/core_validation.h index 7b4a7647c6e..8b074ff5aba 100644 --- a/layers/core_checks/core_validation.h +++ b/layers/core_checks/core_validation.h @@ -1050,9 +1050,9 @@ class CoreChecks : public ValidationStateTracker { void UpdateCmdBufImageLayouts(const vvl::CommandBuffer& cb_state); - bool VerifyBoundMemoryIsValid(const vvl::DeviceMemory* mem_state, const LogObjectList& objlist, + bool VerifyBoundMemoryIsValid(const vvl::DeviceMemory* memory_state, const LogObjectList& objlist, const VulkanTypedHandle& typed_handle, const Location& loc, const char* vuid) const; - bool 
VerifyBoundMemoryIsDeviceVisible(const vvl::DeviceMemory* mem_state, const LogObjectList& objlist, + bool VerifyBoundMemoryIsDeviceVisible(const vvl::DeviceMemory* memory_state, const LogObjectList& objlist, const VulkanTypedHandle& typed_handle, const Location& loc, const char* vuid) const; bool ValidateLayoutVsAttachmentDescription(const VkImageLayout first_layout, const uint32_t attachment, diff --git a/layers/state_tracker/device_memory_state.cpp b/layers/state_tracker/device_memory_state.cpp index 77b22d90caf..cc3cde68474 100644 --- a/layers/state_tracker/device_memory_state.cpp +++ b/layers/state_tracker/device_memory_state.cpp @@ -1,7 +1,7 @@ -/* Copyright (c) 2015-2024 The Khronos Group Inc. - * Copyright (c) 2015-2024 Valve Corporation - * Copyright (c) 2015-2024 LunarG, Inc. - * Copyright (C) 2015-2024 Google Inc. +/* Copyright (c) 2015-2025 The Khronos Group Inc. + * Copyright (c) 2015-2025 Valve Corporation + * Copyright (c) 2015-2025 LunarG, Inc. + * Copyright (C) 2015-2025 Google Inc. * Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"); @@ -109,12 +109,12 @@ DeviceMemory::DeviceMemory(VkDeviceMemory handle, const VkMemoryAllocateInfo *al } } // namespace vvl -void vvl::BindableLinearMemoryTracker::BindMemory(StateObject *parent, std::shared_ptr &mem_state, - VkDeviceSize memory_offset, VkDeviceSize resource_offset, VkDeviceSize size) { - ASSERT_AND_RETURN(mem_state); +void vvl::BindableLinearMemoryTracker::BindMemory(StateObject *parent, std::shared_ptr &memory_state, + VkDeviceSize memory_offset, VkDeviceSize resource_offset, VkDeviceSize size) { + ASSERT_AND_RETURN(memory_state); - mem_state->AddParent(parent); - binding_ = {mem_state, memory_offset, 0u}; + memory_state->AddParent(parent); + binding_ = {memory_state, memory_offset, 0u}; } DeviceMemoryState vvl::BindableLinearMemoryTracker::GetBoundMemoryStates() const { @@ -177,9 +177,9 @@ bool vvl::BindableSparseMemoryTracker::HasFullRangeBound() const { return true; } -void vvl::BindableSparseMemoryTracker::BindMemory(StateObject *parent, std::shared_ptr &mem_state, - VkDeviceSize memory_offset, VkDeviceSize resource_offset, VkDeviceSize size) { - MEM_BINDING memory_data{mem_state, memory_offset, resource_offset}; +void vvl::BindableSparseMemoryTracker::BindMemory(StateObject *parent, std::shared_ptr &memory_state, + VkDeviceSize memory_offset, VkDeviceSize resource_offset, VkDeviceSize size) { + MemoryBinding memory_data{memory_state, memory_offset, resource_offset}; BindingMap::value_type item{{resource_offset, resource_offset + size}, memory_data}; auto guard = WriteLockGuard{binding_lock_}; @@ -269,16 +269,16 @@ BoundRanges vvl::BindableSparseMemoryTracker::GetBoundRanges(const BufferRange & } DeviceMemoryState vvl::BindableSparseMemoryTracker::GetBoundMemoryStates() const { - DeviceMemoryState dev_mem_states; + DeviceMemoryState dev_memory_states; { auto guard = ReadLockGuard{binding_lock_}; for (auto &binding : binding_map_) { - if (binding.second.memory_state) 
dev_mem_states.emplace(binding.second.memory_state); + if (binding.second.memory_state) dev_memory_states.emplace(binding.second.memory_state); } } - return dev_mem_states; + return dev_memory_states; } @@ -309,13 +309,14 @@ bool vvl::BindableMultiplanarMemoryTracker::HasFullRangeBound() const { } // resource_offset is the plane index -void vvl::BindableMultiplanarMemoryTracker::BindMemory(StateObject *parent, std::shared_ptr &mem_state, - VkDeviceSize memory_offset, VkDeviceSize resource_offset, VkDeviceSize size) { - ASSERT_AND_RETURN(mem_state); +void vvl::BindableMultiplanarMemoryTracker::BindMemory(StateObject *parent, std::shared_ptr &memory_state, + VkDeviceSize memory_offset, VkDeviceSize resource_offset, + VkDeviceSize size) { + ASSERT_AND_RETURN(memory_state); assert(resource_offset < planes_.size()); - mem_state->AddParent(parent); - planes_[static_cast(resource_offset)].binding = {mem_state, memory_offset, 0u}; + memory_state->AddParent(parent); + planes_[static_cast(resource_offset)].binding = {memory_state, memory_offset, 0u}; } // range needs to be between [0, planes_[0].size + planes_[1].size + planes_[2].size) @@ -341,15 +342,15 @@ BoundMemoryRange vvl::BindableMultiplanarMemoryTracker::GetBoundMemoryRange(cons } DeviceMemoryState vvl::BindableMultiplanarMemoryTracker::GetBoundMemoryStates() const { - DeviceMemoryState dev_mem_states; + DeviceMemoryState dev_memory_states; for (unsigned i = 0u; i < planes_.size(); ++i) { if (planes_[i].binding.memory_state) { - dev_mem_states.insert(planes_[i].binding.memory_state); + dev_memory_states.insert(planes_[i].binding.memory_state); } } - return dev_mem_states; + return dev_memory_states; } std::pair vvl::Bindable::GetResourceMemoryOverlap( diff --git a/layers/state_tracker/device_memory_state.h b/layers/state_tracker/device_memory_state.h index b5e779968a7..1a45e7ee826 100644 --- a/layers/state_tracker/device_memory_state.h +++ b/layers/state_tracker/device_memory_state.h @@ -92,7 +92,7 @@ class 
DeviceMemory : public StateObject { // will be stored in the range_map // We need the resource_offset and memory_offset to be able to transform from // resource space (in which the range is) to memory space -struct MEM_BINDING { +struct MemoryBinding { std::shared_ptr memory_state; VkDeviceSize memory_offset; VkDeviceSize resource_offset; @@ -108,7 +108,7 @@ class BindableMemoryTracker { virtual ~BindableMemoryTracker() {} // kept for backwards compatibility, only useful with the Linear tracker - virtual const MEM_BINDING *Binding() const = 0; + virtual const MemoryBinding *Binding() const = 0; virtual unsigned CountDeviceMemory(VkDeviceMemory memory) const = 0; virtual bool HasFullRangeBound() const = 0; @@ -124,7 +124,7 @@ class BindableNoMemoryTracker : public BindableMemoryTracker { public: BindableNoMemoryTracker(const VkMemoryRequirements *) {} - const MEM_BINDING *Binding() const override { return nullptr; } + const MemoryBinding *Binding() const override { return nullptr; } unsigned CountDeviceMemory(VkDeviceMemory memory) const override { return 0; } @@ -144,14 +144,14 @@ class BindableLinearMemoryTracker : public BindableMemoryTracker { public: BindableLinearMemoryTracker(const VkMemoryRequirements *) {} - const MEM_BINDING *Binding() const override { return binding_.memory_state ? &binding_ : nullptr; } + const MemoryBinding *Binding() const override { return binding_.memory_state ? &binding_ : nullptr; } unsigned CountDeviceMemory(VkDeviceMemory memory) const override { return binding_.memory_state && binding_.memory_state->VkHandle() == memory ? 
1 : 0; } bool HasFullRangeBound() const override { return binding_.memory_state != nullptr; } - void BindMemory(StateObject *parent, std::shared_ptr<vvl::DeviceMemory> &mem_state, VkDeviceSize memory_offset, + void BindMemory(StateObject *parent, std::shared_ptr<vvl::DeviceMemory> &memory_state, VkDeviceSize memory_offset, VkDeviceSize resource_offset, VkDeviceSize size) override; BoundMemoryRange GetBoundMemoryRange(const MemoryRange &range) const override; @@ -160,7 +160,7 @@ class BindableLinearMemoryTracker : public BindableMemoryTracker { DeviceMemoryState GetBoundMemoryStates() const override; private: - MEM_BINDING binding_; + MemoryBinding binding_; }; // Sparse bindable memory tracker @@ -170,13 +170,13 @@ class BindableSparseMemoryTracker : public BindableMemoryTracker { public: BindableSparseMemoryTracker(const VkMemoryRequirements *requirements, bool is_resident) : resource_size_(requirements->size), is_resident_(is_resident) {} - const MEM_BINDING *Binding() const override { return nullptr; } + const MemoryBinding *Binding() const override { return nullptr; } unsigned CountDeviceMemory(VkDeviceMemory memory) const override; bool HasFullRangeBound() const override; - void BindMemory(StateObject *parent, std::shared_ptr<vvl::DeviceMemory> &mem_state, VkDeviceSize memory_offset, + void BindMemory(StateObject *parent, std::shared_ptr<vvl::DeviceMemory> &memory_state, VkDeviceSize memory_offset, VkDeviceSize resource_offset, VkDeviceSize size) override; BoundMemoryRange GetBoundMemoryRange(const MemoryRange &range) const override; @@ -192,7 +192,7 @@ class BindableSparseMemoryTracker : public BindableMemoryTracker { private: // This range map uses the range in resource space to know the size of the bound memory - using BindingMap = sparse_container::range_map<VkDeviceSize, MEM_BINDING>; + using BindingMap = sparse_container::range_map<VkDeviceSize, MemoryBinding>; BindingMap binding_map_; mutable std::shared_mutex binding_lock_; VkDeviceSize resource_size_; @@ -204,13 +204,13 @@ class BindableMultiplanarMemoryTracker : public BindableMemoryTracker { public:
BindableMultiplanarMemoryTracker(const VkMemoryRequirements *requirements, uint32_t num_planes); - const MEM_BINDING *Binding() const override { return nullptr; } + const MemoryBinding *Binding() const override { return nullptr; } unsigned CountDeviceMemory(VkDeviceMemory memory) const override; bool HasFullRangeBound() const override; - void BindMemory(StateObject *parent, std::shared_ptr<vvl::DeviceMemory> &mem_state, VkDeviceSize memory_offset, + void BindMemory(StateObject *parent, std::shared_ptr<vvl::DeviceMemory> &memory_state, VkDeviceSize memory_offset, VkDeviceSize resource_offset, VkDeviceSize size) override; BoundMemoryRange GetBoundMemoryRange(const MemoryRange &range) const override; @@ -223,7 +223,7 @@ class BindableMultiplanarMemoryTracker : public BindableMemoryTracker { private: struct Plane { - MEM_BINDING binding; + MemoryBinding binding; VkDeviceSize size; }; std::vector<Plane> planes_; @@ -261,8 +261,8 @@ class Bindable : public StateObject { } // Will be false if VkBindMemoryStatus had a non-success result - const vvl::DeviceMemory *MemState() const { - const MEM_BINDING *binding = Binding(); + const vvl::DeviceMemory *MemoryState() const { + const MemoryBinding *binding = Binding(); return binding ?
binding->memory_state.get() : nullptr; } @@ -277,8 +277,8 @@ class Bindable : public StateObject { } bool IsMemoryBound() const { - const auto mem_state = MemState(); - return mem_state && !mem_state->Destroyed(); + const auto memory_state = MemoryState(); + return memory_state && !memory_state->Destroyed(); } void NotifyInvalidate(const NodeList &invalid_nodes, bool unlink) override { @@ -324,7 +324,7 @@ class Bindable : public StateObject { } // Kept for compatibility - const MEM_BINDING *Binding() const { return memory_tracker_->Binding(); } + const MemoryBinding *Binding() const { return memory_tracker_->Binding(); } unsigned CountDeviceMemory(VkDeviceMemory memory) const { return memory_tracker_->CountDeviceMemory(memory); } diff --git a/layers/state_tracker/image_state.h b/layers/state_tracker/image_state.h index 2ed6a950c53..61a811f57a9 100644 --- a/layers/state_tracker/image_state.h +++ b/layers/state_tracker/image_state.h @@ -149,7 +149,7 @@ class Image : public Bindable { bool IsSwapchainImage() const { return create_from_swapchain != VK_NULL_HANDLE; } // TODO - need to understand if VkBindImageMemorySwapchainInfoKHR counts as "bound" - bool HasBeenBound() const { return (MemState() != nullptr) || (bind_swapchain); } + bool HasBeenBound() const { return (MemoryState() != nullptr) || (bind_swapchain); } inline bool IsImageTypeEqual(const VkImageCreateInfo &other_create_info) const { return create_info.imageType == other_create_info.imageType; diff --git a/layers/state_tracker/state_tracker.cpp b/layers/state_tracker/state_tracker.cpp index 2ca1758997e..adea009a0cf 100644 --- a/layers/state_tracker/state_tracker.cpp +++ b/layers/state_tracker/state_tracker.cpp @@ -1513,9 +1513,9 @@ void ValidationStateTracker::PreCallRecordQueueBindSparse(VkQueue queue, uint32_ for (uint32_t j = 0; j < bind_info.bufferBindCount; j++) { for (uint32_t k = 0; k < bind_info.pBufferBinds[j].bindCount; k++) { auto sparse_binding = bind_info.pBufferBinds[j].pBinds[k]; - auto 
mem_state = Get<vvl::DeviceMemory>(sparse_binding.memory); + auto memory_state = Get<vvl::DeviceMemory>(sparse_binding.memory); if (auto buffer_state = Get<vvl::Buffer>(bind_info.pBufferBinds[j].buffer)) { - buffer_state->BindMemory(buffer_state.get(), mem_state, sparse_binding.memoryOffset, + buffer_state->BindMemory(buffer_state.get(), memory_state, sparse_binding.memoryOffset, sparse_binding.resourceOffset, sparse_binding.size); } } @@ -1523,7 +1523,7 @@ void ValidationStateTracker::PreCallRecordQueueBindSparse(VkQueue queue, uint32_ for (uint32_t j = 0; j < bind_info.imageOpaqueBindCount; j++) { for (uint32_t k = 0; k < bind_info.pImageOpaqueBinds[j].bindCount; k++) { auto sparse_binding = bind_info.pImageOpaqueBinds[j].pBinds[k]; - auto mem_state = Get<vvl::DeviceMemory>(sparse_binding.memory); + auto memory_state = Get<vvl::DeviceMemory>(sparse_binding.memory); if (auto image_state = Get<vvl::Image>(bind_info.pImageOpaqueBinds[j].image)) { // An Android special image cannot get VkSubresourceLayout until the image binds a memory. // See: VUID-vkGetImageSubresourceLayout-image-09432 @@ -1531,7 +1531,7 @@ void ValidationStateTracker::PreCallRecordQueueBindSparse(VkQueue queue, uint32_ image_state->fragment_encoder = std::make_unique(*image_state); } - image_state->BindMemory(image_state.get(), mem_state, sparse_binding.memoryOffset, + image_state->BindMemory(image_state.get(), memory_state, sparse_binding.memoryOffset, sparse_binding.resourceOffset, sparse_binding.size); } } @@ -1542,7 +1542,7 @@ void ValidationStateTracker::PreCallRecordQueueBindSparse(VkQueue queue, uint32_ // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4; VkDeviceSize offset = sparse_binding.offset.z * sparse_binding.offset.y * sparse_binding.offset.x * 4; - auto mem_state = Get<vvl::DeviceMemory>(sparse_binding.memory); + auto memory_state = Get<vvl::DeviceMemory>(sparse_binding.memory); if (auto image_state = Get<vvl::Image>(bind_info.pImageBinds[j].image)) { // An
Android special image cannot get VkSubresourceLayout until the image binds a memory. // See: VUID-vkGetImageSubresourceLayout-image-09432 @@ -1550,7 +1550,7 @@ void ValidationStateTracker::PreCallRecordQueueBindSparse(VkQueue queue, uint32_ image_state->fragment_encoder = std::make_unique(*image_state); } - image_state->BindMemory(image_state.get(), mem_state, sparse_binding.memoryOffset, offset, size); + image_state->BindMemory(image_state.get(), memory_state, sparse_binding.memoryOffset, offset, size); } } } @@ -1783,8 +1783,8 @@ void ValidationStateTracker::UpdateBindBufferMemoryState(const VkBindBufferMemor if (!buffer_state) return; // Track objects tied to memory - if (auto mem_state = Get<vvl::DeviceMemory>(bind_info.memory)) { - buffer_state->BindMemory(buffer_state.get(), mem_state, bind_info.memoryOffset, 0u, buffer_state->requirements.size); + if (auto memory_state = Get<vvl::DeviceMemory>(bind_info.memory)) { + buffer_state->BindMemory(buffer_state.get(), memory_state, bind_info.memoryOffset, 0u, buffer_state->requirements.size); } } @@ -2731,8 +2731,8 @@ void ValidationStateTracker::PostCallRecordBindAccelerationStructureMemoryNV( if (auto as_state = Get<vvl::AccelerationStructureNV>(info.accelerationStructure)) { // Track objects tied to memory - if (auto mem_state = Get<vvl::DeviceMemory>(info.memory)) { - as_state->BindMemory(as_state.get(), mem_state, info.memoryOffset, 0u, as_state->memory_requirements.size); + if (auto memory_state = Get<vvl::DeviceMemory>(info.memory)) { + as_state->BindMemory(as_state.get(), memory_state, info.memoryOffset, 0u, as_state->memory_requirements.size); } // GPU validation of top level acceleration structure building needs acceleration structure handles.
diff --git a/layers/sync/sync_validation.cpp b/layers/sync/sync_validation.cpp index 07ca1ec8792..6a9c55fe907 100644 --- a/layers/sync/sync_validation.cpp +++ b/layers/sync/sync_validation.cpp @@ -1193,7 +1193,7 @@ bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, auto src_image = Get<vvl::Image>(srcImage); auto dst_buffer = Get<vvl::Buffer>(dstBuffer); - const auto dst_mem = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemState()->VkHandle() : VK_NULL_HANDLE; + const VkDeviceMemory dst_memory = (dst_buffer && !dst_buffer->sparse) ? dst_buffer->MemoryState()->VkHandle() : VK_NULL_HANDLE; for (uint32_t region = 0; region < regionCount; region++) { const auto &copy_region = pRegions[region]; if (src_image) { @@ -1204,7 +1204,7 @@ bool SyncValidator::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, const auto error = error_messages_.ImageRegionError(hazard, srcImage, true, region, *cb_access_context); skip |= SyncError(hazard.Hazard(), objlist, loc, error); } - if (dst_mem) { + if (dst_memory != VK_NULL_HANDLE) { ResourceAccessRange dst_range = MakeRange( copy_region.bufferOffset, GetBufferSizeFromCopyImage(copy_region, src_image->create_info.format, src_image->create_info.arrayLayers));