diff --git a/GPU/GPUTracking/DataCompression/TPCClusterDecompressor.h b/GPU/GPUTracking/DataCompression/TPCClusterDecompressor.h
index bb89ebd055058..03c40e9b54923 100644
--- a/GPU/GPUTracking/DataCompression/TPCClusterDecompressor.h
+++ b/GPU/GPUTracking/DataCompression/TPCClusterDecompressor.h
@@ -40,8 +40,6 @@ class TPCClusterDecompressor
   static void decompressTrack(const o2::tpc::CompressedClusters* clustersCompressed, const GPUParam& param, const unsigned int maxTime, const unsigned int i, unsigned int& offset, Args&... args);
   template <typename... Args>
   static void decompressHits(const o2::tpc::CompressedClusters* clustersCompressed, const unsigned int start, const unsigned int end, Args&... args);
-
- protected:
 };
 } // namespace GPUCA_NAMESPACE::gpu
diff --git a/GPU/GPUTracking/DataCompression/TPCClusterDecompressor.inc b/GPU/GPUTracking/DataCompression/TPCClusterDecompressor.inc
index b783122d2c7d4..211ee1f9b4e38 100644
--- a/GPU/GPUTracking/DataCompression/TPCClusterDecompressor.inc
+++ b/GPU/GPUTracking/DataCompression/TPCClusterDecompressor.inc
@@ -37,15 +37,17 @@ static inline const auto& decompressTrackStore(const o2::tpc::CompressedClusters
   return clusterVector.back();
 }
 
-static inline const auto& decompressTrackStore(const o2::tpc::CompressedClusters* clustersCompressed, const unsigned int offset, unsigned int slice, unsigned int row, unsigned int pad, unsigned int time, std::vector<ClusterNative> (&clusters)[GPUCA_NSLICES][GPUCA_ROW_COUNT], std::atomic_flag (&locks)[GPUCA_NSLICES][GPUCA_ROW_COUNT])
+static inline const auto decompressTrackStore(const o2::tpc::CompressedClusters* clustersCompressed, const unsigned int offset, unsigned int slice, unsigned int row, unsigned int pad, unsigned int time, std::vector<ClusterNative> (&clusters)[GPUCA_NSLICES][GPUCA_ROW_COUNT], std::atomic_flag (&locks)[GPUCA_NSLICES][GPUCA_ROW_COUNT])
 {
   std::vector<ClusterNative>& clusterVector = clusters[slice][row];
   auto& lock = locks[slice][row];
   while (lock.test_and_set(std::memory_order_acquire)) {
   }
-  auto& cluster = decompressTrackStore(clustersCompressed, offset, slice, row, pad, time, clusterVector);
+  // Note the return type is ClusterNative, not auto&, since a different thread might append another cluster, and the vector expansion can change the cluster pointer, so the cluster reference might be invalid
+  // TODO: A new version that might use a plain array + counter to fill the clusters should change this and the function return type to auto&
+  ClusterNative retVal = decompressTrackStore(clustersCompressed, offset, slice, row, pad, time, clusterVector);
   lock.clear(std::memory_order_release);
-  return cluster;
+  return retVal;
 }
 
 template <typename... Args>