diff --git a/Modules/Core/Common/include/itkVector.h b/Modules/Core/Common/include/itkVector.h
index 02fd8fec328..8a5e1cdf8af 100644
--- a/Modules/Core/Common/include/itkVector.h
+++ b/Modules/Core/Common/include/itkVector.h
@@ -90,7 +90,8 @@ class ITK_TEMPLATE_EXPORT Vector : public FixedArray<T, VVectorDimension>
     return VVectorDimension;
   }
 
-  /** Set a vnl_vector_ref referencing the same memory block. */
+  /** Copy values from the vnl_vector input to the internal memory block. The minimum of
+   * VVectorDimension and vnl_vector::size() elements are copied. */
   void
   SetVnlVector(const vnl_vector<T> &);
 
diff --git a/Modules/Core/Common/include/itkVector.hxx b/Modules/Core/Common/include/itkVector.hxx
index 408a9d8cb4f..8d2fca35141 100644
--- a/Modules/Core/Common/include/itkVector.hxx
+++ b/Modules/Core/Common/include/itkVector.hxx
@@ -155,7 +155,8 @@ template <typename T, unsigned int TVectorDimension>
 void
 Vector<T, TVectorDimension>::SetVnlVector(const vnl_vector<T> & v)
 {
-  for (unsigned int i = 0; i < v.size(); ++i)
+  const unsigned int elements_to_copy = std::min(TVectorDimension, v.size());
+  for (unsigned int i = 0; i < elements_to_copy; ++i)
   {
     (*this)[i] = v(i);
   }
diff --git a/Modules/Segmentation/LevelSets/include/itkParallelSparseFieldLevelSetImageFilter.hxx b/Modules/Segmentation/LevelSets/include/itkParallelSparseFieldLevelSetImageFilter.hxx
index 255d2afac5c..cd8955db488 100644
--- a/Modules/Segmentation/LevelSets/include/itkParallelSparseFieldLevelSetImageFilter.hxx
+++ b/Modules/Segmentation/LevelSets/include/itkParallelSparseFieldLevelSetImageFilter.hxx
@@ -768,14 +768,14 @@ void
 ParallelSparseFieldLevelSetImageFilter<TInputImage, TOutputImage>::ThreadedAllocateData(ThreadIdType ThreadId)
 {
   static constexpr float SAFETY_FACTOR = 4.0;
-  unsigned int i, j;
 
   m_Data[ThreadId].m_Semaphore[0] = 0;
   m_Data[ThreadId].m_Semaphore[1] = 0;
 
+  const std::size_t bufferLayerSize = 2 * m_NumberOfLayers + 1;
   // Allocate the layers for the sparse field.
-  m_Data[ThreadId].m_Layers.reserve(2 * m_NumberOfLayers + 1);
-  for (i = 0; i < 2 * static_cast<unsigned int>(m_NumberOfLayers) + 1; ++i)
+  m_Data[ThreadId].m_Layers.reserve(bufferLayerSize);
+  for (unsigned int i = 0; i < 2 * static_cast<unsigned int>(m_NumberOfLayers) + 1; ++i)
   {
     m_Data[ThreadId].m_Layers.push_back(LayerType::New());
   }
@@ -786,12 +786,13 @@ ParallelSparseFieldLevelSetImageFilter<TInputImage, TOutputImage>::ThreadedAlloc
   }
 
   // Layers used as buffers for transferring pixels during load balancing
-  m_Data[ThreadId].m_LoadTransferBufferLayers = new LayerListType[2 * m_NumberOfLayers + 1];
-  for (i = 0; i < 2 * static_cast<unsigned int>(m_NumberOfLayers) + 1; ++i)
+
+  m_Data[ThreadId].m_LoadTransferBufferLayers = new LayerListType[bufferLayerSize];
+  for (unsigned int i = 0; i < 2 * static_cast<unsigned int>(m_NumberOfLayers) + 1; ++i)
   {
     m_Data[ThreadId].m_LoadTransferBufferLayers[i].reserve(m_NumOfWorkUnits);
 
-    for (j = 0; j < m_NumOfWorkUnits; ++j)
+    for (unsigned int j = 0; j < m_NumOfWorkUnits; ++j)
     {
       m_Data[ThreadId].m_LoadTransferBufferLayers[i].push_back(LayerType::New());
     }
@@ -803,14 +804,13 @@ ParallelSparseFieldLevelSetImageFilter<TInputImage, TOutputImage>::ThreadedAlloc
 
   // The SAFETY_FACTOR simple ensures that the number of nodes created
   // is larger than those required to start with for each thread.
-  auto nodeNum =
-    static_cast<unsigned int>(SAFETY_FACTOR * m_Layers[0]->Size() * (2 * m_NumberOfLayers + 1) / m_NumOfWorkUnits);
+  auto nodeNum = static_cast<unsigned int>(SAFETY_FACTOR * m_Layers[0]->Size() * (bufferLayerSize) / m_NumOfWorkUnits);
 
   m_Data[ThreadId].m_LayerNodeStore->Reserve(nodeNum);
   m_Data[ThreadId].m_RMSChange = m_ValueZero;
 
   // UpLists and Downlists
-  for (i = 0; i < 2; ++i)
+  for (unsigned int i = 0; i < 2; ++i)
   {
     m_Data[ThreadId].UpList[i] = LayerType::New();
     m_Data[ThreadId].DownList[i] = LayerType::New();
@@ -824,27 +824,23 @@ ParallelSparseFieldLevelSetImageFilter<TInputImage, TOutputImage>::ThreadedAlloc
   // for the Downlists
   m_Data[ThreadId].m_InterNeighborNodeTransferBufferLayers[1] = new LayerPointerType *[m_NumberOfLayers + 1];
 
-  for (i = 0; i < static_cast<unsigned int>(m_NumberOfLayers) + 1; ++i)
+  for (unsigned int i = 0; i < static_cast<unsigned int>(m_NumberOfLayers) + 1; ++i)
   {
     m_Data[ThreadId].m_InterNeighborNodeTransferBufferLayers[0][i] = new LayerPointerType[m_NumOfWorkUnits];
     m_Data[ThreadId].m_InterNeighborNodeTransferBufferLayers[1][i] = new LayerPointerType[m_NumOfWorkUnits];
   }
 
-  for (i = 0; i < static_cast<unsigned int>(m_NumberOfLayers) + 1; ++i)
+  for (unsigned int i = 0; i < static_cast<unsigned int>(m_NumberOfLayers) + 1; ++i)
   {
-    for (j = 0; j < m_NumOfWorkUnits; ++j)
+    for (unsigned int j = 0; j < m_NumOfWorkUnits; ++j)
     {
       m_Data[ThreadId].m_InterNeighborNodeTransferBufferLayers[0][i][j] = LayerType::New();
       m_Data[ThreadId].m_InterNeighborNodeTransferBufferLayers[1][i][j] = LayerType::New();
     }
   }
 
-  // Local histogram for every thread (used during Iterate() )
-  m_Data[ThreadId].m_ZHistogram = new int[m_ZSize];
-  for (i = 0; i < m_ZSize; ++i)
-  {
-    m_Data[ThreadId].m_ZHistogram[i] = 0;
-  }
+  // Local histogram for every thread (used during Iterate()), initialized to zeros.
+  m_Data[ThreadId].m_ZHistogram = new int[m_ZSize]();
 
   // Every thread must have its own copy of the GlobalData struct.
   m_Data[ThreadId].globalData = this->GetDifferenceFunction()->GetGlobalDataPointer();
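
For reference, a minimal self-contained sketch of the two behaviors the patch relies on, assuming only the C++ standard library (CopyClamped, zSize, and the local arrays are hypothetical names for illustration, not ITK or VXL API): the clamped copy in SetVnlVector writes at most the fixed dimension's worth of elements from a possibly longer input, and new int[n]() value-initializes the histogram storage to zeros without an explicit loop.

#include <algorithm>
#include <cstddef>
#include <iostream>

// Illustrative stand-in (not ITK API) for the clamped copy now done by
// itk::Vector::SetVnlVector: only min(VDimension, source size) elements are
// written, so a longer input cannot overrun the fixed-size destination.
template <unsigned int VDimension>
void
CopyClamped(double * dest, const double * src, std::size_t srcSize)
{
  const std::size_t elementsToCopy = std::min<std::size_t>(VDimension, srcSize);
  for (std::size_t i = 0; i < elementsToCopy; ++i)
  {
    dest[i] = src[i];
  }
}

int
main()
{
  double       fixed[3] = { 0.0, 0.0, 0.0 };
  const double longer[5] = { 1.0, 2.0, 3.0, 4.0, 5.0 };
  CopyClamped<3>(fixed, longer, 5); // copies only the first three values

  // Value-initialization with "new int[n]()" zeroes every element, matching
  // the replacement of the explicit zeroing loop for m_ZHistogram.
  const std::size_t zSize = 8; // hypothetical size
  int *             histogram = new int[zSize]();

  std::cout << fixed[2] << ' ' << histogram[0] << '\n'; // prints "3 0"
  delete[] histogram;
  return 0;
}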