diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml index f23ae785a2..119877a36b 100644 --- a/indra/newview/app_settings/settings.xml +++ b/indra/newview/app_settings/settings.xml @@ -2,6 +2,21 @@ + + + + FSTextureNewBiasAdjustments + + Comment + Enable New Texture Bias Adjustments + Persist + 1 + Type + Boolean + Value + 0 + + FSLandmarkCreatedNotification Comment diff --git a/indra/newview/llviewertexture.cpp b/indra/newview/llviewertexture.cpp index 363f62b5f2..364e3cfe13 100644 --- a/indra/newview/llviewertexture.cpp +++ b/indra/newview/llviewertexture.cpp @@ -91,6 +91,12 @@ S32 LLViewerTexture::sAuxCount = 0; LLFrameTimer LLViewerTexture::sEvaluationTimer; F32 LLViewerTexture::sDesiredDiscardBias = 0.f; +// [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card +F32 LLViewerTexture::sPreviousDesiredDiscardBias = 0.f; // Init the static value of the previous discard bias, used to know what direction the bias is going, up, down or staying the same +F32 LLViewerTexture::sOverMemoryBudgetStartTime = 0.0f; // Init the static time when system first went over VRAM budget +F32 LLViewerTexture::sOverMemoryBudgetEndTime = 0.0f; // Init the static time when the system finally reached a normal memory amount +LLViewerTexture::OverMemoryBudetStates_u LLViewerTexture::sOverMemoryBudgetState; // Init the static flag to determine if the over budget is staying still +// [FIRE-35011] S32 LLViewerTexture::sMaxSculptRez = 128; //max sculpt image size constexpr S32 MAX_CACHED_RAW_IMAGE_AREA = 64 * 64; const S32 MAX_CACHED_RAW_SCULPT_IMAGE_AREA = LLViewerTexture::sMaxSculptRez * LLViewerTexture::sMaxSculptRez; @@ -522,14 +528,14 @@ void LLViewerTexture::updateClass() // Expose max texture VRAM setting //F32 budget = max_vram_budget == 0 ? (F32)gGLManager.mVRAM : (F32)max_vram_budget; F32 budget = !max_vram_budget_enabled ? (F32)gGLManager.mVRAM : (F32)max_vram_budget; - + //budget *= 2.0f; // Try to leave at least half a GB for everyone else and for bias, // but keep at least 768MB for ourselves // Viewer can 'overshoot' target when scene changes, if viewer goes over budget it // can negatively impact performance, so leave 20% of a breathing room for // 'bias' calculation to kick in. F32 target = llmax(llmin(budget - 512.f, budget * 0.8f), MIN_VRAM_BUDGET); - sFreeVRAMMegabytes = llmax(target - used, 0.f); + sFreeVRAMMegabytes = llmax(target - used, 0.0f); F32 over_pct = (used - target) / target; @@ -539,26 +545,62 @@ void LLViewerTexture::updateClass() static bool was_low = false; static bool was_sys_low = false; + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + //if (is_low && !was_low) + //{ + // // slam to 1.5 bias the moment we hit low memory (discards off screen textures immediately) + // sDesiredDiscardBias = llmax(sDesiredDiscardBias, 1.5f); + // + // if (is_sys_low || over_pct > 2.f) + // { // if we're low on system memory, emergency purge off screen textures to avoid a death spiral + // LL_WARNS() << "Low system memory detected, emergency downrezzing off screen textures" << LL_ENDL; + // for (auto& image : gTextureList) + // { + // gTextureList.updateImageDecodePriority(image, false /*will modify gTextureList otherwise!*/); + // } + // } + //} + // Update the previous desired discard bias with the current value before it is modified below. (By comparing the two, you can see if + // the bias is increasing, decreasing or staying the same. 
+    // This is useful for determining how the system handles being over budget of RAM.
+    sPreviousDesiredDiscardBias = sDesiredDiscardBias;
+    // Update the over memory budget state with the values computed above
+    sOverMemoryBudgetState.ClearState = 0;
+    sOverMemoryBudgetState.States.LowSystemRAM = is_sys_low;
+    sOverMemoryBudgetState.States.PreviousLowSystemRam = was_sys_low;
+    sOverMemoryBudgetState.States.LowVRAM = was_low;
+
     if (is_low && !was_low)
     {
+        sOverMemoryBudgetState.States.UseBias = 1;
         // slam to 1.5 bias the moment we hit low memory (discards off screen textures immediately)
         sDesiredDiscardBias = llmax(sDesiredDiscardBias, 1.5f);
+        // We want to store the time from when the system went over budget to when it finished; this can be used to delay when textures are
+        // updated again after normal memory usage is restored, which helps smooth out the sudden spikes in high resolution fetch
+        // requests. Set the over memory budget start time to the current time.
+        sOverMemoryBudgetStartTime = sCurrentTime;
+        // Reset the over memory budget end time to 0.0 as the old value is no longer valid
+        sOverMemoryBudgetEndTime = 0.0f;

         if (is_sys_low || over_pct > 2.f)
         { // if we're low on system memory, emergency purge off screen textures to avoid a death spiral
             LL_WARNS() << "Low system memory detected, emergency downrezzing off screen textures" << LL_ENDL;
+            sOverMemoryBudgetState.States.Overage_High = 1;
             for (auto& image : gTextureList)
             {
                 gTextureList.updateImageDecodePriority(image, false /*will modify gTextureList otherwise!*/);
             }
         }
     }
+    // [FIRE-35011]

     was_low = is_low;
     was_sys_low = is_sys_low;

     if (is_low)
     {
+        // Flag that we are using the bias
+        sOverMemoryBudgetState.States.UseBias = 1;
         // ramp up discard bias over time to free memory
         LL_DEBUGS("TextureMemory") << "System memory is low, use more aggressive discard bias." << LL_ENDL;
         if (sEvaluationTimer.getElapsedTimeF32() > MEMORY_CHECK_WAIT_TIME)
@@ -567,6 +609,7 @@ void LLViewerTexture::updateClass()
             F32 increment = low_mem_min_discard_increment + llmax(over_pct, 0.f);
             sDesiredDiscardBias += increment * gFrameIntervalSeconds;
+            sOverMemoryBudgetState.States.IncreaseBias = 1;
         }
     }
     else
@@ -576,13 +619,21 @@ void LLViewerTexture::updateClass()
         // don't execute above until the slam to 1.5 has a chance to take effect
         sEvaluationTimer.reset();

-        // lower discard bias over time when free memory is available
-        if (sDesiredDiscardBias > 1.f && over_pct < 0.f)
+        // lower discard bias over time when at least 10% of the budget is free
+        const F32 FREE_PERCENTAGE_THRESHOLD = -0.1f;
+        if (sDesiredDiscardBias > 1.f && over_pct < FREE_PERCENTAGE_THRESHOLD)
         {
             static LLCachedControl<F32> high_mem_discard_decrement(gSavedSettings, "RenderHighMemMinDiscardDecrement", .1f);
-
-            F32 decrement = high_mem_discard_decrement - llmin(over_pct, 0.f);
-            sDesiredDiscardBias -= decrement * gFrameIntervalSeconds;
+            //F32 decrement = high_mem_discard_decrement - llmin(over_pct, 0.f);
+            F32 decrement = high_mem_discard_decrement - llmin(over_pct - FREE_PERCENTAGE_THRESHOLD, 0.f);
+            sDesiredDiscardBias -= decrement * gFrameIntervalSeconds;
+            sOverMemoryBudgetState.States.DecreaseBias = 1;
+        }
+        // Else if we are good on memory, but within the 10% margin
+        else if (sDesiredDiscardBias > 1.f)
+        {
+            // Flag the over memory budget state that we are holding the bias.
+ sOverMemoryBudgetState.States.NormalHoldBias = 1; } } @@ -623,10 +674,35 @@ void LLViewerTexture::updateClass() } sDesiredDiscardBias = llclamp(sDesiredDiscardBias, 1.f, 4.f); + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // If the desired discard bias is 1.0 but was previously a larger number, that means we are back to normal memory usage again + if (sDesiredDiscardBias == 1.0f && sPreviousDesiredDiscardBias > sDesiredDiscardBias) + { + // So we need to set the memory buget end time to the current time + sOverMemoryBudgetEndTime = sCurrentTime; + } + // If none of the flags were set + if (sOverMemoryBudgetState.ClearState == 0) + { + // Set the normal flag as true + sOverMemoryBudgetState.States.Normal = 1; + } + // [FIRE-35011] LLViewerTexture::sFreezeImageUpdates = false; } +// [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card +// Accessor method to store the current texture state and update the current texture state +void LLViewerTexture::setTextureState(ETextureStates newState) +{ + // Store the current texture state + mPreviousTextureState = mTextureState; + // Update the current texture state with the flag passed in + mTextureState |= mPreviousTextureState; +} +// [FIRE-35011] + //static bool LLViewerTexture::isSystemMemoryLow() { @@ -702,7 +778,12 @@ void LLViewerTexture::init(bool firstinit) mMaxVirtualSizeResetInterval = 1; mMaxVirtualSizeResetCounter = mMaxVirtualSizeResetInterval; mParcelMedia = NULL; - + + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + mTextureState = ETextureStates::NORMAL; // Init the Texture state to NORMAL + mPreviousTextureState = ETextureStates::NORMAL; // Init the prevous texture state to NORMAL + mDelayToNormalUseAfterOverBudget = 0.0f; // Set the delay to normal use after over budget to 0.0f + // [FIRE-35011] memset(&mNumVolumes, 0, sizeof(U32)* LLRender::NUM_VOLUME_TEXTURE_CHANNELS); mVolumeList[LLRender::LIGHT_TEX].clear(); mVolumeList[LLRender::SCULPT_TEX].clear(); @@ -1729,6 +1810,196 @@ void LLViewerFetchedTexture::setDebugText(const std::string& text) extern bool gCubeSnapshot; +// [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card +// This method will will handle the memory overage for the process texture stats methods for both Fetched and LOD textures +bool LLViewerFetchedTexture::handleMemoryOverageForProcessTextureStats() +{ + // Static saved settings allowing to enable/disable the new bias adjustment feature + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + if (use_new_bias_adjustments) + { + // Check the current texture state is in delay, and if so, + if (mTextureState & LLViewerTexture::ETextureStates::RECOVERY_DELAY) + { + // Check to see if the current time is past the delay to normal use + if (mDelayToNormalUseAfterOverBudget <= sCurrentTime) + { + // If we are still in memory overage, then + if (sOverMemoryBudgetState.States.UseBias) + { + // Create an additional 1 second delay for this texture + mDelayToNormalUseAfterOverBudget = sCurrentTime + 1.0f + ll_rand() * 10.0f; + // If the texture was deleted, we want to have a larger delay + if (mTextureState & ETextureStates::VRAM_OVERAGE_DELETED) + { + // We want to reset the delay as we should still wait + mDelayToNormalUseAfterOverBudget += 2.0f * sDesiredDiscardBias; + } + // Else if the texture was just scaled down, delay it a smaller amount + else if 
(mTextureState & ETextureStates::VRAM_SCALED_DOWN) + { + // Add to the delay 1/2 the current discard bias + mDelayToNormalUseAfterOverBudget += 0.5f * sDesiredDiscardBias; + } + + return true; + } + else + { + // Reset the delay to noral use back to 0.0f + mDelayToNormalUseAfterOverBudget = 0.0f; + // Store the current texture state in the previous state + mPreviousTextureState = mTextureState; + // Reset the texture back to normal + mTextureState = ETextureStates::NORMAL; + } + } + else + { + // Still waiting, so just return + return true; + } + } + // Else, we want to check to see if we are in a memory overage state + else if (sOverMemoryBudgetState.States.UseBias) + { + // If the texture currently is Normal + if (mTextureState == ETextureStates::NORMAL) + { + /* + // Check to see if the texture should be scaled down + if (mDesiredDiscardLevel < (MAX_DISCARD_LEVEL - 1) && mBoostLevel < LLGLTexture::BOOST_SCULPTED) + { + // If the current desired discard bias is great then 2 and the discard level is incresing + if (sDesiredDiscardBias > 2.0f && sPreviousDesiredDiscardBias < sDesiredDiscardBias) + { + // Textures use a scale of 0 = max resolution, 5 = MAX_DISCARD_LEVEL (smallest level) + // Try to increase the current desired discard level by 1 level to see that helps + mDesiredDiscardLevel += 1; + // Store the current texture state before modifing it + mPreviousTextureState = mTextureState; + // Flag the texture as haven scaled down + mTextureState |= ETextureStates::VRAM_SCALED_DOWN; + // Update the virtual size + updateVirtualSize(); + //Reset + + + // If we are not forced to save the raw iamge, then + if (!mForceToSaveRawImage) + { + // should scale down + //scaleDown(); + } + } + + //return true; + } + */ + } + // Else, the texture has already been affected by the delay, check its status + else + { + /* + // If the texture has already been deleted by the overage at least already + if (mTextureState & ETextureStates::VRAM_OVERAGE_DELETED) + { + // If the desired discard level is below the max discard level and the boost on the texture is less then BOOST_SCULPTED + if (mDesiredDiscardLevel < (MAX_DISCARD_LEVEL - 1) && mBoostLevel < LLGLTexture::BOOST_SCULPTED) + { + // Set the desired discard level to the max low quality + mDesiredDiscardLevel = MAX_DISCARD_LEVEL - 1; + // Update the virtual size + updateVirtualSize(); + // If we are not forced to save the raw iamge, then + if (!mForceToSaveRawImage) + { + // should scale down + //scaleDown(); + } + } + } + // Else if the texture was scaled down before + else if (mTextureState & ETextureStates::VRAM_SCALED_DOWN) + { + // If more then 30 seconds has elapsed since going into texture over budget, then try to down size it to free + // up more memory + if (sCurrentTime - sOverMemoryBudgetStartTime > 30.0f) + { + // If the desired discard level is below the max discard level and the boost on the texture is less then + // BOOST_SCULPTED + if (mDesiredDiscardLevel < (MAX_DISCARD_LEVEL - 1) && mBoostLevel < LLGLTexture::BOOST_SCULPTED) + { + // Set the desired discard level to the next lower quality (by increaseing the value by 1) + mDesiredDiscardLevel += 1; + // Update the virtual size + updateVirtualSize(); + // If we are not forced to save the raw iamge, then + if (!mForceToSaveRawImage) + { + // should scale down + //scaleDown(); + } + } + } + } + */ + + return false; + } + } + else if ((sOverMemoryBudgetState.States.Normal || sOverMemoryBudgetState.States.NormalHoldBias) && + mTextureState == ETextureStates::SCALED_DOWN) + { + // Create an 
additional 1 second delay for this texture + mDelayToNormalUseAfterOverBudget = sCurrentTime + 1.0f + (F32)ll_rand(5); + + // Store the current texture state before modifing it + mPreviousTextureState = mTextureState; + // Set the current texture state RECOVERY_DELAY flag + mTextureState |= ETextureStates::RECOVERY_DELAY; + } + // Else if we have returned to normal memory usage after the memory acted up and it affected this texture + else if ((sOverMemoryBudgetState.States.Normal || sOverMemoryBudgetState.States.NormalHoldBias) && mTextureState != ETextureStates::NORMAL && + mTextureState != ETextureStates::DELETED) + { + // Create an additional 1 second delay for this texture + mDelayToNormalUseAfterOverBudget = sCurrentTime + 1.0f; + + // If the texture was deleted, we want to have a larger delay + if (mTextureState & ETextureStates::VRAM_OVERAGE_DELETED) + { + // We want to hold off on updating the time based upon how much time was spent during over budget, within 1 to 45 seconds + + mDelayToNormalUseAfterOverBudget += 1.0f + (F32)ll_rand(45); + } + // Else if the texture was just scaled down, delay it a smaller amount + else if (mTextureState & ETextureStates::VRAM_SCALED_DOWN) + { + // We want to hold off on updating the time based upon how much time was spent during over budget, within 1 to 5 seconds + mDelayToNormalUseAfterOverBudget += 1.0f + (F32)ll_rand(5); + } + else if (mTextureState & ETextureStates::SCALED_DOWN) + { + // We want to hold off on updating the time based upon how much time was spent during over budget, within 1 to 5 seconds + mDelayToNormalUseAfterOverBudget += 1.0f + (F32)ll_rand(5); + } + + // Store the current texture state before modifing it + mPreviousTextureState = mTextureState; + // Set the current texture state RECOVERY_DELAY flag + mTextureState |= ETextureStates::RECOVERY_DELAY; + + // Return as we don't want to process the bolow code + return true; + } + } + + // Default to return false, and that we don't want the parent method to return early. 
+ return false; +} +// [FIRE-35011] +// //virtual void LLViewerFetchedTexture::processTextureStats() { @@ -1736,6 +2007,19 @@ void LLViewerFetchedTexture::processTextureStats() llassert(!gCubeSnapshot); // should only be called when the main camera is active llassert(!LLPipeline::sShadowRender); + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // Static saved settings allowing to enable/disable the new bias adjustment feature + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + if (use_new_bias_adjustments) + { + // If the handle memory overage returns true, then this method needs to exit early as it wants to delay loading more + // textures + if (handleMemoryOverageForProcessTextureStats()) + { + return; + } + } + // [FIRE-35011] if(mFullyLoaded) { if(mDesiredDiscardLevel > mMinDesiredDiscardLevel)//need to load more @@ -2003,7 +2287,10 @@ bool LLViewerFetchedTexture::updateFetch() { LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE; static LLCachedControl textures_decode_disabled(gSavedSettings,"TextureDecodeDisabled", false); - + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // Saved Settings bool flag used to enable the newer system (Can be removed but good for testing and comparing) + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + // [FIRE-35011] if(textures_decode_disabled) // don't fetch the surface textures in wireframe mode { return false; @@ -2137,11 +2424,44 @@ bool LLViewerFetchedTexture::updateFetch() else { // already at a higher resolution mip, don't discard + // if (current_discard >= 0 && current_discard <= desired_discard) if (current_discard >= 0 && current_discard <= desired_discard) + // [FIRE-35011] { LL_PROFILE_ZONE_NAMED_CATEGORY_TEXTURE("vftuf - current <= desired"); make_request = false; } + /* + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // already at a higher resolution mip, don't discard + //if (current_discard >= 0 && current_discard <= desired_discard) + // If we are in a high memory situation which is increasing and our texture is not normal + if (use_new_bias_adjustments && sDesiredDiscardBias > 1.0f && sDesiredDiscardBias > sPreviousDesiredDiscardBias && mTextureState != LLViewerTexture::ETextureStates::NORMAL) + { + // If the texture has already been deleted by the overage at least already + if (mTextureState & ETextureStates::VRAM_OVERAGE_DELETED) + { + // If the desired discard level is below the max discard level and the boost on the texture is less then BOOST_SCULPTED + if (desired_discard < (MAX_DISCARD_LEVEL - 1) && mBoostLevel < LLGLTexture::BOOST_SCULPTED) + { + // Set the desired discard level to the max low quality + desired_discard = mDesiredDiscardLevel = MAX_DISCARD_LEVEL - 1; + } + } + // Else if the texture was scaled down before + else if (mTextureState & ETextureStates::VRAM_SCALED_DOWN) + { + // If the desired discard level is below the max discard level and the boost on the texture is less then + // BOOST_SCULPTED + if (desired_discard < (MAX_DISCARD_LEVEL - 1) && mBoostLevel < LLGLTexture::BOOST_SCULPTED) + { + // Set the desired discard level to the next lower quality (by increaseing the value by 1) + desired_discard = mDesiredDiscardLevel += 1; + } + } + } + // [FIRE-35011] + */ } } @@ -2389,6 +2709,10 @@ void LLViewerFetchedTexture::clearCallbackEntryList() return; } +// [FIRE-35011] +// 
These following three methods may need to modified for use by the mUUIDDeleteMap, but right now they are +// not causing any issue. Just a future note. When on the Delete list, they are not active. +// [FIRE-35011] void LLViewerFetchedTexture::deleteCallbackEntry(const LLLoadedCallbackEntry::source_callback_list_t* callback_list) { if(mLoadedCallbackList.empty() || !callback_list) @@ -3015,6 +3339,19 @@ bool LLViewerLODTexture::isUpdateFrozen() void LLViewerLODTexture::processTextureStats() { LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE; + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // Static saved settings allowing to enable/disable the new bias adjustment feature + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + if (use_new_bias_adjustments) + { + // If the handle memory overage returns true, then this method needs to exit early as it wants to delay loading more + // textures + if (handleMemoryOverageForProcessTextureStats()) + { + return; + } + } + // [FIRE-35011] updateVirtualSize(); bool did_downscale = false; @@ -3146,6 +3483,24 @@ bool LLViewerLODTexture::scaleDown() if (!mDownScalePending) { + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // Saved Settings bool flag used to enable the newer system (Can be removed but good for testing and comparing) + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + if (sOverMemoryBudgetState.States.UseBias && use_new_bias_adjustments) + { + // Store the previous texture state + mPreviousTextureState = mTextureState; + // Flag that the texture was downscaled during low memory + mTextureState |= ETextureStates::VRAM_SCALED_DOWN; + } + else if (use_new_bias_adjustments) + { + // Store the previous texture state + mPreviousTextureState = mTextureState; + // Flag that the texture was downscaled during low memory + mTextureState |= ETextureStates::SCALED_DOWN; + } + // [FIRE-35011] mDownScalePending = true; gTextureList.mDownScaleQueue.push(this); } diff --git a/indra/newview/llviewertexture.h b/indra/newview/llviewertexture.h index bf890dbde7..5ead1fe880 100644 --- a/indra/newview/llviewertexture.h +++ b/indra/newview/llviewertexture.h @@ -185,6 +185,25 @@ class LLViewerTexture : public LLGLTexture typedef std::vector material_list_t; material_list_t mMaterialList; // reverse pointer pointing to LL::GLTF::Materials using this image as texture + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // Create an enum bitfield for storing the various memory states + enum ETextureStates + { + NORMAL = 0, // Normal state + DELETED = 1, // Normal Delete + SCALED_DOWN = 2, // Normal Scaled Down + VRAM_OVERAGE_DELETED = 4, // Deleted during VRAM overage + VRAM_SCALED_DOWN = 8, // Scaled down during VRAM overage + RECOVERY_DELAY = 16 // Recovery delay after VRAM_OVERAGE_DELETED or VRAM_SCALED_DOWN is true + }; + + void setTextureState(ETextureStates newState); + mutable U8 mTextureState; // Bitfield which represents the texture's current state + mutable U8 mPreviousTextureState; // Bitfield which represents the texture's current state + F32 mDelayToNormalUseAfterOverBudget; // Time to wait for returning to normal texture adjustments for larger resolution requests after + // being over VRAM budget + // [FIRE-35011] + protected: void cleanup() ; void init(bool firstinit) ; @@ -226,6 +245,33 @@ class LLViewerTexture : 
public LLGLTexture static S32 sAuxCount; static LLFrameTimer sEvaluationTimer; static F32 sDesiredDiscardBias; + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + static F32 sPreviousDesiredDiscardBias; // Static value of the previous Desired Discard Bias (Used to determine if the desired discard bias is increasing, decreasing, or staying the same + static F32 sOverMemoryBudgetStartTime; // Static value stores the mCurrentTime when the viewer first went over budget of RAM (sDesiredDiscardBias > 1.0) + static F32 sOverMemoryBudgetEndTime; // Static value stores the mCurrentTime when the viewer first exists over budget of RAM (sDesiredDiscardBias == 1.0) + typedef union + { + U32 ClearState; + struct + { + U32 Normal : 1; + U32 LowSystemRAM : 1; + U32 LowVRAM : 1; + U32 PreviousLowSystemRam : 1; + U32 PrevouusLowVRAM : 1; + U32 Overage_High : 1; + U32 Overage_Low : 1; + U32 No_Overage : 1; + U32 NormalHoldBias : 1; + U32 UseBias : 1; + U32 IncreaseBias : 1; + U32 DecreaseBias : 1; + } States; + + } OverMemoryBudetStates_u; + + static OverMemoryBudetStates_u sOverMemoryBudgetState; // State of the over memory budget + // [FIRE-35011] static S32 sMaxSculptRez ; static U32 sMinLargeImageSize ; static U32 sMaxSmallImageSize ; @@ -354,6 +400,11 @@ class LLViewerFetchedTexture : public LLViewerTexture void destroyTexture() ; virtual void processTextureStats() ; + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // New behavior for handling low memory for the ProcessTextureStats method for both Fetch and LOD textures + // Returns true if the the + bool handleMemoryOverageForProcessTextureStats(); + bool needsAux() const { return mNeedsAux; } diff --git a/indra/newview/llviewertexturelist.cpp b/indra/newview/llviewertexturelist.cpp index c15ef711d5..e7d1685f1f 100644 --- a/indra/newview/llviewertexturelist.cpp +++ b/indra/newview/llviewertexturelist.cpp @@ -117,6 +117,10 @@ void LLViewerTextureList::doPreloadImages() llassert_always(mInitialized) ; llassert_always(mImageList.empty()) ; llassert_always(mUUIDMap.empty()) ; + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // Clear out the mUUIDDeleteMap as well + llassert_always(mUUIDDeleteMap.empty()); + // [FIRE-35011] // Set the "missing asset" image LLViewerFetchedTexture::sMissingAssetImagep = LLViewerTextureManager::getFetchedTextureFromFile("missing_asset.tga", FTT_LOCAL_FILE, MIPMAP_NO, LLViewerFetchedTexture::BOOST_UI); @@ -366,7 +370,9 @@ void LLViewerTextureList::shutdown() mFastCacheList.clear(); mUUIDMap.clear(); - + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + mUUIDDeleteMap.clear(); // Clear the UUIDMap for delete textures + // [FIRE-35011] mImageList.clear(); mInitialized = false ; //prevent loading textures again. 
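For reference, here is a minimal standalone sketch (plain C++, simplified names — not viewer code) of the union-backed flag pattern that `OverMemoryBudetStates_u` introduces: writing `ClearState = 0` zeroes every bit at once, individual status bits are then set as conditions are detected each frame, and `ClearState == 0` afterwards means nothing was flagged. Reading `ClearState` back after writing the bitfields relies on the usual union type-punning behavior of the compilers the viewer targets, which the patch itself also assumes.

```cpp
#include <cstdint>
#include <cstdio>

// Simplified stand-in for OverMemoryBudetStates_u: a union lets the whole
// flag set be cleared or tested in a single 32-bit read/write.
union MemoryBudgetState
{
    uint32_t ClearState;        // write 0 here to reset every flag below
    struct
    {
        uint32_t Normal       : 1;
        uint32_t LowSystemRAM : 1;
        uint32_t LowVRAM      : 1;
        uint32_t UseBias      : 1;
        uint32_t IncreaseBias : 1;
        uint32_t DecreaseBias : 1;
    } States;
};

int main()
{
    MemoryBudgetState state;
    state.ClearState = 0;                 // reset all flags for this frame

    bool is_sys_low = false, is_low = true;
    state.States.LowSystemRAM = is_sys_low;
    state.States.LowVRAM      = is_low;
    if (is_low)
    {
        state.States.UseBias      = 1;    // bias is being applied
        state.States.IncreaseBias = 1;    // and is ramping up this frame
    }

    if (state.ClearState == 0)            // nothing was flagged at all
    {
        state.States.Normal = 1;
    }

    std::printf("UseBias=%u Normal=%u\n", state.States.UseBias, state.States.Normal);
    return 0;
}
```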
@@ -689,14 +695,60 @@ void LLViewerTextureList::findTexturesByID(const LLUUID &image_id, std::vectorsecond); iter++; } + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // Saved Settings bool flag used to enable the newer system (Can be removed but good for testing and comparing) + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + // If we are using the new bias adjustments, then + if (use_new_bias_adjustments) + { + // Add the deleted images on to this list, + uuid_map_t::iterator del_iter = mUUIDDeleteMap.lower_bound(search_key); + while (del_iter != mUUIDDeleteMap.end() && del_iter->first.textureId == image_id) + { + // Set the normal map to have the same image as the delete map + mUUIDMap[del_iter->first] = del_iter->second; + // Add the deleted image to the Image List + mImageList.insert(del_iter->second); + // Set the flag that the image is on the list to try + mUUIDMap[del_iter->first]->setInImageList(true); + output.push_back(del_iter->second); + // Remove the deleted texture from the delete UUID Map + mUUIDDeleteMap.erase(del_iter->first); + del_iter++; + } + } + // [FIRE-35011] } LLViewerFetchedTexture *LLViewerTextureList::findImage(const LLTextureKey &search_key) { LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE; - uuid_map_t::iterator iter = mUUIDMap.find(search_key); + uuid_map_t::iterator iter = mUUIDMap.find(search_key); if (iter == mUUIDMap.end()) + { + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // If the iterator reached the end, instead of returning null, try to see if the image exists on the deleted list + // Saved Settings bool flag used to enable the newer system (Can be removed but good for testing and comparing) + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + + // If the search_key exists on the delete map + if (mUUIDDeleteMap.count(search_key) == 1 && use_new_bias_adjustments) + { + // Set the normal map to have the same image as the delete map + mUUIDMap[search_key] = mUUIDDeleteMap[search_key]; + // Add the deleted image to the Image List + mImageList.insert(mUUIDMap[search_key]); + // Set the flag that the image is on the list to try + mUUIDMap[search_key]->setInImageList(true); + // Remove the deleted texture from the delete UUID Map + mUUIDDeleteMap.erase(search_key); + // And return the found image + return mUUIDMap[search_key]; + } + // Otherwise, return false as the image does not exist on either the normal or deleted lists + // [FIRE-35011] return NULL; + } return iter->second; } @@ -790,14 +842,22 @@ void LLViewerTextureList::addImage(LLViewerFetchedTexture *new_image, ETexListTy { LL_INFOS() << "Image with ID " << image_id << " already in list" << LL_ENDL; } + // + if (mUUIDDeleteMap.count(key) == 1) + { + // The key also exists on the delete list + // Remove the reference + mUUIDDeleteMap.erase(key); + } sNumImages++; addImageToList(new_image); mUUIDMap[key] = new_image; new_image->setTextureListType(tex_type); } - - + +// [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card +/* void LLViewerTextureList::deleteImage(LLViewerFetchedTexture *image) { LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE; @@ -813,7 +873,58 @@ void LLViewerTextureList::deleteImage(LLViewerFetchedTexture *image) removeImageFromList(image); } } - +*/ +// Added new mapping for storing deleted textures (The deleting and creating new textures is what is 
killing the system. +void LLViewerTextureList::deleteImage(LLViewerFetchedTexture *image) +{ + LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE; + if( image) + { + // Saved Settings bool flag used to enable the newer system (Can be removed but good for testing and comparing) + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + // Do the old method of deleteing the call backs if using the old method, but we want to skip that in the + // new process + if (image->hasCallbacks() && !use_new_bias_adjustments) + { + mCallbackList.erase(image); + } + LLTextureKey key(image->getID(), (ETexListType)image->getTextureListType()); + // Instead of deleting the object, what we want to do it move it over to the UUID Delete Map + if (use_new_bias_adjustments) + { + // Check to see if the key exists on the delete list first, if not, then + if (mUUIDDeleteMap.count(key) == 0) + { + // Add the image to the delete UUIDMap + mUUIDDeleteMap[key] = image; + } + else + { + // We should clean up the one that is about to be replaced. (Should not happed) + mUUIDDeleteMap[key] = image; + } + // Store the current texture state in the previous texture state + image->mPreviousTextureState = image->mTextureState; + // Set the texture state based upon if the system is running out of memory + // If we are running out of memory + if (LLViewerTexture::sOverMemoryBudgetState.States.UseBias) + { + // Set the texture state to VRAM_OVERAGE_DELETED + image->mTextureState = LLViewerTexture::ETextureStates::VRAM_OVERAGE_DELETED; + } + // Else, this is normal delete, + else + { + // So just set the texture state to the normal delete state + image->mTextureState = LLViewerTexture::ETextureStates::DELETED; + } + } + llverify(mUUIDMap.erase(key) == 1); + sNumImages--; + removeImageFromList(image); + } +} +// [FIRE-35011] /////////////////////////////////////////////////////////////////////////////// @@ -875,6 +986,30 @@ void LLViewerTextureList::updateImages(F32 max_time) } } + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + /* + // Saved Settings bool flag used to enable the newer system (Can be removed but good for testing and comparing) + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + + // Currently we are no longer deleting the Texture Memory at all, just forcing all images to scale down. + // We can possibly have an emergency clear for the map of deleted textures if we spend over 2 minutes at high memory. + // But this may just re-introduce the issue with the deleting textures as it is. 
+ if (use_new_bias_adjustments && LLViewerTexture::sDesiredDiscardBias >= 4.0f && (LLViewerTexture::sCurrentTime - LLViewerTexture::sOverMemoryBudgetStartTime) > 120.0f) + { + // Need to purge any requests on the delete list + for (uuid_map_t::iterator iter = mUUIDDeleteMap.begin(); iter != mUUIDDeleteMap.end(); ++iter) + { + LLViewerFetchedTexture* imagep = iter->second; + if (imagep) + { + imagep->forceToDeleteRequest(); + } + } + mUUIDDeleteMap.clear(); // Clear the UUIDMap for delete textures + } + */ + // [FIRE-35011] + updateImagesUpdateStats(); } @@ -894,6 +1029,24 @@ void LLViewerTextureList::clearFetchingRequests() LLViewerFetchedTexture* imagep = *iter; imagep->forceToDeleteRequest() ; } + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + // Need to purge any requests on the delete list + // Saved Settings bool flag used to enable the newer system (Can be removed but good for testing and comparing) + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + // If we are using the new bias system + if (use_new_bias_adjustments) + { + // Iterator over all of the objects in the UUID Delete Map + for (uuid_map_t::iterator iter = mUUIDDeleteMap.begin(); iter != mUUIDDeleteMap.end(); ++iter) + { + LLViewerFetchedTexture* imagep = iter->second; + if (imagep) + { + imagep->forceToDeleteRequest(); + } + } + } + // [FIRE-35011] } extern bool gCubeSnapshot; @@ -902,13 +1055,14 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag { llassert(!gCubeSnapshot); - if (imagep->getBoostLevel() < LLViewerFetchedTexture::BOOST_HIGH) // don't bother checking face list for boosted textures + if (imagep->getBoostLevel() < LLViewerFetchedTexture::BOOST_HIGH) // don't bother checking face list for boosted textures { static LLCachedControl texture_scale_min(gSavedSettings, "TextureScaleMinAreaFactor", 0.04f); static LLCachedControl texture_scale_max(gSavedSettings, "TextureScaleMaxAreaFactor", 25.f); - F32 max_vsize = 0.f; - bool on_screen = false; + F32 max_vsize = 0.f; + bool on_screen = false; + F32 original_max_size = imagep->getMaxVirtualSize(); U32 face_count = 0; @@ -917,7 +1071,12 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag F32 bias = llclamp(max_discard - 2.f, 1.f, LLViewerTexture::sDesiredDiscardBias); // convert bias into a vsize scaler - bias = (F32) llroundf(powf(4, bias - 1.f)); + bias = (F32)llroundf(powf(4, bias - 1.f)); + + if (LLViewerTexture::sOverMemoryBudgetState.States.UseBias == 0) + { + bias = 1.0f; + } LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE; for (U32 i = 0; i < LLRender::NUM_TEXTURE_CHANNELS; ++i) @@ -936,7 +1095,7 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag { // only call calcPixelArea at most once every 10 frames for a given face // this helps eliminate redundant calls to calcPixelArea for faces that have multiple textures // assigned to them, such as is the case with GLTF materials or Blinn-Phong materials - face->mInFrustum = face->calcPixelArea(cos_angle_to_view_dir, radius); + face->mInFrustum = face->calcPixelArea(cos_angle_to_view_dir, radius); face->mLastTextureUpdate = gFrameCount; } @@ -951,11 +1110,11 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag // // Maximum usage examples: huge chunk of terrain repeats texture // TODO: make this work with the GLTF texture transforms - S32 te_offset = face->getTEOffset(); // offset is -1 if not 
inited - LLViewerObject* objp = face->getViewerObject(); - const LLTextureEntry* te = (te_offset < 0 || te_offset >= objp->getNumTEs()) ? nullptr : objp->getTE(te_offset); - F32 min_scale = te ? llmin(fabsf(te->getScaleS()), fabsf(te->getScaleT())) : 1.f; - min_scale = llclamp(min_scale * min_scale, texture_scale_min(), texture_scale_max()); + S32 te_offset = face->getTEOffset(); // offset is -1 if not inited + LLViewerObject* objp = face->getViewerObject(); + const LLTextureEntry* te = (te_offset < 0 || te_offset >= objp->getNumTEs()) ? nullptr : objp->getTE(te_offset); + F32 min_scale = te ? llmin(fabsf(te->getScaleS()), fabsf(te->getScaleT())) : 1.f; + min_scale = llclamp(min_scale * min_scale, texture_scale_min(), texture_scale_max()); vsize /= min_scale; // apply bias to offscreen faces all the time, but only to onscreen faces when bias is large @@ -968,7 +1127,7 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag if (face->mInFrustum) { static LLCachedControl texture_camera_boost(gSavedSettings, "TextureCameraBoost", 8.f); - vsize *= llmax(face->mImportanceToCamera*texture_camera_boost, 1.f); + vsize *= llmax(face->mImportanceToCamera * texture_camera_boost, 1.f); } max_vsize = llmax(max_vsize, vsize); @@ -987,14 +1146,41 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag // this is an alternative to decaying mMaxVirtualSize over time // that keeps textures from continously downrezzing and uprezzing in the background - if (LLViewerTexture::sDesiredDiscardBias > 1.5f || - (!on_screen && LLViewerTexture::sDesiredDiscardBias > 1.f)) + if (LLViewerTexture::sDesiredDiscardBias > 1.5f || (!on_screen && LLViewerTexture::sDesiredDiscardBias > 1.f) && LLViewerTexture::sOverMemoryBudgetState.States.UseBias) { imagep->mMaxVirtualSize = 0.f; } } imagep->addTextureStats(max_vsize); + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + if (imagep->getMaxVirtualSize() < original_max_size) + { + //imagep->addTextureStats(original_max_size); + if (LLViewerTexture::sOverMemoryBudgetState.States.UseBias) + { + // Flag the image that it was downsized + // imagep->setTextureState(LLViewerTexture::ETextureStates::VRAM_SCALED_DOWN); + } + else + { + //imagep->setTextureState(LLViewerTexture::ETextureStates::SCALED_DOWN); + } + } + else + { + // If we are in a low state, just reset the max virtual size back to the original value + if (LLViewerTexture::sOverMemoryBudgetState.States.UseBias) + { + //imagep->mMaxVirtualSize = original_max_size; + } + else if (imagep->mTextureState & LLViewerTexture::ETextureStates::SCALED_DOWN && + LLViewerTexture::sOverMemoryBudgetState.States.NormalHoldBias) + { + //imagep->mMaxVirtualSize = original_max_size; + } + } + // [FIRE-35011] } #if 0 @@ -1032,7 +1218,14 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag return; } } - else + // + else if (imagep->mTextureState & LLViewerTexture::ETextureStates::VRAM_SCALED_DOWN && imagep->getMaxVirtualSize() == 0.0f && + imagep->getType() == LLViewerTexture::LOD_TEXTURE && imagep->getBoostLevel() == LLViewerTexture::BOOST_NONE) + { + // We are going to delete the image after scaling it down, so do it now. 
+ //deleteImage(imagep); + } + // { // still referenced outside of image list, reset timer imagep->getLastReferencedTimer()->reset(); @@ -1214,11 +1407,37 @@ F32 LLViewerTextureList::updateImagesFetchTextures(F32 max_time) //update MIN_UPDATE_COUNT or 5% of other textures, whichever is greater update_count = llmax((U32) MIN_UPDATE_COUNT, (U32) mUUIDMap.size()/20); - if (LLViewerTexture::sDesiredDiscardBias > 1.f) + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + //if (LLViewerTexture::sDesiredDiscardBias > 1.f) + //{ + // // we are over memory target, update more agresively + // update_count = (S32)(update_count * LLViewerTexture::sDesiredDiscardBias); + //} + // Saved Settings bool flag used to enable the newer system (Can be removed but good for testing and comparing) + static LLCachedControl use_new_bias_adjustments(gSavedSettings, "FSTextureNewBiasAdjustments", false); + // If using the new bias adjustments, then perform the new actions + if (use_new_bias_adjustments) + { + // If the desired discard bias is greater then 1 and is increasing or stable, if descreasing, use the normal about of texture + // updates + if (LLViewerTexture::sDesiredDiscardBias > 1.f && + LLViewerTexture::sDesiredDiscardBias >= LLViewerTexture::sPreviousDesiredDiscardBias && + LLViewerTexture::sOverMemoryBudgetState.States.UseBias && !LLViewerTexture::sOverMemoryBudgetState.States.NormalHoldBias) + { + // we are over memory target, update more agresively + update_count = (S32)(update_count * LLViewerTexture::sDesiredDiscardBias); + } + } + else { - // we are over memory target, update more agresively - update_count = (S32)(update_count * LLViewerTexture::sDesiredDiscardBias); + // Else, use the standard method + if (LLViewerTexture::sDesiredDiscardBias > 1.f) + { + // we are over memory target, update more agresively + update_count = (S32)(update_count * LLViewerTexture::sDesiredDiscardBias); + } } + // [FIRE-35011] update_count = llmin(update_count, (U32) mUUIDMap.size()); { // copy entries out of UUID map to avoid iterator invalidation from deletion inside updateImageDecodeProiroty or updateFetch below diff --git a/indra/newview/llviewertexturelist.h b/indra/newview/llviewertexturelist.h index 3816c32500..7b37f1ba90 100644 --- a/indra/newview/llviewertexturelist.h +++ b/indra/newview/llviewertexturelist.h @@ -238,6 +238,9 @@ class LLViewerTextureList private: typedef std::map< LLTextureKey, LLPointer > uuid_map_t; uuid_map_t mUUIDMap; + // [FIRE-35011] Weird patterned extreme CPU usage when using more than 6gb vram on 10g card + uuid_map_t mUUIDDeleteMap; // New storage for Delete Textures (they now don't go away fully), fixing manuy performace issues + // [FIRE-35011] LLTextureKey mLastUpdateKey; image_list_t mImageList; diff --git a/indra/newview/skins/default/xui/en/panel_preferences_graphics1.xml b/indra/newview/skins/default/xui/en/panel_preferences_graphics1.xml index 9d4cd92403..3a038e0e6d 100644 --- a/indra/newview/skins/default/xui/en/panel_preferences_graphics1.xml +++ b/indra/newview/skins/default/xui/en/panel_preferences_graphics1.xml @@ -1230,6 +1230,19 @@ If you do not understand the distinction then leave this control alone." width="100"> Second(s) + + + +
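As a side note on the `mUUIDDeleteMap` change: the sketch below (standalone C++, hypothetical names, not the viewer's actual API) illustrates the soft-delete idea — `deleteImage()` parks the texture in a secondary map instead of dropping the last reference, and `findImage()` resurrects the parked entry rather than returning NULL and forcing a full refetch. The trade-off is that parked textures keep holding memory until something eventually purges the delete map.

```cpp
#include <map>
#include <memory>
#include <string>

struct Texture { std::string id; };

class TextureCache
{
public:
    // Lookup: check the active map first, then try to resurrect from the parked map.
    std::shared_ptr<Texture> find(const std::string& key)
    {
        auto it = mActive.find(key);
        if (it != mActive.end())
            return it->second;

        auto del = mDeleted.find(key);
        if (del != mDeleted.end())
        {
            // Resurrect: move the old object back to the active map instead of
            // returning null, so the caller reuses it rather than refetching.
            std::shared_ptr<Texture> tex = del->second;
            mDeleted.erase(del);
            mActive[key] = tex;
            return tex;
        }
        return nullptr;
    }

    // "Delete": park the entry in the side map instead of destroying it.
    void erase(const std::string& key)
    {
        auto it = mActive.find(key);
        if (it == mActive.end())
            return;
        mDeleted[key] = it->second;
        mActive.erase(it);
    }

private:
    std::map<std::string, std::shared_ptr<Texture>> mActive;   // analogous to mUUIDMap
    std::map<std::string, std::shared_ptr<Texture>> mDeleted;  // analogous to mUUIDDeleteMap
};
```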
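Similarly, a small sketch (standalone C++, `std::rand` standing in for the viewer's RNG, names hypothetical) of the staggered recovery delay used in `handleMemoryOverageForProcessTextureStats()`: once the over-budget state ends, each affected texture gets a randomized deadline before it may request higher resolution again, with a wider spread for textures that were deleted during the overage than for ones that were only scaled down, so recovery fetches do not all fire on the same frame.

```cpp
#include <cstdlib>
#include <ctime>

// Per-texture state flags, simplified from ETextureStates.
enum TextureFlags : unsigned
{
    TEX_NORMAL               = 0,
    TEX_VRAM_SCALED_DOWN     = 1 << 0,
    TEX_VRAM_OVERAGE_DELETED = 1 << 1,
};

// Returns the absolute time (seconds) before which this texture should not uprez.
float recoveryDeadline(unsigned flags, float now)
{
    float delay = 1.0f;                                            // minimum settle time
    if (flags & TEX_VRAM_OVERAGE_DELETED)
        delay += 1.0f + static_cast<float>(std::rand() % 45);      // up to ~45 s of extra spread
    else if (flags & TEX_VRAM_SCALED_DOWN)
        delay += 1.0f + static_cast<float>(std::rand() % 5);       // up to ~5 s of extra spread
    return now + delay;
}

int main()
{
    std::srand(static_cast<unsigned>(std::time(nullptr)));
    float now = 0.0f;
    float deadline = recoveryDeadline(TEX_VRAM_SCALED_DOWN, now);
    // A texture only resumes normal processTextureStats() handling once now >= deadline.
    return deadline > now ? 0 : 1;
}
```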