From 81707dcc74fd8a66349c80c0c47254de110b9f5d Mon Sep 17 00:00:00 2001 From: Sagar <99425694+sgup432@users.noreply.github.com> Date: Thu, 25 Apr 2024 22:09:42 -0700 Subject: [PATCH 1/5] [Tiered Caching] Expose a dynamic setting to disable/enable disk cache (#13373) * [Tiered Caching] Expose a dynamic setting to disable/enable disk cache Signed-off-by: Sagar Upadhyaya * Putting tiered cache settings behind feature flag Signed-off-by: Sagar Upadhyaya * Adding a changelog Signed-off-by: Sagar Upadhyaya * Addressing Sorabh's comments Signed-off-by: Sagar Upadhyaya * Putting new setting behind feature flag Signed-off-by: Sagar Upadhyaya --------- Signed-off-by: Sagar Upadhyaya Signed-off-by: Sagar <99425694+sgup432@users.noreply.github.com> --- CHANGELOG.md | 1 + .../TieredSpilloverCacheIT.java | 115 ++++++++++++++++++ .../common/tier/TieredSpilloverCache.java | 74 +++++++---- .../tier/TieredSpilloverCachePlugin.java | 13 +- .../tier/TieredSpilloverCacheSettings.java | 22 +++- .../tier/TieredSpilloverCachePluginTests.java | 11 +- .../tier/TieredSpilloverCacheTests.java | 115 ++++++++++++++++++ 7 files changed, 323 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7adf9a752c846..ee2bb839db324 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add an individual setting of rate limiter for segment replication ([#12959](https://github.com/opensearch-project/OpenSearch/pull/12959)) - [Streaming Indexing] Ensure support of the new transport by security plugin ([#13174](https://github.com/opensearch-project/OpenSearch/pull/13174)) - Add cluster setting to dynamically configure the buckets for filter rewrite optimization. ([#13179](https://github.com/opensearch-project/OpenSearch/pull/13179)) +- [Tiered Caching] Add a dynamic setting to disable/enable disk cache. ([#13373](https://github.com/opensearch-project/OpenSearch/pull/13373)) - [Remote Store] Add capability of doing refresh as determined by the translog ([#12992](https://github.com/opensearch-project/OpenSearch/pull/12992)) - Batch mode for async fetching shard information in GatewayAllocator for unassigned shards ([#8746](https://github.com/opensearch-project/OpenSearch/pull/8746)) diff --git a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java index 977a66c53b7e8..cbe16a690c104 100644 --- a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java +++ b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java @@ -425,6 +425,121 @@ public void testWithExplicitCacheClear() throws Exception { }, 1, TimeUnit.SECONDS); } + public void testWithDynamicDiskCacheSetting() throws Exception { + int onHeapCacheSizeInBytes = 10; // Keep it low so that all items are cached onto disk. 
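+        // With an on-heap tier of ~10 bytes, even a single request cache entry exceeds the heap budget,
+        // so every cached query below is expected to spill to, and be served from, the disk tier.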
+ internalCluster().startNode( + Settings.builder() + .put(defaultSettings(onHeapCacheSizeInBytes + "b")) + .put(INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), new TimeValue(1)) + .build() + ); + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", -1) + ) + .get() + ); + // Update took time policy to zero so that all entries are eligible to be cached on disk. + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( + Settings.builder() + .put( + TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), + new TimeValue(0, TimeUnit.MILLISECONDS) + ) + .build() + ); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + int numberOfIndexedItems = randomIntBetween(5, 10); + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + indexRandom(true, client.prepareIndex("index").setSource("k" + iterator, "hello" + iterator)); + } + ensureSearchable("index"); + refreshAndWaitForReplication(); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); + long perQuerySizeInCacheInBytes = -1; + // Step 1: Hit some queries. We will see misses and queries will be cached(onto disk cache) for subsequent + // requests. + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + if (perQuerySizeInCacheInBytes == -1) { + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + perQuerySizeInCacheInBytes = requestCacheStats.getMemorySizeInBytes(); + } + assertSearchResponse(resp); + } + + RequestCacheStats requestCacheStats = getRequestCacheStats(client, "index"); + assertEquals(numberOfIndexedItems * perQuerySizeInCacheInBytes, requestCacheStats.getMemorySizeInBytes()); + assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount()); + assertEquals(0, requestCacheStats.getHitCount()); + assertEquals(0, requestCacheStats.getEvictions()); + + // Step 2: Hit same queries again. We will see hits now. 
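+        // These hits can only be served from the disk tier, since the tiny on-heap tier cannot retain
+        // any entry; this exercises the heap-to-disk spillover read path end to end.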
+ for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + assertSearchResponse(resp); + } + requestCacheStats = getRequestCacheStats(client, "index"); + assertEquals(numberOfIndexedItems * perQuerySizeInCacheInBytes, requestCacheStats.getMemorySizeInBytes()); + assertEquals(numberOfIndexedItems, requestCacheStats.getMissCount()); + assertEquals(numberOfIndexedItems, requestCacheStats.getHitCount()); + assertEquals(0, requestCacheStats.getEvictions()); + long lastKnownHitCount = requestCacheStats.getHitCount(); + long lastKnownMissCount = requestCacheStats.getMissCount(); + + // Step 3: Turn off disk cache now. And hit same queries again. We should not see hits now as all queries + // were cached onto disk cache. + updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings( + Settings.builder() + .put(TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), false) + .build() + ); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + + for (int iterator = 0; iterator < numberOfIndexedItems; iterator++) { + SearchResponse resp = client.prepareSearch("index") + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k" + iterator, "hello" + iterator)) + .get(); + assertSearchResponse(resp); + } + requestCacheStats = getRequestCacheStats(client, "index"); + assertEquals(numberOfIndexedItems * perQuerySizeInCacheInBytes, requestCacheStats.getMemorySizeInBytes()); // + // Still shows disk cache entries as explicit clear or invalidation is required to clean up disk cache. + assertEquals(lastKnownMissCount + numberOfIndexedItems, requestCacheStats.getMissCount()); + assertEquals(0, lastKnownHitCount - requestCacheStats.getHitCount()); // No new hits being seen. + lastKnownMissCount = requestCacheStats.getMissCount(); + lastKnownHitCount = requestCacheStats.getHitCount(); + + // Step 4: Invalidate entries via refresh. + // Explicit refresh would invalidate cache entries. 
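+        // A refresh changes the underlying IndexReader.CacheKey, making the existing entries stale; with
+        // INDICES_CACHE_CLEAN_INTERVAL_SETTING set to 1ms at node startup, the cache cleaner then removes
+        // them, which is what the assertBusy below waits for.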
+        refreshAndWaitForReplication();
+        assertBusy(() -> {
+            // Explicit refresh should clear up cache entries
+            assertTrue(getRequestCacheStats(client, "index").getMemorySizeInBytes() == 0);
+        }, 1, TimeUnit.SECONDS);
+        requestCacheStats = getRequestCacheStats(client, "index");
+        assertEquals(0, lastKnownMissCount - requestCacheStats.getMissCount());
+        assertEquals(0, lastKnownHitCount - requestCacheStats.getHitCount());
+    }
+
     private RequestCacheStats getRequestCacheStats(Client client, String indexName) {
         return client.admin().indices().prepareStats(indexName).setRequestCache(true).get().getTotal().getRequestCache();
     }
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
index ae3d9f1dbcf62..34f17df751d7a 100644
--- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
@@ -27,8 +27,9 @@
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Collections;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.NoSuchElementException;
@@ -38,6 +39,8 @@
 import java.util.function.Function;
 import java.util.function.Predicate;
 
+import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP;
+
 /**
  * This cache spills evicted items over from the heap tier to the disk tier. All new items are first cached on heap,
  * and items evicted from the on-heap cache are moved to the disk-based cache. If the disk-based cache also gets full,
@@ -67,12 +70,14 @@ public class TieredSpilloverCache<K, V> implements ICache<K, V> {
     /**
     * Maintains caching tiers in ascending order of cache latency.
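    * The Boolean value per tier tracks whether that tier is currently enabled: reads skip
    * disabled tiers, while invalidate, count, refresh and close still visit every tier.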
    */
-    private final List<ICache<K, V>> cacheList;
+    private final Map<ICache<K, V>, Boolean> caches;
     private final List<Predicate<V>> policies;
 
     TieredSpilloverCache(Builder<K, V> builder) {
         Objects.requireNonNull(builder.onHeapCacheFactory, "onHeap cache builder can't be null");
         Objects.requireNonNull(builder.diskCacheFactory, "disk cache builder can't be null");
+        Objects.requireNonNull(builder.cacheConfig, "cache config can't be null");
+        Objects.requireNonNull(builder.cacheConfig.getClusterSettings(), "cluster settings can't be null");
         this.removalListener = Objects.requireNonNull(builder.removalListener, "Removal listener can't be null");
 
         this.onHeapCache = builder.onHeapCacheFactory.create(
@@ -80,7 +85,8 @@ public class TieredSpilloverCache<K, V> implements ICache<K, V> {
             @Override
             public void onRemoval(RemovalNotification<ICacheKey<K>, V> notification) {
                 try (ReleasableLock ignore = writeLock.acquire()) {
-                    if (SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason())
+                    if (caches.get(diskCache)
+                        && SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason())
                         && evaluatePolicies(notification.getValue())) {
                         diskCache.put(notification.getKey(), notification.getValue());
                     } else {
@@ -103,9 +109,15 @@ && evaluatePolicies(notification.getValue())) {
         );
         this.diskCache = builder.diskCacheFactory.create(builder.cacheConfig, builder.cacheType, builder.cacheFactories);
-        this.cacheList = Arrays.asList(onHeapCache, diskCache);
+        Boolean isDiskCacheEnabled = DISK_CACHE_ENABLED_SETTING_MAP.get(builder.cacheType).get(builder.cacheConfig.getSettings());
+        LinkedHashMap<ICache<K, V>, Boolean> cacheListMap = new LinkedHashMap<>();
+        cacheListMap.put(onHeapCache, true);
+        cacheListMap.put(diskCache, isDiskCacheEnabled);
+        this.caches = Collections.synchronizedMap(cacheListMap);
         this.dimensionNames = builder.cacheConfig.getDimensionNames();
         this.policies = builder.policies; // Will never be null; builder initializes it to an empty list
+        builder.cacheConfig.getClusterSettings()
+            .addSettingsUpdateConsumer(DISK_CACHE_ENABLED_SETTING_MAP.get(builder.cacheType), this::enableDisableDiskCache);
     }
 
     // Package private for testing
@@ -118,6 +130,13 @@ ICache<K, V> getDiskCache() {
         return diskCache;
     }
 
+    // Package private for testing.
+    void enableDisableDiskCache(Boolean isDiskCacheEnabled) {
+        // When the disk cache is disabled, we do not clear out its entries yet; that should happen through a
+        // separate cache clear API.
+        this.caches.put(diskCache, isDiskCacheEnabled);
+    }
+
     @Override
     public V get(ICacheKey<K> key) {
         return getValueFromTieredCache().apply(key);
@@ -132,7 +151,6 @@ public void put(ICacheKey<K> key, V value) {
 
     @Override
     public V computeIfAbsent(ICacheKey<K> key, LoadAwareCacheLoader<ICacheKey<K>, V> loader) throws Exception {
-
         V cacheValue = getValueFromTieredCache().apply(key);
         if (cacheValue == null) {
             // Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside.
@@ -151,10 +169,10 @@ public V computeIfAbsent(ICacheKey<K> key, LoadAwareCacheLoader<ICacheKey<K>, V>
     public void invalidate(ICacheKey<K> key) {
         // We are trying to invalidate the key from all caches, though it would be present in only one of them.
         // Doing this as we don't know where it is located. We could do a get from both and check, but that would
-        // also trigger a hit/miss listener event, so ignoring it for now.
+        // also count towards hit/miss stats, so ignoring it for now.
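         // Note that the loop below deliberately includes disabled tiers, so entries that spilled to disk
         // before the tier was switched off can still be invalidated.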
         try (ReleasableLock ignore = writeLock.acquire()) {
-            for (ICache<K, V> cache : cacheList) {
-                cache.invalidate(key);
+            for (Map.Entry<ICache<K, V>, Boolean> cacheEntry : caches.entrySet()) {
+                cacheEntry.getKey().invalidate(key);
             }
         }
     }
@@ -162,8 +180,8 @@ public void invalidate(ICacheKey<K> key) {
     @Override
     public void invalidateAll() {
         try (ReleasableLock ignore = writeLock.acquire()) {
-            for (ICache<K, V> cache : cacheList) {
-                cache.invalidateAll();
+            for (Map.Entry<ICache<K, V>, Boolean> cacheEntry : caches.entrySet()) {
+                cacheEntry.getKey().invalidateAll();
             }
         }
     }
@@ -175,15 +193,21 @@ public void invalidateAll() {
     @SuppressWarnings({ "unchecked" })
     @Override
     public Iterable<ICacheKey<K>> keys() {
-        Iterable<ICacheKey<K>>[] iterables = (Iterable<ICacheKey<K>>[]) new Iterable[] { onHeapCache.keys(), diskCache.keys() };
-        return new ConcatenatedIterables<ICacheKey<K>>(iterables);
+        List<Iterable<ICacheKey<K>>> iterableList = new ArrayList<>();
+        for (Map.Entry<ICache<K, V>, Boolean> cacheEntry : caches.entrySet()) {
+            iterableList.add(cacheEntry.getKey().keys());
+        }
+        Iterable<ICacheKey<K>>[] iterables = (Iterable<ICacheKey<K>>[]) iterableList.toArray(new Iterable[0]);
+        return new ConcatenatedIterables<>(iterables);
     }
 
     @Override
     public long count() {
         long count = 0;
-        for (ICache<K, V> cache : cacheList) {
-            count += cache.count();
+        for (Map.Entry<ICache<K, V>, Boolean> cacheEntry : caches.entrySet()) {
+            // Count all the tiers irrespective of whether they are enabled or not, as this will eventually
+            // go to zero once the cache is cleared up, either via invalidation or manually.
+            count += cacheEntry.getKey().count();
         }
         return count;
     }
@@ -191,16 +215,17 @@ public long count() {
     @Override
     public void refresh() {
         try (ReleasableLock ignore = writeLock.acquire()) {
-            for (ICache<K, V> cache : cacheList) {
-                cache.refresh();
+            for (Map.Entry<ICache<K, V>, Boolean> cacheEntry : caches.entrySet()) {
+                cacheEntry.getKey().refresh();
             }
         }
     }
 
     @Override
     public void close() throws IOException {
-        for (ICache<K, V> cache : cacheList) {
-            cache.close();
+        for (Map.Entry<ICache<K, V>, Boolean> cacheEntry : caches.entrySet()) {
+            // Close all the caches here irrespective of whether they are enabled or not.
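+            // Even a disabled disk tier can still hold resources (for example open file handles), so it is
+            // closed along with the enabled tiers.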
+            cacheEntry.getKey().close();
         }
     }
 
@@ -212,13 +237,12 @@ public ImmutableCacheStatsHolder stats() {
     private Function<ICacheKey<K>, V> getValueFromTieredCache() {
         return key -> {
             try (ReleasableLock ignore = readLock.acquire()) {
-                for (ICache<K, V> cache : cacheList) {
-                    V value = cache.get(key);
-                    if (value != null) {
-                        // update hit stats
-                        return value;
-                    } else {
-                        // update miss stats
+                for (Map.Entry<ICache<K, V>, Boolean> cacheEntry : caches.entrySet()) {
+                    if (cacheEntry.getValue()) {
+                        V value = cacheEntry.getKey().get(key);
+                        if (value != null) {
+                            return value;
+                        }
                     }
                 }
             }
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java
index dfd40199d859e..1c10e51630460 100644
--- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java
@@ -11,6 +11,8 @@
 import org.opensearch.common.cache.CacheType;
 import org.opensearch.common.cache.ICache;
 import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.plugins.CachePlugin;
 import org.opensearch.plugins.Plugin;
 
@@ -18,6 +20,7 @@
 import java.util.List;
 import java.util.Map;
 
+import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP;
 import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP;
 
 /**
@@ -30,10 +33,15 @@ public class TieredSpilloverCachePlugin extends Plugin implements CachePlugin {
      */
     public static final String TIERED_CACHE_SPILLOVER_PLUGIN_NAME = "tieredSpilloverCachePlugin";
 
+    private final Settings settings;
+
     /**
      * Default constructor
+     * @param settings settings
      */
-    public TieredSpilloverCachePlugin() {}
+    public TieredSpilloverCachePlugin(Settings settings) {
+        this.settings = settings;
+    }
 
     @Override
     public Map<String, ICache.Factory> getCacheFactoryMap() {
@@ -54,6 +62,9 @@ public List<Setting<?>> getSettings() {
                 TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix())
             );
             settingList.add(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(cacheType));
+            if (FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings)) {
+                settingList.add(DISK_CACHE_ENABLED_SETTING_MAP.get(cacheType));
+            }
         }
         return settingList;
     }
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java
index b89e8c517a351..e8e441d6bd3a6 100644
--- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java
@@ -42,6 +42,14 @@ public class TieredSpilloverCacheSettings {
         (key) -> Setting.simpleString(key, "", NodeScope)
     );
 
+    /**
+     * Setting to disable/enable disk cache dynamically.
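+     * As a suffix setting, a concrete key is registered per cache type; for the request cache it resolves
+     * to {@code indices.requests.cache.tiered_spillover.disk.store.enabled} (the cache type's setting
+     * prefix plus the suffix below) and can be updated at runtime via the cluster settings API.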
+     */
+    public static final Setting.AffixSetting<Boolean> TIERED_SPILLOVER_DISK_CACHE_SETTING = Setting.suffixKeySetting(
+        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.enabled",
+        (key) -> Setting.boolSetting(key, true, NodeScope, Setting.Property.Dynamic)
+    );
+
     /**
      * Setting defining the minimum took time for a query to be allowed into the disk cache.
      */
@@ -63,17 +71,29 @@ public class TieredSpilloverCacheSettings {
     public static final Map<CacheType, Setting<TimeValue>> TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP;
 
     /**
-     * Fetches concrete took time policy settings.
+     * Stores the disk cache enabled settings for the various cache types; as these are dynamic, they can be
+     * registered and retrieved accordingly.
+     */
+    public static final Map<CacheType, Setting<Boolean>> DISK_CACHE_ENABLED_SETTING_MAP;
+
+    /**
+     * Fetches concrete took time policy and disk cache settings.
      */
     static {
         Map<CacheType, Setting<TimeValue>> concreteTookTimePolicySettingMap = new HashMap<>();
+        Map<CacheType, Setting<Boolean>> diskCacheSettingMap = new HashMap<>();
         for (CacheType cacheType : CacheType.values()) {
             concreteTookTimePolicySettingMap.put(
                 cacheType,
                 TIERED_SPILLOVER_DISK_TOOK_TIME_THRESHOLD.getConcreteSettingForNamespace(cacheType.getSettingPrefix())
             );
+            diskCacheSettingMap.put(
+                cacheType,
+                TIERED_SPILLOVER_DISK_CACHE_SETTING.getConcreteSettingForNamespace(cacheType.getSettingPrefix())
+            );
         }
         TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP = concreteTookTimePolicySettingMap;
+        DISK_CACHE_ENABLED_SETTING_MAP = diskCacheSettingMap;
     }
 
     /**
diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java
index 1172a48e97c6a..4a96ffe2069ec 100644
--- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java
+++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java
@@ -9,6 +9,8 @@
 package org.opensearch.cache.common.tier;
 
 import org.opensearch.common.cache.ICache;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.util.Map;
@@ -16,9 +18,16 @@ public class TieredSpilloverCachePluginTests extends OpenSearchTestCase {
 
     public void testGetCacheFactoryMap() {
-        TieredSpilloverCachePlugin tieredSpilloverCachePlugin = new TieredSpilloverCachePlugin();
+        TieredSpilloverCachePlugin tieredSpilloverCachePlugin = new TieredSpilloverCachePlugin(Settings.EMPTY);
         Map<String, ICache.Factory> map = tieredSpilloverCachePlugin.getCacheFactoryMap();
         assertNotNull(map.get(TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME));
         assertEquals(TieredSpilloverCachePlugin.TIERED_CACHE_SPILLOVER_PLUGIN_NAME, tieredSpilloverCachePlugin.getName());
     }
+
+    public void testGetSettingsWithFeatureFlagOn() {
+        TieredSpilloverCachePlugin tieredSpilloverCachePlugin = new TieredSpilloverCachePlugin(
+            Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE_SETTING.getKey(), true).build()
+        );
+        assertFalse(tieredSpilloverCachePlugin.getSettings().isEmpty());
+    }
 }
diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java
index bf9f8fd22d793..1ecb63414dc68 100644
--- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java
+++
b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -42,6 +42,7 @@ import java.util.function.Function; import java.util.function.Predicate; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; @@ -56,6 +57,7 @@ public void setup() { Settings settings = Settings.EMPTY; clusterSettings = new ClusterSettings(settings, new HashSet<>()); clusterSettings.registerSetting(TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP.get(CacheType.INDICES_REQUEST_CACHE)); + clusterSettings.registerSetting(DISK_CACHE_ENABLED_SETTING_MAP.get(CacheType.INDICES_REQUEST_CACHE)); } public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception { @@ -302,6 +304,7 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { ) .build() ) + .setClusterSettings(clusterSettings) .build(); ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(0, diskCacheSize); @@ -777,6 +780,7 @@ public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exceptio ) .build() ) + .setClusterSettings(clusterSettings) .setDimensionNames(dimensionNames) .build(); TieredSpilloverCache tieredSpilloverCache = new TieredSpilloverCache.Builder() @@ -1008,6 +1012,116 @@ public void testMinimumThresholdSettingValue() throws Exception { assertEquals(validDuration, concreteSetting.get(validSettings)); } + public void testPutWithDiskCacheDisabledSetting() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; + int totalSize = onHeapCacheSize + diskCacheSize; + + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .put(DISK_CACHE_ENABLED_SETTING_MAP.get(CacheType.INDICES_REQUEST_CACHE).getKey(), false) + .build(), + 0 + ); + + int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); // Create more items than onHeap cache. + for (int iter = 0; iter < numOfItems1; iter++) { + ICacheKey key = getICacheKey(UUID.randomUUID().toString()); + LoadAwareCacheLoader, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader); + } + ICache onHeapCache = tieredSpilloverCache.getOnHeapCache(); + ICache diskCache = tieredSpilloverCache.getDiskCache(); + assertEquals(onHeapCacheSize, onHeapCache.count()); + assertEquals(0, diskCache.count()); // Disk cache shouldn't have anything considering it is disabled. 
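+        // Since a disabled disk tier rejects spilled entries (the enabled flag is checked in onRemoval),
+        // on-heap evictions flow straight to the removal listener, which the next assertion counts.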
+ assertEquals(numOfItems1 - onHeapCacheSize, removalListener.evictionsMetric.count()); + } + + public void testGetPutAndInvalidateWithDiskCacheDisabled() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; + int totalSize = onHeapCacheSize + diskCacheSize; + + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache( + keyValueSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + + int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize - 1); // Create more items than onHeap + // cache to cause spillover. + for (int iter = 0; iter < numOfItems1; iter++) { + ICacheKey key = getICacheKey(UUID.randomUUID().toString()); + LoadAwareCacheLoader, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader); + } + ICache onHeapCache = tieredSpilloverCache.getOnHeapCache(); + ICache diskCache = tieredSpilloverCache.getDiskCache(); + List> diskCacheKeys = new ArrayList<>(); + tieredSpilloverCache.getDiskCache().keys().forEach(diskCacheKeys::add); + long actualDiskCacheCount = diskCache.count(); + long actualTieredCacheCount = tieredSpilloverCache.count(); + assertEquals(onHeapCacheSize, onHeapCache.count()); + assertEquals(numOfItems1 - onHeapCacheSize, actualDiskCacheCount); + assertEquals(0, removalListener.evictionsMetric.count()); + assertEquals(numOfItems1, actualTieredCacheCount); + for (ICacheKey diskKey : diskCacheKeys) { + assertNotNull(tieredSpilloverCache.get(diskKey)); + } + + tieredSpilloverCache.enableDisableDiskCache(false); // Disable disk cache now. + int numOfItems2 = totalSize - numOfItems1; + for (int iter = 0; iter < numOfItems2; iter++) { + ICacheKey key = getICacheKey(UUID.randomUUID().toString()); + LoadAwareCacheLoader, String> loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader); + } + for (ICacheKey diskKey : diskCacheKeys) { + assertNull(tieredSpilloverCache.get(diskKey)); // Considering disk cache is disabled, we shouldn't find + // these keys. + } + assertEquals(onHeapCacheSize, onHeapCache.count()); // Should remain same. + assertEquals(0, diskCache.count() - actualDiskCacheCount); // Considering it is disabled now, shouldn't cache + // any more items. + assertEquals(numOfItems2, removalListener.evictionsMetric.count()); // Considering onHeap cache was already + // full, we should all existing onHeap entries being evicted. + assertEquals(0, tieredSpilloverCache.count() - actualTieredCacheCount); // Count still returns disk cache + // entries count as they haven't been cleared yet. + long lastKnownTieredCacheEntriesCount = tieredSpilloverCache.count(); + + // Clear up disk cache keys. + for (ICacheKey diskKey : diskCacheKeys) { + tieredSpilloverCache.invalidate(diskKey); + } + assertEquals(0, diskCache.count()); + assertEquals(lastKnownTieredCacheEntriesCount - diskCacheKeys.size(), tieredSpilloverCache.count()); + + tieredSpilloverCache.invalidateAll(); // Clear up all the keys. 
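+        // invalidateAll() visits every tier regardless of the enabled flag, so the count drops to zero
+        // even while the disk tier remains disabled.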
+ assertEquals(0, tieredSpilloverCache.count()); + } + private List getMockDimensions() { List dims = new ArrayList<>(); for (String dimensionName : dimensionNames) { @@ -1121,6 +1235,7 @@ private TieredSpilloverCache intializeTieredSpilloverCache( .put(settings) .build() ) + .setClusterSettings(clusterSettings) .build(); ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(diskDeliberateDelay, diskCacheSize); From f7984974963d745ca3cb88460251792acd7b326d Mon Sep 17 00:00:00 2001 From: Kiran Prakash Date: Fri, 26 Apr 2024 11:52:31 -0700 Subject: [PATCH 2/5] [Tiered Caching] Make Indices Request Cache Stale Key Mgmt Threshold setting dynamic (#12941) * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update ClusterSettings.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * spotless Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * some refactoring Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * address existing tests Signed-off-by: Kiran Prakash * UTs Signed-off-by: Kiran Prakash * Update CHANGELOG.md Signed-off-by: Kiran Prakash * ITs Signed-off-by: Kiran Prakash * spotless Signed-off-by: Kiran Prakash * refactor Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * Update CHANGELOG.md Signed-off-by: Kiran Prakash * Update CHANGELOG.md Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * resolve conflicts Signed-off-by: Kiran Prakash * address code comments Signed-off-by: Kiran Prakash * address code comments Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * rename tests Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash * resolve conflicts Signed-off-by: Kiran Prakash * Update IndicesRequestCache.java Signed-off-by: Kiran Prakash * code comments Signed-off-by: Kiran Prakash * Update IndicesRequestCacheIT.java Signed-off-by: Kiran Prakash --------- Signed-off-by: Kiran Prakash --- CHANGELOG.md | 1 + .../indices/IndicesRequestCacheIT.java | 844 +++++++++++++++--- .../common/settings/ClusterSettings.java | 2 + .../indices/IndicesRequestCache.java | 41 +- .../opensearch/indices/IndicesService.java | 3 +- .../indices/IndicesRequestCacheTests.java | 34 + 6 files changed, 785 insertions(+), 140 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee2bb839db324..0a351a1061e15 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add cluster setting to dynamically configure the buckets for filter rewrite optimization. ([#13179](https://github.com/opensearch-project/OpenSearch/pull/13179)) - [Tiered Caching] Add a dynamic setting to disable/enable disk cache. 
([#13373](https://github.com/opensearch-project/OpenSearch/pull/13373)) - [Remote Store] Add capability of doing refresh as determined by the translog ([#12992](https://github.com/opensearch-project/OpenSearch/pull/12992)) +- [Tiered caching] Make Indices Request Cache Stale Key Mgmt Threshold setting dynamic ([#12941](https://github.com/opensearch-project/OpenSearch/pull/12941)) - Batch mode for async fetching shard information in GatewayAllocator for unassigned shards ([#8746](https://github.com/opensearch-project/OpenSearch/pull/8746)) ### Dependencies diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index ec5637cec6485..ea064a3a3212d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -34,7 +34,11 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.indices.alias.Alias; +import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; @@ -42,13 +46,16 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; @@ -59,7 +66,12 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.concurrent.TimeUnit; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.indices.IndicesRequestCache.INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING; +import static org.opensearch.indices.IndicesService.INDICES_CACHE_CLEANUP_INTERVAL_SETTING_KEY; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; @@ -69,6 +81,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, supportsDedicatedMasters = false) 
public class IndicesRequestCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public IndicesRequestCacheIT(Settings settings) { super(settings); @@ -92,25 +105,31 @@ protected boolean useRandomReplicationStrategy() { // One of the primary purposes of the query cache is to cache aggs results public void testCacheAggs() throws Exception { Client client = client(); + String index = "index"; assertAcked( client.admin() .indices() - .prepareCreate("index") + .prepareCreate(index) .setMapping("f", "type=date") - .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)) + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + ) .get() ); indexRandom( true, - client.prepareIndex("index").setSource("f", "2014-03-10T00:00:00.000Z"), - client.prepareIndex("index").setSource("f", "2014-05-13T00:00:00.000Z") + client.prepareIndex(index).setSource("f", "2014-03-10T00:00:00.000Z"), + client.prepareIndex(index).setSource("f", "2014-05-13T00:00:00.000Z") ); - ensureSearchable("index"); + ensureSearchable(index); // This is not a random example: serialization with time zones writes shared strings // which used to not work well with the query cache because of the handles stream output // see #9500 - final SearchResponse r1 = client.prepareSearch("index") + final SearchResponse r1 = client.prepareSearch(index) .setSize(0) .setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation( @@ -124,12 +143,12 @@ public void testCacheAggs() throws Exception { // The cached is actually used assertThat( - client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), + client.admin().indices().prepareStats(index).setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0L) ); for (int i = 0; i < 10; ++i) { - final SearchResponse r2 = client.prepareSearch("index") + final SearchResponse r2 = client.prepareSearch(index) .setSize(0) .setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation( @@ -156,10 +175,11 @@ public void testCacheAggs() throws Exception { public void testQueryRewrite() throws Exception { Client client = client(); + String index = "index"; assertAcked( client.admin() .indices() - .prepareCreate("index") + .prepareCreate(index) .setMapping("s", "type=date") .setSettings( Settings.builder() @@ -172,28 +192,28 @@ public void testQueryRewrite() throws Exception { ); indexRandom( true, - client.prepareIndex("index").setId("1").setRouting("1").setSource("s", "2016-03-19"), - client.prepareIndex("index").setId("2").setRouting("1").setSource("s", "2016-03-20"), - client.prepareIndex("index").setId("3").setRouting("1").setSource("s", "2016-03-21"), - client.prepareIndex("index").setId("4").setRouting("2").setSource("s", "2016-03-22"), - client.prepareIndex("index").setId("5").setRouting("2").setSource("s", "2016-03-23"), - client.prepareIndex("index").setId("6").setRouting("2").setSource("s", "2016-03-24"), - client.prepareIndex("index").setId("7").setRouting("3").setSource("s", "2016-03-25"), - client.prepareIndex("index").setId("8").setRouting("3").setSource("s", "2016-03-26"), - client.prepareIndex("index").setId("9").setRouting("3").setSource("s", "2016-03-27") + client.prepareIndex(index).setId("1").setRouting("1").setSource("s", "2016-03-19"), + 
client.prepareIndex(index).setId("2").setRouting("1").setSource("s", "2016-03-20"), + client.prepareIndex(index).setId("3").setRouting("1").setSource("s", "2016-03-21"), + client.prepareIndex(index).setId("4").setRouting("2").setSource("s", "2016-03-22"), + client.prepareIndex(index).setId("5").setRouting("2").setSource("s", "2016-03-23"), + client.prepareIndex(index).setId("6").setRouting("2").setSource("s", "2016-03-24"), + client.prepareIndex(index).setId("7").setRouting("3").setSource("s", "2016-03-25"), + client.prepareIndex(index).setId("8").setRouting("3").setSource("s", "2016-03-26"), + client.prepareIndex(index).setId("9").setRouting("3").setSource("s", "2016-03-27") ); - ensureSearchable("index"); - assertCacheState(client, "index", 0, 0); + ensureSearchable(index); + assertCacheState(client, index, 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); refreshAndWaitForReplication(); - ensureSearchable("index"); + ensureSearchable(index); - assertCacheState(client, "index", 0, 0); + assertCacheState(client, index, 0, 0); - final SearchResponse r1 = client.prepareSearch("index") + final SearchResponse r1 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")) @@ -202,9 +222,9 @@ public void testQueryRewrite() throws Exception { .get(); OpenSearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits().value, equalTo(7L)); - assertCacheState(client, "index", 0, 5); + assertCacheState(client, index, 0, 5); - final SearchResponse r2 = client.prepareSearch("index") + final SearchResponse r2 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) @@ -212,9 +232,9 @@ public void testQueryRewrite() throws Exception { .get(); OpenSearchAssertions.assertAllSuccessful(r2); assertThat(r2.getHits().getTotalHits().value, equalTo(7L)); - assertCacheState(client, "index", 3, 7); + assertCacheState(client, index, 3, 7); - final SearchResponse r3 = client.prepareSearch("index") + final SearchResponse r3 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")) @@ -222,15 +242,16 @@ public void testQueryRewrite() throws Exception { .get(); OpenSearchAssertions.assertAllSuccessful(r3); assertThat(r3.getHits().getTotalHits().value, equalTo(7L)); - assertCacheState(client, "index", 6, 9); + assertCacheState(client, index, 6, 9); } public void testQueryRewriteMissingValues() throws Exception { Client client = client(); + String index = "index"; assertAcked( client.admin() .indices() - .prepareCreate("index") + .prepareCreate(index) .setMapping("s", "type=date") .setSettings( Settings.builder() @@ -242,61 +263,62 @@ public void testQueryRewriteMissingValues() throws Exception { ); indexRandom( true, - client.prepareIndex("index").setId("1").setSource("s", "2016-03-19"), - client.prepareIndex("index").setId("2").setSource("s", "2016-03-20"), - client.prepareIndex("index").setId("3").setSource("s", 
"2016-03-21"), - client.prepareIndex("index").setId("4").setSource("s", "2016-03-22"), - client.prepareIndex("index").setId("5").setSource("s", "2016-03-23"), - client.prepareIndex("index").setId("6").setSource("s", "2016-03-24"), - client.prepareIndex("index").setId("7").setSource("other", "value"), - client.prepareIndex("index").setId("8").setSource("s", "2016-03-26"), - client.prepareIndex("index").setId("9").setSource("s", "2016-03-27") + client.prepareIndex(index).setId("1").setSource("s", "2016-03-19"), + client.prepareIndex(index).setId("2").setSource("s", "2016-03-20"), + client.prepareIndex(index).setId("3").setSource("s", "2016-03-21"), + client.prepareIndex(index).setId("4").setSource("s", "2016-03-22"), + client.prepareIndex(index).setId("5").setSource("s", "2016-03-23"), + client.prepareIndex(index).setId("6").setSource("s", "2016-03-24"), + client.prepareIndex(index).setId("7").setSource("other", "value"), + client.prepareIndex(index).setId("8").setSource("s", "2016-03-26"), + client.prepareIndex(index).setId("9").setSource("s", "2016-03-27") ); - ensureSearchable("index"); - assertCacheState(client, "index", 0, 0); + ensureSearchable(index); + assertCacheState(client, index, 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); refreshAndWaitForReplication(); - ensureSearchable("index"); + ensureSearchable(index); - assertCacheState(client, "index", 0, 0); + assertCacheState(client, index, 0, 0); - final SearchResponse r1 = client.prepareSearch("index") + final SearchResponse r1 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")) .get(); OpenSearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits().value, equalTo(8L)); - assertCacheState(client, "index", 0, 1); + assertCacheState(client, index, 0, 1); - final SearchResponse r2 = client.prepareSearch("index") + final SearchResponse r2 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")) .get(); OpenSearchAssertions.assertAllSuccessful(r2); assertThat(r2.getHits().getTotalHits().value, equalTo(8L)); - assertCacheState(client, "index", 1, 1); + assertCacheState(client, index, 1, 1); - final SearchResponse r3 = client.prepareSearch("index") + final SearchResponse r3 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-28")) .get(); OpenSearchAssertions.assertAllSuccessful(r3); assertThat(r3.getHits().getTotalHits().value, equalTo(8L)); - assertCacheState(client, "index", 2, 1); + assertCacheState(client, index, 2, 1); } public void testQueryRewriteDates() throws Exception { Client client = client(); + String index = "index"; assertAcked( client.admin() .indices() - .prepareCreate("index") + .prepareCreate(index) .setMapping("d", "type=date") .setSettings( Settings.builder() @@ -308,28 +330,28 @@ public void testQueryRewriteDates() throws Exception { ); indexRandom( true, - 
client.prepareIndex("index").setId("1").setSource("d", "2014-01-01T00:00:00"), - client.prepareIndex("index").setId("2").setSource("d", "2014-02-01T00:00:00"), - client.prepareIndex("index").setId("3").setSource("d", "2014-03-01T00:00:00"), - client.prepareIndex("index").setId("4").setSource("d", "2014-04-01T00:00:00"), - client.prepareIndex("index").setId("5").setSource("d", "2014-05-01T00:00:00"), - client.prepareIndex("index").setId("6").setSource("d", "2014-06-01T00:00:00"), - client.prepareIndex("index").setId("7").setSource("d", "2014-07-01T00:00:00"), - client.prepareIndex("index").setId("8").setSource("d", "2014-08-01T00:00:00"), - client.prepareIndex("index").setId("9").setSource("d", "2014-09-01T00:00:00") + client.prepareIndex(index).setId("1").setSource("d", "2014-01-01T00:00:00"), + client.prepareIndex(index).setId("2").setSource("d", "2014-02-01T00:00:00"), + client.prepareIndex(index).setId("3").setSource("d", "2014-03-01T00:00:00"), + client.prepareIndex(index).setId("4").setSource("d", "2014-04-01T00:00:00"), + client.prepareIndex(index).setId("5").setSource("d", "2014-05-01T00:00:00"), + client.prepareIndex(index).setId("6").setSource("d", "2014-06-01T00:00:00"), + client.prepareIndex(index).setId("7").setSource("d", "2014-07-01T00:00:00"), + client.prepareIndex(index).setId("8").setSource("d", "2014-08-01T00:00:00"), + client.prepareIndex(index).setId("9").setSource("d", "2014-09-01T00:00:00") ); - ensureSearchable("index"); - assertCacheState(client, "index", 0, 0); + ensureSearchable(index); + assertCacheState(client, index, 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); refreshAndWaitForReplication(); - ensureSearchable("index"); + ensureSearchable(index); - assertCacheState(client, "index", 0, 0); + assertCacheState(client, index, 0, 0); - final SearchResponse r1 = client.prepareSearch("index") + final SearchResponse r1 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) @@ -338,9 +360,9 @@ public void testQueryRewriteDates() throws Exception { .get(); OpenSearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits().value, equalTo(9L)); - assertCacheState(client, "index", 0, 1); + assertCacheState(client, index, 0, 1); - final SearchResponse r2 = client.prepareSearch("index") + final SearchResponse r2 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) @@ -348,9 +370,9 @@ public void testQueryRewriteDates() throws Exception { .get(); OpenSearchAssertions.assertAllSuccessful(r2); assertThat(r2.getHits().getTotalHits().value, equalTo(9L)); - assertCacheState(client, "index", 1, 1); + assertCacheState(client, index, 1, 1); - final SearchResponse r3 = client.prepareSearch("index") + final SearchResponse r3 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) @@ -358,7 +380,7 @@ public void testQueryRewriteDates() throws Exception { .get(); 
OpenSearchAssertions.assertAllSuccessful(r3); assertThat(r3.getHits().getTotalHits().value, equalTo(9L)); - assertCacheState(client, "index", 2, 1); + assertCacheState(client, index, 2, 1); } public void testQueryRewriteDatesWithNow() throws Exception { @@ -449,53 +471,54 @@ public void testCanCache() throws Exception { .put("index.number_of_routing_shards", 2) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .build(); - assertAcked(client.admin().indices().prepareCreate("index").setMapping("s", "type=date").setSettings(settings).get()); + String index = "index"; + assertAcked(client.admin().indices().prepareCreate(index).setMapping("s", "type=date").setSettings(settings).get()); indexRandom( true, - client.prepareIndex("index").setId("1").setRouting("1").setSource("s", "2016-03-19"), - client.prepareIndex("index").setId("2").setRouting("1").setSource("s", "2016-03-20"), - client.prepareIndex("index").setId("3").setRouting("1").setSource("s", "2016-03-21"), - client.prepareIndex("index").setId("4").setRouting("2").setSource("s", "2016-03-22"), - client.prepareIndex("index").setId("5").setRouting("2").setSource("s", "2016-03-23"), - client.prepareIndex("index").setId("6").setRouting("2").setSource("s", "2016-03-24"), - client.prepareIndex("index").setId("7").setRouting("3").setSource("s", "2016-03-25"), - client.prepareIndex("index").setId("8").setRouting("3").setSource("s", "2016-03-26"), - client.prepareIndex("index").setId("9").setRouting("3").setSource("s", "2016-03-27") + client.prepareIndex(index).setId("1").setRouting("1").setSource("s", "2016-03-19"), + client.prepareIndex(index).setId("2").setRouting("1").setSource("s", "2016-03-20"), + client.prepareIndex(index).setId("3").setRouting("1").setSource("s", "2016-03-21"), + client.prepareIndex(index).setId("4").setRouting("2").setSource("s", "2016-03-22"), + client.prepareIndex(index).setId("5").setRouting("2").setSource("s", "2016-03-23"), + client.prepareIndex(index).setId("6").setRouting("2").setSource("s", "2016-03-24"), + client.prepareIndex(index).setId("7").setRouting("3").setSource("s", "2016-03-25"), + client.prepareIndex(index).setId("8").setRouting("3").setSource("s", "2016-03-26"), + client.prepareIndex(index).setId("9").setRouting("3").setSource("s", "2016-03-27") ); - ensureSearchable("index"); - assertCacheState(client, "index", 0, 0); + ensureSearchable(index); + assertCacheState(client, index, 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); refreshAndWaitForReplication(); - ensureSearchable("index"); + ensureSearchable(index); - assertCacheState(client, "index", 0, 0); + assertCacheState(client, index, 0, 0); // If size > 0 we should no cache by default - final SearchResponse r1 = client.prepareSearch("index") + final SearchResponse r1 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(1) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-19").lte("2016-03-25")) .get(); OpenSearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits().value, equalTo(7L)); - assertCacheState(client, "index", 0, 0); + assertCacheState(client, index, 0, 0); // If search type is DFS_QUERY_THEN_FETCH we should 
not cache - final SearchResponse r2 = client.prepareSearch("index") + final SearchResponse r2 = client.prepareSearch(index) .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) .get(); OpenSearchAssertions.assertAllSuccessful(r2); assertThat(r2.getHits().getTotalHits().value, equalTo(7L)); - assertCacheState(client, "index", 0, 0); + assertCacheState(client, index, 0, 0); // If search type is DFS_QUERY_THEN_FETCH we should not cache even if // the cache flag is explicitly set on the request - final SearchResponse r3 = client.prepareSearch("index") + final SearchResponse r3 = client.prepareSearch(index) .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) .setSize(0) .setRequestCache(true) @@ -503,10 +526,10 @@ public void testCanCache() throws Exception { .get(); OpenSearchAssertions.assertAllSuccessful(r3); assertThat(r3.getHits().getTotalHits().value, equalTo(7L)); - assertCacheState(client, "index", 0, 0); + assertCacheState(client, index, 0, 0); // If the request has an non-filter aggregation containing now we should not cache - final SearchResponse r5 = client.prepareSearch("index") + final SearchResponse r5 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setRequestCache(true) @@ -515,10 +538,10 @@ public void testCanCache() throws Exception { .get(); OpenSearchAssertions.assertAllSuccessful(r5); assertThat(r5.getHits().getTotalHits().value, equalTo(7L)); - assertCacheState(client, "index", 0, 0); + assertCacheState(client, index, 0, 0); // If size > 1 and cache flag is set on the request we should cache - final SearchResponse r6 = client.prepareSearch("index") + final SearchResponse r6 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(1) .setRequestCache(true) @@ -526,10 +549,10 @@ public void testCanCache() throws Exception { .get(); OpenSearchAssertions.assertAllSuccessful(r6); assertThat(r6.getHits().getTotalHits().value, equalTo(7L)); - assertCacheState(client, "index", 0, 2); + assertCacheState(client, index, 0, 2); // If the request has a filter aggregation containing now we should cache since it gets rewritten - final SearchResponse r4 = client.prepareSearch("index") + final SearchResponse r4 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setRequestCache(true) @@ -538,7 +561,7 @@ public void testCanCache() throws Exception { .get(); OpenSearchAssertions.assertAllSuccessful(r4); assertThat(r4.getHits().getTotalHits().value, equalTo(7L)); - assertCacheState(client, "index", 0, 4); + assertCacheState(client, index, 0, 4); } public void testCacheWithFilteredAlias() throws InterruptedException { @@ -548,61 +571,63 @@ public void testCacheWithFilteredAlias() throws InterruptedException { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .build(); + String index = "index"; assertAcked( client.admin() .indices() - .prepareCreate("index") + .prepareCreate(index) .setMapping("created_at", "type=date") .setSettings(settings) .addAlias(new Alias("last_week").filter(QueryBuilders.rangeQuery("created_at").gte("now-7d/d"))) .get() ); ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); - client.prepareIndex("index").setId("1").setRouting("1").setSource("created_at", DateTimeFormatter.ISO_LOCAL_DATE.format(now)).get(); + client.prepareIndex(index).setId("1").setRouting("1").setSource("created_at", DateTimeFormatter.ISO_LOCAL_DATE.format(now)).get(); // 
Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); + ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); refreshAndWaitForReplication(); - indexRandomForConcurrentSearch("index"); + indexRandomForConcurrentSearch(index); - assertCacheState(client, "index", 0, 0); + assertCacheState(client, index, 0, 0); - SearchResponse r1 = client.prepareSearch("index") + SearchResponse r1 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")) .get(); OpenSearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); - assertCacheState(client, "index", 0, 1); + assertCacheState(client, index, 0, 1); - r1 = client.prepareSearch("index") + r1 = client.prepareSearch(index) .setSearchType(SearchType.QUERY_THEN_FETCH) .setSize(0) .setQuery(QueryBuilders.rangeQuery("created_at").gte("now-7d/d")) .get(); OpenSearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); - assertCacheState(client, "index", 1, 1); + assertCacheState(client, index, 1, 1); r1 = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); OpenSearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); - assertCacheState(client, "index", 1, 2); + assertCacheState(client, index, 1, 2); r1 = client.prepareSearch("last_week").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get(); OpenSearchAssertions.assertAllSuccessful(r1); assertThat(r1.getHits().getTotalHits().value, equalTo(1L)); - assertCacheState(client, "index", 2, 2); + assertCacheState(client, index, 2, 2); } public void testProfileDisableCache() throws Exception { Client client = client(); + String index = "index"; assertAcked( client.admin() .indices() - .prepareCreate("index") + .prepareCreate(index) .setMapping("k", "type=keyword") .setSettings( Settings.builder() @@ -612,14 +637,14 @@ public void testProfileDisableCache() throws Exception { ) .get() ); - indexRandom(true, client.prepareIndex("index").setSource("k", "hello")); - ensureSearchable("index"); + indexRandom(true, client.prepareIndex(index).setSource("k", "hello")); + ensureSearchable(index); int expectedHits = 0; int expectedMisses = 0; for (int i = 0; i < 5; i++) { boolean profile = i % 2 == 0; - SearchResponse resp = client.prepareSearch("index") + SearchResponse resp = client.prepareSearch(index) .setRequestCache(true) .setProfile(profile) .setQuery(QueryBuilders.termQuery("k", "hello")) @@ -634,16 +659,17 @@ public void testProfileDisableCache() throws Exception { expectedHits++; } } - assertCacheState(client, "index", expectedHits, expectedMisses); + assertCacheState(client, index, expectedHits, expectedMisses); } } public void testCacheWithInvalidation() throws Exception { Client client = client(); + String index = "index"; assertAcked( client.admin() .indices() - .prepareCreate("index") + .prepareCreate(index) .setMapping("k", "type=keyword") .setSettings( Settings.builder() @@ -654,38 +680,581 @@ public void testCacheWithInvalidation() throws Exception { ) .get() ); - indexRandom(true, client.prepareIndex("index").setSource("k", "hello")); - 
ensureSearchable("index"); - SearchResponse resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + indexRandom(true, client.prepareIndex(index).setSource("k", "hello")); + ensureSearchable(index); + SearchResponse resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); assertSearchResponse(resp); OpenSearchAssertions.assertAllSuccessful(resp); assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); - assertCacheState(client, "index", 0, 1); + assertCacheState(client, index, 0, 1); // Index but don't refresh - indexRandom(false, client.prepareIndex("index").setSource("k", "hello2")); - resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + indexRandom(false, client.prepareIndex(index).setSource("k", "hello2")); + resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); assertSearchResponse(resp); // Should expect hit as here as refresh didn't happen - assertCacheState(client, "index", 1, 1); + assertCacheState(client, index, 1, 1); // Explicit refresh would invalidate cache refreshAndWaitForReplication(); // Hit same query again - resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); assertSearchResponse(resp); // Should expect miss as key has changed due to change in IndexReader.CacheKey (due to refresh) - assertCacheState(client, "index", 1, 2); + assertCacheState(client, index, 1, 2); + } + + // calling cache clear api, when staleness threshold is lower than staleness, it should clean the stale keys from cache + public void testCacheClearAPIRemovesStaleKeysWhenStalenessThresholdIsLow() throws Exception { + String node = internalCluster().startNode( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + // setting intentionally high to avoid cache cleaner interfering + TimeValue.timeValueMillis(300) + ) + ); + Client client = client(node); + String index1 = "index1"; + String index2 = "index2"; + setupIndex(client, index1); + setupIndex(client, index2); + + // create first cache entry in index1 + createCacheEntry(client, index1, "hello"); + assertCacheState(client, index1, 0, 1); + long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(memorySizeForIndex1 > 0); + + // create second cache entry in index1 + createCacheEntry(client, index1, "there"); + assertCacheState(client, index1, 0, 2); + long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1); + + // create first cache entry in index2 + createCacheEntry(client, index2, "hello"); + assertCacheState(client, index2, 0, 1); + assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + + ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(index2); + client.admin().indices().clearCache(clearIndicesCacheRequest).actionGet(); + + // cache cleaner should have cleaned up the stale key from index 2 + assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes()); + // cache cleaner should NOT 
have cleaned from index 1 + assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + } + + // when staleness threshold is lower than staleness, it should clean the stale keys from cache + public void testStaleKeysCleanupWithLowThreshold() throws Exception { + int cacheCleanIntervalInMillis = 1; + String node = internalCluster().startNode( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + Client client = client(node); + String index1 = "index1"; + String index2 = "index2"; + setupIndex(client, index1); + setupIndex(client, index2); + + // create first cache entry in index1 + createCacheEntry(client, index1, "hello"); + assertCacheState(client, index1, 0, 1); + long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(memorySizeForIndex1 > 0); + + // create second cache entry in index1 + createCacheEntry(client, index1, "there"); + assertCacheState(client, index1, 0, 2); + long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1); + + // create first cache entry in index2 + createCacheEntry(client, index2, "hello"); + assertCacheState(client, index2, 0, 1); + assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + + // force refresh so that it creates 1 stale key + flushAndRefresh(index2); + // sleep until cache cleaner would have cleaned up the stale key from index 2 + assertBusy(() -> { + // cache cleaner should have cleaned up the stale key from index 2 + assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes()); + // cache cleaner should NOT have cleaned from index 1 + assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // when staleness threshold is equal to staleness, it should clean the stale keys from cache + public void testCacheCleanupOnEqualStalenessAndThreshold() throws Exception { + int cacheCleanIntervalInMillis = 1; + String node = internalCluster().startNode( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.33) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + Client client = client(node); + String index1 = "index1"; + String index2 = "index2"; + setupIndex(client, index1); + setupIndex(client, index2); + + // create first cache entry in index1 + createCacheEntry(client, index1, "hello"); + assertCacheState(client, index1, 0, 1); + long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(memorySizeForIndex1 > 0); + + // create second cache entry in index1 + createCacheEntry(client, index1, "there"); + assertCacheState(client, index1, 0, 2); + long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1); + + // create first cache entry in index2 + createCacheEntry(client, index2, "hello"); + assertCacheState(client, index2, 0, 1); +
assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + + // force refresh so that it creates 1 stale key + flushAndRefresh(index2); + // sleep until cache cleaner would have cleaned up the stale key from index 2 + assertBusy(() -> { + // cache cleaner should have cleaned up the stale key from index 2 + assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes()); + // cache cleaner should NOT have cleaned from index 1 + assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // when staleness threshold is higher than staleness, it should NOT clean the cache + public void testCacheCleanupSkipsWithHighStalenessThreshold() throws Exception { + int cacheCleanIntervalInMillis = 1; + String node = internalCluster().startNode( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.90) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + Client client = client(node); + String index1 = "index1"; + String index2 = "index2"; + setupIndex(client, index1); + setupIndex(client, index2); + + // create first cache entry in index1 + createCacheEntry(client, index1, "hello"); + assertCacheState(client, index1, 0, 1); + long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(memorySizeForIndex1 > 0); + + // create second cache entry in index1 + createCacheEntry(client, index1, "there"); + assertCacheState(client, index1, 0, 2); + long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1); + + // create first cache entry in index2 + createCacheEntry(client, index2, "hello"); + assertCacheState(client, index2, 0, 1); + assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + + // force refresh so that it creates 1 stale key + flushAndRefresh(index2); + // give the cache cleaner a chance to run; it should skip cleanup at this threshold + assertBusy(() -> { + // cache cleaner should NOT have cleaned up the stale key from index 2 + assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + // cache cleaner should NOT have cleaned from index 1 + assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // when staleness threshold is explicitly set to 0, cache cleaner regularly cleans up stale keys
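All of these staleness tests exercise the same gate: the cleaner only acts when the observed fraction of stale keys meets or exceeds the configured threshold, so a zero threshold always cleans, while a 90% threshold skips a run at 33% staleness. A rough, self-contained sketch of that comparison (illustrative names only, not the actual IndicesRequestCacheCleanupManager code):

// Sketch of the staleness gate; a stand-in, not the real implementation.
class StalenessGate {
    private volatile double stalenessThreshold; // 0.0 means "always clean"

    StalenessGate(double stalenessThreshold) {
        this.stalenessThreshold = stalenessThreshold;
    }

    // Dynamic setting updates only need to swap the volatile field; the
    // next scheduled cleanup run picks up the new value.
    void setStalenessThreshold(double stalenessThreshold) {
        this.stalenessThreshold = stalenessThreshold;
    }

    // Cleanup proceeds only when observed staleness meets or exceeds the threshold.
    boolean shouldClean(long staleKeys, long totalKeys) {
        if (totalKeys == 0) {
            return false;
        }
        return ((double) staleKeys / totalKeys) >= stalenessThreshold;
    }
}

Under that reading, the equal-threshold test above passes because one stale key out of three cached entries gives a staleness of roughly 0.33, which meets the 0.33 threshold.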
+ public void testCacheCleanupOnZeroStalenessThreshold() throws Exception { + int cacheCleanIntervalInMillis = 50; + String node = internalCluster().startNode( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + Client client = client(node); + String index1 = "index1"; + String index2 = "index2"; + setupIndex(client, index1); + setupIndex(client, index2); + + // create 10 index1 cache entries + for (int i = 1; i <= 10; i++) { + long cacheSizeBefore = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + createCacheEntry(client, index1, "hello" + i); + assertCacheState(client, index1, 0, i); + long cacheSizeAfter = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(cacheSizeAfter > cacheSizeBefore); + } + + long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + + // create first cache entry in index2 + createCacheEntry(client, index2, "hello"); + assertCacheState(client, index2, 0, 1); + assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + + // force refresh so that it creates 1 stale key + flushAndRefresh(index2); + // sleep until cache cleaner would have cleaned up the stale key from index 2 + assertBusy(() -> { + // cache cleaner should have cleaned up the stale key from index 2 + assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes()); + // cache cleaner should NOT have cleaned from index 1 + assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // when staleness threshold is not explicitly set, cache cleaner regularly cleans up stale keys + public void testStaleKeysRemovalWithoutExplicitThreshold() throws Exception { + int cacheCleanIntervalInMillis = 1; + String node = internalCluster().startNode( + Settings.builder() + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + String index1 = "index1"; + String index2 = "index2"; + Client client = client(node); + setupIndex(client, index1); + setupIndex(client, index2); + + // create first cache entry in index1 + createCacheEntry(client, index1, "hello"); + assertCacheState(client, index1, 0, 1); + long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(memorySizeForIndex1 > 0); + + // create second cache entry in index1 + createCacheEntry(client, index1, "there"); + assertCacheState(client, index1, 0, 2); + long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1); + + // create first cache entry in index2 + createCacheEntry(client, index2, "hello"); + assertCacheState(client, index2, 0, 1); + assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + + // force refresh so that it creates 1 stale key + flushAndRefresh(index2); + // sleep until cache cleaner would have cleaned up the stale key from index 2 + assertBusy(() -> { + // cache cleaner should have cleaned up the stale key from index 2 + assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes()); + // cache cleaner should NOT have cleaned from index 1 + 
assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // when cache cleaner interval setting is not set, cache cleaner is configured appropriately with the fallback setting + public void testCacheCleanupWithDefaultSettings() throws Exception { + int cacheCleanIntervalInMillis = 1; + String node = internalCluster().startNode( + Settings.builder().put(INDICES_CACHE_CLEANUP_INTERVAL_SETTING_KEY, TimeValue.timeValueMillis(cacheCleanIntervalInMillis)) + ); + Client client = client(node); + String index1 = "index1"; + String index2 = "index2"; + setupIndex(client, index1); + setupIndex(client, index2); + + // create first cache entry in index1 + createCacheEntry(client, index1, "hello"); + assertCacheState(client, index1, 0, 1); + long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(memorySizeForIndex1 > 0); + + // create second cache entry in index1 + createCacheEntry(client, index1, "there"); + assertCacheState(client, index1, 0, 2); + long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1); + + // create first cache entry in index2 + createCacheEntry(client, index2, "hello"); + assertCacheState(client, index2, 0, 1); + assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + + // force refresh so that it creates 1 stale key + flushAndRefresh(index2); + // sleep until cache cleaner would have cleaned up the stale key from index 2 + assertBusy(() -> { + // cache cleaner should have cleaned up the stale key from index 2 + assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes()); + // cache cleaner should NOT have cleaned from index 1 + assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // staleness threshold updates flow through to the cache cleaner + public void testDynamicStalenessThresholdUpdate() throws Exception { + int cacheCleanIntervalInMillis = 1; + String node = internalCluster().startNode( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.90) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + Client client = client(node); + String index1 = "index1"; + String index2 = "index2"; + setupIndex(client, index1); + setupIndex(client, index2); + + // create first cache entry in index1 + createCacheEntry(client, index1, "hello"); + assertCacheState(client, index1, 0, 1); + long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(memorySizeForIndex1 > 0); + + // create second cache entry in index1 + createCacheEntry(client, index1, "there"); + assertCacheState(client, index1, 0, 2); + assertTrue(getRequestCacheStats(client, index1).getMemorySizeInBytes() > memorySizeForIndex1); + + // create first cache entry in index2 + createCacheEntry(client, index2, "hello"); + assertCacheState(client, index2, 0, 1); + long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(finalMemorySizeForIndex1 > 0); + + // force refresh so that it creates 1 stale key + flushAndRefresh(index2); + assertBusy(() -> { + // cache cleaner should NOT have
cleaned up the stale key from index 2 + assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + + // Update indices.requests.cache.cleanup.staleness_threshold to "10%" + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), 0.10)); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + assertBusy(() -> { + // cache cleaner should have cleaned up the stale key from index 2 + assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes()); + // cache cleaner should NOT have cleaned from index 1 + assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // staleness threshold dynamic updates should throw exceptions on invalid input + public void testInvalidStalenessThresholdUpdateThrowsException() throws Exception { + int cacheCleanIntervalInMillis = 1; + String node = internalCluster().startNode( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.90) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + Client client = client(node); + String index1 = "index1"; + setupIndex(client, index1); + + // create first cache entry in index1 + createCacheEntry(client, index1, "hello"); + assertCacheState(client, index1, 0, 1); + assertTrue(getRequestCacheStats(client, index1).getMemorySizeInBytes() > 0); + + // Update indices.requests.cache.cleanup.staleness_threshold to "10%" with illegal argument + assertThrows("Ratio should be in [0-1.0]", IllegalArgumentException.class, () -> { + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder().put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 10) + ); + client().admin().cluster().updateSettings(updateSettingsRequest).actionGet(); + }); + + // everything else should continue to work fine later on. 
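The assertThrows above relies on the threshold setting rejecting values outside [0, 1] at parse time, before anything reaches the cleaner. A minimal sketch of that kind of check (a hypothetical stand-in for the RatioValue-based validateStalenessSetting wired into the setting later in this patch):

// Hypothetical validator mirroring the contract the test asserts; the real
// code delegates to RatioValue.parseRatioValue.
class StalenessThresholdValidator {
    static double parse(String value) {
        boolean percent = value.endsWith("%");
        double ratio = Double.parseDouble(percent ? value.substring(0, value.length() - 1) : value);
        if (percent) {
            ratio /= 100.0; // "10%" -> 0.10
        }
        if (ratio < 0.0 || ratio > 1.0) {
            // A bare 10, as used in the failing update above, lands here.
            throw new IllegalArgumentException("Ratio should be in [0-1.0], got: " + value);
        }
        return ratio;
    }
}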
+ // force refresh so that it creates 1 stale key + flushAndRefresh(index1); + // sleep until cache cleaner would have cleaned up the stale key from index 1 + assertBusy(() -> { + // with the original threshold still in effect, the cleaner should have cleaned the stale key from index 1 + assertEquals(0, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // closing the index after caching will clean it up from the indices request cache + public void testCacheClearanceAfterIndexClosure() throws Exception { + int cacheCleanIntervalInMillis = 100; + String node = internalCluster().startNode( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + Client client = client(node); + String index = "index"; + setupIndex(client, index); + + // create first cache entry in index + createCacheEntry(client, index, "hello"); + assertCacheState(client, index, 0, 1); + assertTrue(getRequestCacheStats(client, index).getMemorySizeInBytes() > 0); + assertTrue(getNodeCacheStats(client).getMemorySizeInBytes() > 0); + + // close index + assertAcked(client.admin().indices().prepareClose(index)); + // request cache stats cannot be accessed since the index is closed + try { + getRequestCacheStats(client, index); + } catch (Exception e) { + assert (e instanceof IndexClosedException); + } + // sleep until cache cleaner would have cleaned up the stale key from index + assertBusy(() -> { + // cache cleaner should have cleaned up the stale keys from index + assertFalse(getNodeCacheStats(client).getMemorySizeInBytes() > 0); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // deleting the index after caching will clean it up from the indices request cache + public void testCacheCleanupAfterIndexDeletion() throws Exception { + int cacheCleanIntervalInMillis = 100; + String node = internalCluster().startNode( + Settings.builder() + .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + Client client = client(node); + String index = "index"; + setupIndex(client, index); + + // create first cache entry in index + createCacheEntry(client, index, "hello"); + assertCacheState(client, index, 0, 1); + assertTrue(getRequestCacheStats(client, index).getMemorySizeInBytes() > 0); + assertTrue(getNodeCacheStats(client).getMemorySizeInBytes() > 0); + + // delete index + assertAcked(client.admin().indices().prepareDelete(index)); + // request cache stats cannot be accessed since the index is deleted + try { + getRequestCacheStats(client, index); + } catch (Exception e) { + assert (e instanceof IndexNotFoundException); + } + + // sleep until cache cleaner would have cleaned up the stale key from index + assertBusy(() -> { + // cache cleaner should have cleaned up the stale keys from index + assertFalse(getNodeCacheStats(client).getMemorySizeInBytes() > 0); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + // when staleness threshold is lower than staleness, it should clean the cache from all indices having stale keys + public void testStaleKeysCleanupWithMultipleIndices() throws Exception { + int cacheCleanIntervalInMillis = 300; + String node = internalCluster().startNode( + Settings.builder() +
.put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10) + .put( + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, + TimeValue.timeValueMillis(cacheCleanIntervalInMillis) + ) + ); + Client client = client(node); + String index1 = "index1"; + String index2 = "index2"; + setupIndex(client, index1); + setupIndex(client, index2); + + // create first cache entry in index1 + createCacheEntry(client, index1, "hello"); + assertCacheState(client, index1, 0, 1); + long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(memorySizeForIndex1 > 0); + + // create second cache entry in index1 + createCacheEntry(client, index1, "there"); + assertCacheState(client, index1, 0, 2); + long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes(); + assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1); + + // create first cache entry in index2 + createCacheEntry(client, index2, "hello"); + assertCacheState(client, index2, 0, 1); + assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0); + + // force refresh index 1 so that it creates 2 stale keys + flushAndRefresh(index1); + // create another cache entry in index 1, this should not be cleaned up. + createCacheEntry(client, index1, "hello"); + // record the size of this entry + long memorySizeOfLatestEntryForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes() - finalMemorySizeForIndex1; + // force refresh index 2 so that it creates 1 stale key + flushAndRefresh(index2); + // sleep until cache cleaner would have cleaned up the stale key from index 2 + assertBusy(() -> { + // cache cleaner should have cleaned up the stale key from index 2 + assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes()); + // cache cleaner should have only cleaned up the stale entities + assertEquals(memorySizeOfLatestEntryForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes()); + }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS); + } + + private void setupIndex(Client client, String index) throws Exception { + assertAcked( + client.admin() + .indices() + .prepareCreate(index) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + .get() + ); + indexRandom(true, client.prepareIndex(index).setSource("k", "hello")); + indexRandom(true, client.prepareIndex(index).setSource("k", "there")); + ensureSearchable(index); + } + + private void createCacheEntry(Client client, String index, String value) { + SearchResponse resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", value)).get(); + assertSearchResponse(resp); + OpenSearchAssertions.assertAllSuccessful(resp); } private static void assertCacheState(Client client, String index, long expectedHits, long expectedMisses) { - RequestCacheStats requestCacheStats = client.admin() - .indices() - .prepareStats(index) - .setRequestCache(true) - .get() - .getTotal() - .getRequestCache(); + RequestCacheStats requestCacheStats = getRequestCacheStats(client, index); // Check the hit count and miss count together so if they are not // correct we can see both values assertEquals( @@ -695,4 +1264,17 @@ private static void assertCacheState(Client client, String index, long expectedH } + private static 
RequestCacheStats getRequestCacheStats(Client client, String index) { + return client.admin().indices().prepareStats(index).setRequestCache(true).get().getTotal().getRequestCache(); + } + + private static RequestCacheStats getNodeCacheStats(Client client) { + NodesStatsResponse stats = client.admin().cluster().prepareNodesStats().execute().actionGet(); + for (NodeStats stat : stats.getNodes()) { + if (stat.getNode().isDataNode()) { + return stat.getIndices().getRequestCache(); + } + } + return null; + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index ded844b3a7f18..4a5a45eb1a17a 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -497,6 +497,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY, IndicesRequestCache.INDICES_CACHE_QUERY_SIZE, IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE, + IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING, + IndicesRequestCache.INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING, HunspellService.HUNSPELL_LAZY_LOAD, HunspellService.HUNSPELL_IGNORE_CASE, HunspellService.HUNSPELL_DICTIONARY_OPTIONS, diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 039e14a031f3f..105239b2c351b 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -112,6 +112,10 @@ public final class IndicesRequestCache implements RemovalListener INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting( "index.requests.cache.enable", true, @@ -128,15 +132,16 @@ public final class IndicesRequestCache implements RemovalListener INDICES_REQUEST_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting( - "indices.requests.cache.cleanup.interval", + public static final Setting INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING = Setting.positiveTimeSetting( + INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY, INDICES_CACHE_CLEAN_INTERVAL_SETTING, Property.NodeScope ); public static final Setting INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING = new Setting<>( - "indices.requests.cache.cleanup.staleness_threshold", + INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, "0%", IndicesRequestCache::validateStalenessSetting, + Property.Dynamic, Property.NodeScope ); @@ -146,6 +151,7 @@ public final class IndicesRequestCache implements RemovalListener cache; + private final ClusterService clusterService; private final Function> cacheEntityLookup; // pkg-private for testing final IndicesRequestCacheCleanupManager cacheCleanupManager; @@ -167,10 +173,13 @@ public final class IndicesRequestCache implements RemovalListener, BytesReference> weigher = (k, v) -> k.ramBytesUsed(k.key.ramBytesUsed()) + v.ramBytesUsed(); this.cacheCleanupManager = new IndicesRequestCacheCleanupManager( threadPool, - INDICES_REQUEST_CACHE_CLEAN_INTERVAL_SETTING.get(settings), + INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING.get(settings), getStalenessThreshold(settings) ); this.cacheEntityLookup = cacheEntityFunction; + this.clusterService = clusterService; + this.clusterService.getClusterSettings() + .addSettingsUpdateConsumer(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING, 
this::setStalenessThreshold); this.cache = cacheService.createCache( new CacheConfig.Builder().setSettings(settings) .setWeigher(weigher) @@ -208,6 +217,11 @@ private double getStalenessThreshold(Settings settings) { return RatioValue.parseRatioValue(threshold).getAsRatio(); } + // pkg-private for testing + void setStalenessThreshold(String threshold) { + this.cacheCleanupManager.updateStalenessThreshold(RatioValue.parseRatioValue(threshold).getAsRatio()); + } + void clear(CacheEntity entity) { cacheCleanupManager.enqueueCleanupKey(new CleanupKey(entity, null)); cacheCleanupManager.forceCleanCache(); @@ -274,7 +288,6 @@ BytesReference getOrCompute( } else { cacheEntity.onHit(); } - return value; } @@ -473,7 +486,7 @@ class IndicesRequestCacheCleanupManager implements Closeable { private final Set keysToClean; private final ConcurrentMap> cleanupKeyToCountMap; private final AtomicInteger staleKeysCount; - private final double stalenessThreshold; + private volatile double stalenessThreshold; private final IndicesRequestCacheCleaner cacheCleaner; IndicesRequestCacheCleanupManager(ThreadPool threadpool, TimeValue cleanInterval, double stalenessThreshold) { @@ -485,6 +498,18 @@ class IndicesRequestCacheCleanupManager implements Closeable { threadpool.schedule(cacheCleaner, cleanInterval, ThreadPool.Names.SAME); } + void updateStalenessThreshold(double stalenessThreshold) { + double oldStalenessThreshold = this.stalenessThreshold; + this.stalenessThreshold = stalenessThreshold; + if (logger.isDebugEnabled()) { + logger.debug( + "Staleness threshold for indices request cache changed to {} from {}", + this.stalenessThreshold, + oldStalenessThreshold + ); + } + } + /** * Enqueue cleanup key. * @@ -508,7 +533,7 @@ void enqueueCleanupKey(CleanupKey cleanupKey) { * @param cleanupKey the CleanupKey to be updated in the map */ private void updateStaleCountOnCacheInsert(CleanupKey cleanupKey) { - if (stalenessThreshold == 0.0 || cleanupKey.entity == null) { + if (cleanupKey.entity == null) { return; } IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); @@ -596,7 +621,7 @@ private void updateStaleCountOnEntryRemoval(CleanupKey cleanupKey, RemovalNotifi * @param cleanupKey the CleanupKey that has been marked for cleanup */ private void incrementStaleKeysCount(CleanupKey cleanupKey) { - if (stalenessThreshold == 0.0 || cleanupKey.entity == null) { + if (cleanupKey.entity == null) { return; } IndexShard indexShard = (IndexShard) cleanupKey.entity.getCacheIdentity(); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index fd7d897a0e99c..251be8a990055 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -216,10 +216,11 @@ public class IndicesService extends AbstractLifecycleComponent IndicesClusterStateService.AllocatedIndices, IndexService.ShardStoreDeleter { private static final Logger logger = LogManager.getLogger(IndicesService.class); + public static final String INDICES_CACHE_CLEANUP_INTERVAL_SETTING_KEY = "indices.cache.cleanup_interval"; public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout"; public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting( - "indices.cache.cleanup_interval", + INDICES_CACHE_CLEANUP_INTERVAL_SETTING_KEY, TimeValue.timeValueMinutes(1), Property.NodeScope ); diff --git 
a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index 051acfe9d085a..71eb30fca2385 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -906,6 +906,40 @@ public void testClosingIndexWipesStats() throws Exception { IOUtils.close(secondReader); } + public void testCacheCleanupBasedOnStaleThreshold_thresholdUpdate() throws Exception { + threadPool = getThreadPool(); + Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "51%").build(); + cache = getIndicesRequestCache(settings); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + + // Get 2 entries into the cache + cache.getOrCompute(getEntity(indexShard), getLoader(reader), reader, getTermBytes()); + cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes()); + assertEquals(2, cache.count()); + + // Close the reader, to be enqueued for cleanup + // 1 out of 2 keys ie 50% are now stale. + reader.close(); + // cache count should not be affected + assertEquals(2, cache.count()); + + // clean cache with 51% staleness threshold + cache.cacheCleanupManager.cleanCache(); + // cleanup should have been ignored + assertEquals(2, cache.count()); + + cache.setStalenessThreshold("49%"); + // clean cache with 49% staleness threshold + cache.cacheCleanupManager.cleanCache(); + // cleanup should NOT have been ignored + assertEquals(1, cache.count()); + + IOUtils.close(secondReader); + } + public void testEviction() throws Exception { final ByteSizeValue size; { From 207bbad6bfaffde580a8425f88627c3638547087 Mon Sep 17 00:00:00 2001 From: Peter Zhu Date: Sat, 27 Apr 2024 00:47:39 -0400 Subject: [PATCH 3/5] Switch to macos-13 runner for precommit and assemble github actions due to macos-latest is now arm64 (#13412) * Switch to macos-13 for precommit and assemble due to macos-latest is now arm64 Signed-off-by: Peter Zhu * Update changelog Signed-off-by: Peter Zhu * Add action Signed-off-by: Peter Zhu * Add action Signed-off-by: Peter Zhu * more changes Signed-off-by: Peter Zhu * Remove colima start command as action already start it Signed-off-by: Peter Zhu --------- Signed-off-by: Peter Zhu --- .github/workflows/assemble.yml | 7 ++----- .github/workflows/precommit.yml | 2 +- CHANGELOG.md | 1 + 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml index 382105364c048..d18170e9ea6b7 100644 --- a/.github/workflows/assemble.yml +++ b/.github/workflows/assemble.yml @@ -8,7 +8,7 @@ jobs: strategy: matrix: java: [ 11, 17, 21 ] - os: [ubuntu-latest, windows-latest, macos-latest] + os: [ubuntu-latest, windows-latest, macos-13] steps: - uses: actions/checkout@v4 - name: Set up JDK ${{ matrix.java }} @@ -18,10 +18,7 @@ jobs: distribution: temurin - name: Setup docker (missing on MacOS) if: runner.os == 'macos' - run: | - brew install docker - colima start - sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock + uses: douglascamata/setup-docker-macos-action@main - name: Run Gradle (assemble) run: | ./gradlew assemble --parallel --no-build-cache 
-PDISABLE_BUILD_CACHE diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 800aacec98516..95ca49ac9cb43 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -8,7 +8,7 @@ jobs: strategy: matrix: java: [ 11, 17, 21 ] - os: [ubuntu-latest, windows-latest, macos-latest] + os: [ubuntu-latest, windows-latest, macos-13] steps: - uses: actions/checkout@v4 - name: Set up JDK ${{ matrix.java }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a351a1061e15..90be6b21dfe9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Ignoring unavailable shards during search request execution with ignore_available parameter ([#13298](https://github.com/opensearch-project/OpenSearch/pull/13298)) - Refactoring globMatch using simpleMatchWithNormalizedStrings from Regex ([#13104](https://github.com/opensearch-project/OpenSearch/pull/13104)) - [BWC and API enforcement] Reconsider the breaking changes check policy to detect breaking changes against released versions ([#13292](https://github.com/opensearch-project/OpenSearch/pull/13292)) +- Switch to macos-13 runner for precommit and assemble github actions due to macos-latest is now arm64 ([#13412](https://github.com/opensearch-project/OpenSearch/pull/13412)) ### Deprecated From f84d28d4371dbd00356cccc01366d87d21814f36 Mon Sep 17 00:00:00 2001 From: peteralfonsi Date: Sun, 28 Apr 2024 08:15:18 -0700 Subject: [PATCH 4/5] [Tiered Caching] Gate CacheStatsHolder logic behind FeatureFlags.PLUGGABLE_CACHE setting (#13238) Stats rework step 2 of 4 --------- Signed-off-by: Peter Alfonsi Co-authored-by: Peter Alfonsi --- CHANGELOG.md | 1 + .../cache/store/disk/EhcacheDiskCache.java | 4 +- .../common/cache/stats/CacheStatsHolder.java | 283 +--------------- .../cache/stats/DefaultCacheStatsHolder.java | 306 ++++++++++++++++++ .../cache/stats/NoopCacheStatsHolder.java | 68 ++++ .../cache/store/OpenSearchOnHeapCache.java | 13 +- .../indices/IndicesRequestCache.java | 7 - ...java => DefaultCacheStatsHolderTests.java} | 43 +-- .../stats/ImmutableCacheStatsHolderTests.java | 23 +- .../store/OpenSearchOnHeapCacheTests.java | 38 ++- .../indices/IndicesRequestCacheTests.java | 15 +- 11 files changed, 487 insertions(+), 314 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java create mode 100644 server/src/main/java/org/opensearch/common/cache/stats/NoopCacheStatsHolder.java rename server/src/test/java/org/opensearch/common/cache/stats/{CacheStatsHolderTests.java => DefaultCacheStatsHolderTests.java} (85%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90be6b21dfe9e..a7d3ebc7f65dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add an individual setting of rate limiter for segment replication ([#12959](https://github.com/opensearch-project/OpenSearch/pull/12959)) - [Streaming Indexing] Ensure support of the new transport by security plugin ([#13174](https://github.com/opensearch-project/OpenSearch/pull/13174)) - Add cluster setting to dynamically configure the buckets for filter rewrite optimization. 
([#13179](https://github.com/opensearch-project/OpenSearch/pull/13179)) +- [Tiered Caching] Gate new stats logic behind FeatureFlags.PLUGGABLE_CACHE ([#13238](https://github.com/opensearch-project/OpenSearch/pull/13238)) - [Tiered Caching] Add a dynamic setting to disable/enable disk cache. ([#13373](https://github.com/opensearch-project/OpenSearch/pull/13373)) - [Remote Store] Add capability of doing refresh as determined by the translog ([#12992](https://github.com/opensearch-project/OpenSearch/pull/12992)) - [Tiered caching] Make Indices Request Cache Stale Key Mgmt Threshold setting dynamic ([#12941](https://github.com/opensearch-project/OpenSearch/pull/12941)) diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 185d51732a116..eea13ce70ccb5 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -25,6 +25,7 @@ import org.opensearch.common.cache.serializer.ICacheKeySerializer; import org.opensearch.common.cache.serializer.Serializer; import org.opensearch.common.cache.stats.CacheStatsHolder; +import org.opensearch.common.cache.stats.DefaultCacheStatsHolder; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.cache.store.builders.ICacheBuilder; import org.opensearch.common.cache.store.config.CacheConfig; @@ -162,7 +163,8 @@ private EhcacheDiskCache(Builder builder) { this.ehCacheEventListener = new EhCacheEventListener(builder.getRemovalListener(), builder.getWeigher()); this.cache = buildCache(Duration.ofMillis(expireAfterAccess.getMillis()), builder); List dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null"); - this.cacheStatsHolder = new CacheStatsHolder(dimensionNames); + // If this cache is being used, FeatureFlags.PLUGGABLE_CACHE is already on, so we can always use the DefaultCacheStatsHolder. + this.cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames); } @SuppressWarnings({ "rawtypes" }) diff --git a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java index a8b7c27ef9e79..a1cfb8d806af3 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java @@ -8,288 +8,31 @@ package org.opensearch.common.cache.stats; -import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Consumer; /** - * A class ICache implementations use to internally keep track of their stats across multiple dimensions. - * Not intended to be exposed outside the cache; for this, caches use getImmutableCacheStatsHolder() to create an immutable - * copy of the current state of the stats. - * Currently, in the IRC, the stats tracked in a CacheStatsHolder will not appear for empty shards that have had no cache - * operations done on them yet. This might be changed in the future, by exposing a method to add empty nodes to the - * tree in CacheStatsHolder in the ICache interface. 
- * - * @opensearch.experimental + * An interface extended by DefaultCacheStatsHolder and NoopCacheStatsHolder. */ -public class CacheStatsHolder { - - // The list of permitted dimensions. Should be ordered from "outermost" to "innermost", as you would like to - // aggregate them in an API response. - private final List dimensionNames; - // A tree structure based on dimension values, which stores stats values in its leaf nodes. - // Non-leaf nodes have stats matching the sum of their children. - // We use a tree structure, rather than a map with concatenated keys, to save on memory usage. If there are many leaf - // nodes that share a parent, that parent's dimension value will only be stored once, not many times. - private final Node statsRoot; - // To avoid sync problems, obtain a lock before creating or removing nodes in the stats tree. - // No lock is needed to edit stats on existing nodes. - private final Lock lock = new ReentrantLock(); - - public CacheStatsHolder(List dimensionNames) { - this.dimensionNames = Collections.unmodifiableList(dimensionNames); - this.statsRoot = new Node("", true); // The root node has the empty string as its dimension value - } - - public List getDimensionNames() { - return dimensionNames; - } - - // For all these increment functions, the dimensions list comes from the key, and contains all dimensions present in dimensionNames. - // The order has to match the order given in dimensionNames. - public void incrementHits(List dimensionValues) { - internalIncrement(dimensionValues, Node::incrementHits, true); - } - - public void incrementMisses(List dimensionValues) { - internalIncrement(dimensionValues, Node::incrementMisses, true); - } - - public void incrementEvictions(List dimensionValues) { - internalIncrement(dimensionValues, Node::incrementEvictions, true); - } - - public void incrementSizeInBytes(List dimensionValues, long amountBytes) { - internalIncrement(dimensionValues, (node) -> node.incrementSizeInBytes(amountBytes), true); - } - - // For decrements, we should not create nodes if they are absent. This protects us from erroneously decrementing values for keys - // which have been entirely deleted, for example in an async removal listener. - public void decrementSizeInBytes(List dimensionValues, long amountBytes) { - internalIncrement(dimensionValues, (node) -> node.decrementSizeInBytes(amountBytes), false); - } - - public void incrementEntries(List dimensionValues) { - internalIncrement(dimensionValues, Node::incrementEntries, true); - } - - public void decrementEntries(List dimensionValues) { - internalIncrement(dimensionValues, Node::decrementEntries, false); - } - - /** - * Reset number of entries and memory size when all keys leave the cache, but don't reset hit/miss/eviction numbers. - * This is in line with the behavior of the existing API when caches are cleared. - */ - public void reset() { - resetHelper(statsRoot); - } - - private void resetHelper(Node current) { - current.resetSizeAndEntries(); - for (Node child : current.children.values()) { - resetHelper(child); - } - } - - public long count() { - // Include this here so caches don't have to create an entire CacheStats object to run count(). 
- return statsRoot.getEntries(); - } - - private void internalIncrement(List dimensionValues, Consumer adder, boolean createNodesIfAbsent) { - assert dimensionValues.size() == dimensionNames.size(); - // First try to increment without creating nodes - boolean didIncrement = internalIncrementHelper(dimensionValues, statsRoot, 0, adder, false); - // If we failed to increment, because nodes had to be created, obtain the lock and run again while creating nodes if needed - if (!didIncrement && createNodesIfAbsent) { - try { - lock.lock(); - internalIncrementHelper(dimensionValues, statsRoot, 0, adder, true); - } finally { - lock.unlock(); - } - } - } - - /** - * Use the incrementer function to increment/decrement a value in the stats for a set of dimensions. - * If createNodesIfAbsent is true, and there is no stats for this set of dimensions, create one. - * Returns true if the increment was applied, false if not. - */ - private boolean internalIncrementHelper( - List dimensionValues, - Node node, - int depth, // Pass in the depth to avoid having to slice the list for each node. - Consumer adder, - boolean createNodesIfAbsent - ) { - if (depth == dimensionValues.size()) { - // This is the leaf node we are trying to reach - adder.accept(node); - return true; - } - - Node child = node.getChild(dimensionValues.get(depth)); - if (child == null) { - if (createNodesIfAbsent) { - boolean createMapInChild = depth < dimensionValues.size() - 1; - child = node.createChild(dimensionValues.get(depth), createMapInChild); - } else { - return false; - } - } - if (internalIncrementHelper(dimensionValues, child, depth + 1, adder, createNodesIfAbsent)) { - // Function returns true if the next node down was incremented - adder.accept(node); - return true; - } - return false; - } - - /** - * Produce an immutable version of these stats. - */ - public ImmutableCacheStatsHolder getImmutableCacheStatsHolder() { - return new ImmutableCacheStatsHolder(statsRoot.snapshot(), dimensionNames); - } - - public void removeDimensions(List dimensionValues) { - assert dimensionValues.size() == dimensionNames.size() : "Must specify a value for every dimension when removing from StatsHolder"; - // As we are removing nodes from the tree, obtain the lock - lock.lock(); - try { - removeDimensionsHelper(dimensionValues, statsRoot, 0); - } finally { - lock.unlock(); - } - } - - // Returns a CacheStatsCounterSnapshot object for the stats to decrement if the removal happened, null otherwise. - private ImmutableCacheStats removeDimensionsHelper(List dimensionValues, Node node, int depth) { - if (depth == dimensionValues.size()) { - // Pass up a snapshot of the original stats to avoid issues when the original is decremented by other fn invocations - return node.getImmutableStats(); - } - Node child = node.getChild(dimensionValues.get(depth)); - if (child == null) { - return null; - } - ImmutableCacheStats statsToDecrement = removeDimensionsHelper(dimensionValues, child, depth + 1); - if (statsToDecrement != null) { - // The removal took place, decrement values and remove this node from its parent if it's now empty - node.decrementBySnapshot(statsToDecrement); - if (child.getChildren().isEmpty()) { - node.children.remove(child.getDimensionValue()); - } - } - return statsToDecrement; - } - - // pkg-private for testing - Node getStatsRoot() { - return statsRoot; - } - - static class Node { - private final String dimensionValue; - // Map from dimensionValue to the DimensionNode for that dimension value. 
- final Map children; - // The stats for this node. If a leaf node, corresponds to the stats for this combination of dimensions; if not, - // contains the sum of its children's stats. - private CacheStats stats; - - // Used for leaf nodes to avoid allocating many unnecessary maps - private static final Map EMPTY_CHILDREN_MAP = new HashMap<>(); - - Node(String dimensionValue, boolean createChildrenMap) { - this.dimensionValue = dimensionValue; - if (createChildrenMap) { - this.children = new ConcurrentHashMap<>(); - } else { - this.children = EMPTY_CHILDREN_MAP; - } - this.stats = new CacheStats(); - } - - public String getDimensionValue() { - return dimensionValue; - } - - protected Map getChildren() { - // We can safely iterate over ConcurrentHashMap without worrying about thread issues. - return children; - } - - // Functions for modifying internal CacheStatsCounter without callers having to be aware of CacheStatsCounter - - void incrementHits() { - this.stats.incrementHits(); - } - - void incrementMisses() { - this.stats.incrementMisses(); - } - - void incrementEvictions() { - this.stats.incrementEvictions(); - } - - void incrementSizeInBytes(long amountBytes) { - this.stats.incrementSizeInBytes(amountBytes); - } +public interface CacheStatsHolder { + void incrementHits(List dimensionValues); - void decrementSizeInBytes(long amountBytes) { - this.stats.decrementSizeInBytes(amountBytes); - } + void incrementMisses(List dimensionValues); - void incrementEntries() { - this.stats.incrementEntries(); - } + void incrementEvictions(List dimensionValues); - void decrementEntries() { - this.stats.decrementEntries(); - } + void incrementSizeInBytes(List dimensionValues, long amountBytes); - long getEntries() { - return this.stats.getEntries(); - } + void decrementSizeInBytes(List dimensionValues, long amountBytes); - ImmutableCacheStats getImmutableStats() { - return this.stats.immutableSnapshot(); - } + void incrementEntries(List dimensionValues); - void decrementBySnapshot(ImmutableCacheStats snapshot) { - this.stats.subtract(snapshot); - } + void decrementEntries(List dimensionValues); - void resetSizeAndEntries() { - this.stats.resetSizeAndEntries(); - } + void reset(); - Node getChild(String dimensionValue) { - return children.get(dimensionValue); - } + long count(); - Node createChild(String dimensionValue, boolean createMapInChild) { - return children.computeIfAbsent(dimensionValue, (key) -> new Node(dimensionValue, createMapInChild)); - } + void removeDimensions(List dimensionValues); - ImmutableCacheStatsHolder.Node snapshot() { - TreeMap snapshotChildren = null; - if (!children.isEmpty()) { - snapshotChildren = new TreeMap<>(); - for (Node child : children.values()) { - snapshotChildren.put(child.getDimensionValue(), child.snapshot()); - } - } - return new ImmutableCacheStatsHolder.Node(dimensionValue, snapshotChildren, getImmutableStats()); - } - } + ImmutableCacheStatsHolder getImmutableCacheStatsHolder(); } diff --git a/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java new file mode 100644 index 0000000000000..ad943e0b2ed1a --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolder.java @@ -0,0 +1,306 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.stats; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; + +/** + * A class ICache implementations use to internally keep track of their stats across multiple dimensions. + * Not intended to be exposed outside the cache; for this, caches use getImmutableCacheStatsHolder() to create an immutable + * copy of the current state of the stats. + * Currently, in the IRC, the stats tracked in a CacheStatsHolder will not appear for empty shards that have had no cache + * operations done on them yet. This might be changed in the future, by exposing a method to add empty nodes to the + * tree in CacheStatsHolder in the ICache interface. + * + * @opensearch.experimental + */ +public class DefaultCacheStatsHolder implements CacheStatsHolder { + + // The list of permitted dimensions. Should be ordered from "outermost" to "innermost", as you would like to + // aggregate them in an API response. + private final List dimensionNames; + // A tree structure based on dimension values, which stores stats values in its leaf nodes. + // Non-leaf nodes have stats matching the sum of their children. + // We use a tree structure, rather than a map with concatenated keys, to save on memory usage. If there are many leaf + // nodes that share a parent, that parent's dimension value will only be stored once, not many times. + private final Node statsRoot; + // To avoid sync problems, obtain a lock before creating or removing nodes in the stats tree. + // No lock is needed to edit stats on existing nodes. + private final Lock lock = new ReentrantLock(); + + public DefaultCacheStatsHolder(List dimensionNames) { + this.dimensionNames = Collections.unmodifiableList(dimensionNames); + this.statsRoot = new Node("", true); // The root node has the empty string as its dimension value + } + + public List getDimensionNames() { + return dimensionNames; + } + + // For all these increment functions, the dimensions list comes from the key, and contains all dimensions present in dimensionNames. + // The order has to match the order given in dimensionNames. + @Override + public void incrementHits(List dimensionValues) { + internalIncrement(dimensionValues, Node::incrementHits, true); + } + + @Override + public void incrementMisses(List dimensionValues) { + internalIncrement(dimensionValues, Node::incrementMisses, true); + } + + @Override + public void incrementEvictions(List dimensionValues) { + internalIncrement(dimensionValues, Node::incrementEvictions, true); + } + + @Override + public void incrementSizeInBytes(List dimensionValues, long amountBytes) { + internalIncrement(dimensionValues, (node) -> node.incrementSizeInBytes(amountBytes), true); + } + + // For decrements, we should not create nodes if they are absent. This protects us from erroneously decrementing values for keys + // which have been entirely deleted, for example in an async removal listener. 
+    @Override
+    public void decrementSizeInBytes(List<String> dimensionValues, long amountBytes) {
+        internalIncrement(dimensionValues, (node) -> node.decrementSizeInBytes(amountBytes), false);
+    }
+
+    @Override
+    public void incrementEntries(List<String> dimensionValues) {
+        internalIncrement(dimensionValues, Node::incrementEntries, true);
+    }
+
+    @Override
+    public void decrementEntries(List<String> dimensionValues) {
+        internalIncrement(dimensionValues, Node::decrementEntries, false);
+    }
+
+    /**
+     * Reset the number of entries and the memory size when all keys leave the cache, but don't reset hit/miss/eviction numbers.
+     * This is in line with the behavior of the existing API when caches are cleared.
+     */
+    @Override
+    public void reset() {
+        resetHelper(statsRoot);
+    }
+
+    private void resetHelper(Node current) {
+        current.resetSizeAndEntries();
+        for (Node child : current.children.values()) {
+            resetHelper(child);
+        }
+    }
+
+    @Override
+    public long count() {
+        // Include this here so caches don't have to create an entire CacheStats object to run count().
+        return statsRoot.getEntries();
+    }
+
+    private void internalIncrement(List<String> dimensionValues, Consumer<Node> adder, boolean createNodesIfAbsent) {
+        assert dimensionValues.size() == dimensionNames.size();
+        // First try to increment without creating nodes
+        boolean didIncrement = internalIncrementHelper(dimensionValues, statsRoot, 0, adder, false);
+        // If we failed to increment because nodes had to be created, obtain the lock and run again, creating nodes if needed
+        if (!didIncrement && createNodesIfAbsent) {
+            try {
+                lock.lock();
+                internalIncrementHelper(dimensionValues, statsRoot, 0, adder, true);
+            } finally {
+                lock.unlock();
+            }
+        }
+    }
+
+    /**
+     * Use the incrementer function to increment/decrement a value in the stats for a set of dimensions.
+     * If createNodesIfAbsent is true and there are no stats for this set of dimensions, create them.
+     * Returns true if the increment was applied, false if not.
+     */
+    private boolean internalIncrementHelper(
+        List<String> dimensionValues,
+        Node node,
+        int depth, // Pass in the depth to avoid having to slice the list for each node.
+        Consumer<Node> adder,
+        boolean createNodesIfAbsent
+    ) {
+        if (depth == dimensionValues.size()) {
+            // This is the leaf node we are trying to reach
+            adder.accept(node);
+            return true;
+        }
+
+        Node child = node.getChild(dimensionValues.get(depth));
+        if (child == null) {
+            if (createNodesIfAbsent) {
+                boolean createMapInChild = depth < dimensionValues.size() - 1;
+                child = node.createChild(dimensionValues.get(depth), createMapInChild);
+            } else {
+                return false;
+            }
+        }
+        if (internalIncrementHelper(dimensionValues, child, depth + 1, adder, createNodesIfAbsent)) {
+            // The function returns true if the next node down was incremented
+            adder.accept(node);
+            return true;
+        }
+        return false;
+    }
+
+    /**
+     * Produce an immutable version of these stats.
+     */
+    @Override
+    public ImmutableCacheStatsHolder getImmutableCacheStatsHolder() {
+        return new ImmutableCacheStatsHolder(statsRoot.snapshot(), dimensionNames);
+    }
+
+    @Override
+    public void removeDimensions(List<String> dimensionValues) {
+        assert dimensionValues.size() == dimensionNames.size() : "Must specify a value for every dimension when removing from StatsHolder";
+        // As we are removing nodes from the tree, obtain the lock
+        lock.lock();
+        try {
+            removeDimensionsHelper(dimensionValues, statsRoot, 0);
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    // Returns an ImmutableCacheStats object for the stats to decrement if the removal happened, null otherwise.
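+    // Illustrative walk (hypothetical values, not part of the original change): removing ["A2", "B3"] from a tree
+    // with leaves ("A1","B1"), ("A2","B2"), ("A2","B3") snapshots the ("A2","B3") leaf, subtracts that snapshot
+    // from the "A2" node and from the root on the way back up, and deletes the ("A2","B3") leaf from "A2"'s
+    // children; "A2" itself survives because it still has the child "B2".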
+    private ImmutableCacheStats removeDimensionsHelper(List<String> dimensionValues, Node node, int depth) {
+        if (depth == dimensionValues.size()) {
+            // Pass up a snapshot of the original stats to avoid issues when the original is decremented by other fn invocations
+            return node.getImmutableStats();
+        }
+        Node child = node.getChild(dimensionValues.get(depth));
+        if (child == null) {
+            return null;
+        }
+        ImmutableCacheStats statsToDecrement = removeDimensionsHelper(dimensionValues, child, depth + 1);
+        if (statsToDecrement != null) {
+            // The removal took place; decrement values and remove this node from its parent if it's now empty
+            node.decrementBySnapshot(statsToDecrement);
+            if (child.getChildren().isEmpty()) {
+                node.children.remove(child.getDimensionValue());
+            }
+        }
+        return statsToDecrement;
+    }
+
+    // pkg-private for testing
+    Node getStatsRoot() {
+        return statsRoot;
+    }
+
+    static class Node {
+        private final String dimensionValue;
+        // Map from a dimension value to the child Node for that dimension value.
+        final Map<String, Node> children;
+        // The stats for this node. If a leaf node, corresponds to the stats for this combination of dimensions; if not,
+        // contains the sum of its children's stats.
+        private CacheStats stats;
+
+        // Used for leaf nodes to avoid allocating many unnecessary maps
+        private static final Map<String, Node> EMPTY_CHILDREN_MAP = new HashMap<>();
+
+        Node(String dimensionValue, boolean createChildrenMap) {
+            this.dimensionValue = dimensionValue;
+            if (createChildrenMap) {
+                this.children = new ConcurrentHashMap<>();
+            } else {
+                this.children = EMPTY_CHILDREN_MAP;
+            }
+            this.stats = new CacheStats();
+        }
+
+        public String getDimensionValue() {
+            return dimensionValue;
+        }
+
+        protected Map<String, Node> getChildren() {
+            // We can safely iterate over ConcurrentHashMap without worrying about thread issues.
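+            // (Weakly consistent iteration: ConcurrentHashMap iterators never throw
+            // ConcurrentModificationException, though they may or may not observe concurrent updates.)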
+            return children;
+        }
+
+        // Functions for modifying internal CacheStatsCounter without callers having to be aware of CacheStatsCounter
+
+        void incrementHits() {
+            this.stats.incrementHits();
+        }
+
+        void incrementMisses() {
+            this.stats.incrementMisses();
+        }
+
+        void incrementEvictions() {
+            this.stats.incrementEvictions();
+        }
+
+        void incrementSizeInBytes(long amountBytes) {
+            this.stats.incrementSizeInBytes(amountBytes);
+        }
+
+        void decrementSizeInBytes(long amountBytes) {
+            this.stats.decrementSizeInBytes(amountBytes);
+        }
+
+        void incrementEntries() {
+            this.stats.incrementEntries();
+        }
+
+        void decrementEntries() {
+            this.stats.decrementEntries();
+        }
+
+        long getEntries() {
+            return this.stats.getEntries();
+        }
+
+        ImmutableCacheStats getImmutableStats() {
+            return this.stats.immutableSnapshot();
+        }
+
+        void decrementBySnapshot(ImmutableCacheStats snapshot) {
+            this.stats.subtract(snapshot);
+        }
+
+        void resetSizeAndEntries() {
+            this.stats.resetSizeAndEntries();
+        }
+
+        Node getChild(String dimensionValue) {
+            return children.get(dimensionValue);
+        }
+
+        Node createChild(String dimensionValue, boolean createMapInChild) {
+            return children.computeIfAbsent(dimensionValue, (key) -> new Node(dimensionValue, createMapInChild));
+        }
+
+        ImmutableCacheStatsHolder.Node snapshot() {
+            TreeMap<String, ImmutableCacheStatsHolder.Node> snapshotChildren = null;
+            if (!children.isEmpty()) {
+                snapshotChildren = new TreeMap<>();
+                for (Node child : children.values()) {
+                    snapshotChildren.put(child.getDimensionValue(), child.snapshot());
+                }
+            }
+            return new ImmutableCacheStatsHolder.Node(dimensionValue, snapshotChildren, getImmutableStats());
+        }
+    }
+}
diff --git a/server/src/main/java/org/opensearch/common/cache/stats/NoopCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/NoopCacheStatsHolder.java
new file mode 100644
index 0000000000000..b7debbd8a8eab
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/cache/stats/NoopCacheStatsHolder.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.cache.stats;
+
+import java.util.List;
+
+/**
+ * A dummy version of CacheStatsHolder, which cache implementations use when FeatureFlags.PLUGGABLE_CACHE is false.
+ * Returns all-zero stats when calling getImmutableCacheStatsHolder(). Always returns 0 for count().
+ * A singleton instance is used to save memory.
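+ * For example (illustrative): a cache created while the feature flag is off can still safely call
+ * NoopCacheStatsHolder.getInstance().incrementHits(dimensionValues) on every hit; all mutating
+ * methods are empty no-ops, so there is no overhead beyond the method call.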
+ */
+public class NoopCacheStatsHolder implements CacheStatsHolder {
+    private static final NoopCacheStatsHolder singletonInstance = new NoopCacheStatsHolder();
+    private static final ImmutableCacheStatsHolder immutableCacheStatsHolder;
+    static {
+        ImmutableCacheStatsHolder.Node dummyNode = new ImmutableCacheStatsHolder.Node("", null, new ImmutableCacheStats(0, 0, 0, 0, 0));
+        immutableCacheStatsHolder = new ImmutableCacheStatsHolder(dummyNode, List.of());
+    }
+
+    private NoopCacheStatsHolder() {}
+
+    public static NoopCacheStatsHolder getInstance() {
+        return singletonInstance;
+    }
+
+    @Override
+    public void incrementHits(List<String> dimensionValues) {}
+
+    @Override
+    public void incrementMisses(List<String> dimensionValues) {}
+
+    @Override
+    public void incrementEvictions(List<String> dimensionValues) {}
+
+    @Override
+    public void incrementSizeInBytes(List<String> dimensionValues, long amountBytes) {}
+
+    @Override
+    public void decrementSizeInBytes(List<String> dimensionValues, long amountBytes) {}
+
+    @Override
+    public void incrementEntries(List<String> dimensionValues) {}
+
+    @Override
+    public void decrementEntries(List<String> dimensionValues) {}
+
+    @Override
+    public void reset() {}
+
+    @Override
+    public long count() {
+        return 0;
+    }
+
+    @Override
+    public void removeDimensions(List<String> dimensionValues) {}
+
+    @Override
+    public ImmutableCacheStatsHolder getImmutableCacheStatsHolder() {
+        return immutableCacheStatsHolder;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
index 29e5667c9f27d..35c951e240a3a 100644
--- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
+++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
@@ -19,7 +19,9 @@
 import org.opensearch.common.cache.RemovalReason;
 import org.opensearch.common.cache.settings.CacheSettings;
 import org.opensearch.common.cache.stats.CacheStatsHolder;
+import org.opensearch.common.cache.stats.DefaultCacheStatsHolder;
 import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
+import org.opensearch.common.cache.stats.NoopCacheStatsHolder;
 import org.opensearch.common.cache.store.builders.ICacheBuilder;
 import org.opensearch.common.cache.store.config.CacheConfig;
 import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings;
@@ -62,7 +64,13 @@ public OpenSearchOnHeapCache(Builder<K, V> builder) {
         }
         cache = cacheBuilder.build();
         this.dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null");
-        this.cacheStatsHolder = new CacheStatsHolder(dimensionNames);
+        // Use noop stats when pluggable caching is off
+        boolean useNoopStats = !FeatureFlags.PLUGGABLE_CACHE_SETTING.get(builder.getSettings());
+        if (useNoopStats) {
+            this.cacheStatsHolder = NoopCacheStatsHolder.getInstance();
+        } else {
+            this.cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames);
+        }
         this.removalListener = builder.getRemovalListener();
         this.weigher = builder.getWeigher();
     }
@@ -121,7 +129,7 @@ public Iterable<ICacheKey<K>> keys() {

     @Override
     public long count() {
-        return cacheStatsHolder.count();
+        return cache.count();
     }

     @Override
@@ -164,6 +172,7 @@ public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType,
             Map<String, Setting<?>> settingList = OpenSearchOnHeapCacheSettings.getSettingListForCacheType(cacheType);
             Settings settings = config.getSettings();
             ICacheBuilder<K, V> builder = new Builder<K, V>().setDimensionNames(config.getDimensionNames())
+                .setSettings(config.getSettings())
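+                // Forwarding the settings here is what lets the constructor above read
+                // FeatureFlags.PLUGGABLE_CACHE_SETTING and choose between NoopCacheStatsHolder
+                // and DefaultCacheStatsHolder.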
                .setMaximumWeightInBytes(((ByteSizeValue) settingList.get(MAXIMUM_SIZE_IN_BYTES_KEY).get(settings)).getBytes())
                .setExpireAfterAccess(((TimeValue) settingList.get(EXPIRE_AFTER_ACCESS_KEY).get(settings)))
                .setWeigher(config.getWeigher())
diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java
index 105239b2c351b..f9a9c1830e1ad 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java
@@ -825,13 +825,6 @@ long count() {
         return cache.count();
     }

-    /**
-     * Returns the current size in bytes of the cache
-     */
-    long getSizeInBytes() {
-        return cache.stats().getTotalSizeInBytes();
-    }
-
     /**
      * Returns the current cache stats. Pkg-private for testing.
      */
diff --git a/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolderTests.java
similarity index 85%
rename from server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java
rename to server/src/test/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolderTests.java
index 390cd4d601a4b..fe12673bb9f6a 100644
--- a/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java
+++ b/server/src/test/java/org/opensearch/common/cache/stats/DefaultCacheStatsHolderTests.java
@@ -21,18 +21,23 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;

-public class CacheStatsHolderTests extends OpenSearchTestCase {
+public class DefaultCacheStatsHolderTests extends OpenSearchTestCase {
     public void testAddAndGet() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2", "dim3", "dim4");
-        CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames);
-        Map<String, List<String>> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 10);
-        Map<List<String>, CacheStats> expected = CacheStatsHolderTests.populateStats(cacheStatsHolder, usedDimensionValues, 1000, 10);
+        DefaultCacheStatsHolder cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames);
+        Map<String, List<String>> usedDimensionValues = DefaultCacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 10);
+        Map<List<String>, CacheStats> expected = DefaultCacheStatsHolderTests.populateStats(
+            cacheStatsHolder,
+            usedDimensionValues,
+            1000,
+            10
+        );

         // test the value in the map is as expected for each distinct combination of values
         for (List<String> dimensionValues : expected.keySet()) {
             CacheStats expectedCounter = expected.get(dimensionValues);

-            ImmutableCacheStats actualStatsHolder = CacheStatsHolderTests.getNode(dimensionValues, cacheStatsHolder.getStatsRoot())
+            ImmutableCacheStats actualStatsHolder = DefaultCacheStatsHolderTests.getNode(dimensionValues, cacheStatsHolder.getStatsRoot())
                 .getImmutableStats();
             ImmutableCacheStats actualCacheStats = getNode(dimensionValues, cacheStatsHolder.getStatsRoot()).getImmutableStats();
@@ -53,7 +58,7 @@ public void testAddAndGet() throws Exception {

     public void testReset() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2");
-        CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames);
+        DefaultCacheStatsHolder cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames);
         Map<String, List<String>> usedDimensionValues = getUsedDimensionValues(cacheStatsHolder, 10);
         Map<List<String>, CacheStats> expected = populateStats(cacheStatsHolder, usedDimensionValues, 100, 10);

@@ -64,7 +69,7 @@ public void testReset() throws Exception {
            originalCounter.sizeInBytes = new CounterMetric();
            originalCounter.entries = new CounterMetric();

-            CacheStatsHolder.Node node = getNode(dimensionValues, cacheStatsHolder.getStatsRoot());
+            DefaultCacheStatsHolder.Node node = getNode(dimensionValues, cacheStatsHolder.getStatsRoot());
             ImmutableCacheStats actual = node.getImmutableStats();
             assertEquals(originalCounter.immutableSnapshot(), actual);
         }
@@ -72,7 +77,7 @@ public void testReset() throws Exception {

     public void testDropStatsForDimensions() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2");
-        CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames);
+        DefaultCacheStatsHolder cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames);

         // Create stats for the following dimension sets
         List<List<String>> populatedStats = List.of(List.of("A1", "B1"), List.of("A2", "B2"), List.of("A2", "B3"));
@@ -108,7 +113,7 @@ public void testDropStatsForDimensions() throws Exception {

     public void testCount() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2");
-        CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames);
+        DefaultCacheStatsHolder cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames);

         Map<String, List<String>> usedDimensionValues = getUsedDimensionValues(cacheStatsHolder, 10);
         Map<List<String>, CacheStats> expected = populateStats(cacheStatsHolder, usedDimensionValues, 100, 10);
@@ -121,7 +126,7 @@ public void testCount() throws Exception {

     public void testConcurrentRemoval() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2");
-        CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames);
+        DefaultCacheStatsHolder cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames);

         // Create stats for the following dimension sets
         List<List<String>> populatedStats = List.of(List.of("A1", "B1"), List.of("A2", "B2"), List.of("A2", "B3"));
@@ -169,8 +174,8 @@ public void testConcurrentRemoval() throws Exception {
     * Returns the node found by following these dimension values down from the root node.
     * Returns null if no such node exists.
     */
-    static CacheStatsHolder.Node getNode(List<String> dimensionValues, CacheStatsHolder.Node root) {
-        CacheStatsHolder.Node current = root;
+    static DefaultCacheStatsHolder.Node getNode(List<String> dimensionValues, DefaultCacheStatsHolder.Node root) {
+        DefaultCacheStatsHolder.Node current = root;
         for (String dimensionValue : dimensionValues) {
             current = current.getChildren().get(dimensionValue);
             if (current == null) {
@@ -181,7 +186,7 @@ static CacheStatsHolder.Node getNode(List<String> dimensionValues, CacheStatsHolder.Node root) {
     }

     static Map<List<String>, CacheStats> populateStats(
-        CacheStatsHolder cacheStatsHolder,
+        DefaultCacheStatsHolder cacheStatsHolder,
         Map<String, List<String>> usedDimensionValues,
         int numDistinctValuePairs,
         int numRepetitionsPerValue
@@ -211,7 +216,7 @@ static Map<List<String>, CacheStats> populateStats(
                     expected.get(dimensions).evictions.inc(statsToInc.getEvictions());
                     expected.get(dimensions).sizeInBytes.inc(statsToInc.getSizeInBytes());
                     expected.get(dimensions).entries.inc(statsToInc.getEntries());
-                    CacheStatsHolderTests.populateStatsHolderFromStatsValueMap(cacheStatsHolder, Map.of(dimensions, statsToInc));
+                    DefaultCacheStatsHolderTests.populateStatsHolderFromStatsValueMap(cacheStatsHolder, Map.of(dimensions, statsToInc));
                 }
                 countDownLatch.countDown();
             });
@@ -240,7 +245,7 @@ private static List<String> getRandomDimList(
         return result;
     }

-    static Map<String, List<String>> getUsedDimensionValues(CacheStatsHolder cacheStatsHolder, int numValuesPerDim) {
+    static Map<String, List<String>> getUsedDimensionValues(DefaultCacheStatsHolder cacheStatsHolder, int numValuesPerDim) {
         Map<String, List<String>> usedDimensionValues = new HashMap<>();
         for (int i = 0; i < cacheStatsHolder.getDimensionNames().size(); i++) {
             List<String> values = new ArrayList<>();
@@ -252,20 +257,20 @@ static Map<String, List<String>> getUsedDimensionValues(CacheStatsHolder cacheStatsHolder, int numValuesPerDim) {
         return usedDimensionValues;
     }

-    private void assertSumOfChildrenStats(CacheStatsHolder.Node current) {
+    private void assertSumOfChildrenStats(DefaultCacheStatsHolder.Node current) {
         if (!current.children.isEmpty()) {
             CacheStats expectedTotal = new CacheStats();
-            for (CacheStatsHolder.Node child : current.children.values()) {
+            for (DefaultCacheStatsHolder.Node child : current.children.values()) {
                 expectedTotal.add(child.getImmutableStats());
             }
             assertEquals(expectedTotal.immutableSnapshot(), current.getImmutableStats());
-            for (CacheStatsHolder.Node child : current.children.values()) {
+            for (DefaultCacheStatsHolder.Node child : current.children.values()) {
                 assertSumOfChildrenStats(child);
             }
         }
     }

-    static void populateStatsHolderFromStatsValueMap(CacheStatsHolder cacheStatsHolder, Map<List<String>, CacheStats> statsMap) {
+    static void populateStatsHolderFromStatsValueMap(DefaultCacheStatsHolder cacheStatsHolder, Map<List<String>, CacheStats> statsMap) {
         for (Map.Entry<List<String>, CacheStats> entry : statsMap.entrySet()) {
             CacheStats stats = entry.getValue();
             List<String> dims = entry.getKey();
diff --git a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java
index 933b8abd6e392..5a4511fa654dd 100644
--- a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java
+++ b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java
@@ -17,17 +17,24 @@ public class ImmutableCacheStatsHolderTests extends OpenSearchTestCase {

     public void testGet() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2", "dim3", "dim4");
-        CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames);
-        Map<String, List<String>> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 10);
-        Map<List<String>, CacheStats> expected = CacheStatsHolderTests.populateStats(cacheStatsHolder, usedDimensionValues, 1000, 10);
+        DefaultCacheStatsHolder cacheStatsHolder = new DefaultCacheStatsHolder(dimensionNames);
+        Map<String, List<String>> usedDimensionValues = DefaultCacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 10);
+        Map<List<String>, CacheStats> expected = DefaultCacheStatsHolderTests.populateStats(
+            cacheStatsHolder,
+            usedDimensionValues,
+            1000,
+            10
+        );
         ImmutableCacheStatsHolder stats = cacheStatsHolder.getImmutableCacheStatsHolder();

         // test the value in the map is as expected for each distinct combination of values
         for (List<String> dimensionValues : expected.keySet()) {
             CacheStats expectedCounter = expected.get(dimensionValues);

-            ImmutableCacheStats actualCacheStatsHolder = CacheStatsHolderTests.getNode(dimensionValues, cacheStatsHolder.getStatsRoot())
-                .getImmutableStats();
+            ImmutableCacheStats actualCacheStatsHolder = DefaultCacheStatsHolderTests.getNode(
+                dimensionValues,
+                cacheStatsHolder.getStatsRoot()
+            ).getImmutableStats();
             ImmutableCacheStats actualImmutableCacheStatsHolder = getNode(dimensionValues, stats.getStatsRoot()).getStats();

             assertEquals(expectedCounter.immutableSnapshot(), actualCacheStatsHolder);
@@ -52,9 +59,9 @@ public void testGet() throws Exception {

     public void testEmptyDimsList() throws Exception {
         // If the dimension list is empty, the tree should have only the root node containing the total stats.
-        CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(List.of());
-        Map<String, List<String>> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 100);
-        CacheStatsHolderTests.populateStats(cacheStatsHolder, usedDimensionValues, 10, 100);
+        DefaultCacheStatsHolder cacheStatsHolder = new DefaultCacheStatsHolder(List.of());
+        Map<String, List<String>> usedDimensionValues = DefaultCacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 100);
+        DefaultCacheStatsHolderTests.populateStats(cacheStatsHolder, usedDimensionValues, 10, 100);
         ImmutableCacheStatsHolder stats = cacheStatsHolder.getImmutableCacheStatsHolder();
         ImmutableCacheStatsHolder.Node statsRoot = stats.getStatsRoot();
diff --git a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
index 008dc7c2e0902..00dbf43bc37be 100644
--- a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
+++ b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
@@ -16,10 +16,12 @@
 import org.opensearch.common.cache.RemovalListener;
 import org.opensearch.common.cache.RemovalNotification;
 import org.opensearch.common.cache.stats.ImmutableCacheStats;
+import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
 import org.opensearch.common.cache.store.config.CacheConfig;
 import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings;
 import org.opensearch.common.metrics.CounterMetric;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.test.OpenSearchTestCase;

 import java.util.ArrayList;
@@ -37,7 +39,9 @@ public void testStats() throws Exception {
         MockRemovalListener<String, String> listener = new MockRemovalListener<>();
         int maxKeys = between(10, 50);
         int numEvicted = between(10, 20);
-        OpenSearchOnHeapCache<String, String> cache = getCache(maxKeys, listener);
+        OpenSearchOnHeapCache<String, String> cache = getCache(maxKeys, listener, true);
+
+        // When the pluggable caches setting is on, we should get stats as expected from cache.stats().
         List<ICacheKey<String>> keysAdded = new ArrayList<>();
         int numAdded = maxKeys + numEvicted;
@@ -77,7 +81,34 @@ public void testStats() throws Exception {
         }
     }

-    private OpenSearchOnHeapCache<String, String> getCache(int maxSizeKeys, MockRemovalListener<String, String> listener) {
+    public void testStatsWithoutPluggableCaches() throws Exception {
+        // When the pluggable caches setting is off, we should get all-zero stats from cache.stats(), but count() should still work.
+        MockRemovalListener<String, String> listener = new MockRemovalListener<>();
+        int maxKeys = between(10, 50);
+        int numEvicted = between(10, 20);
+        OpenSearchOnHeapCache<String, String> cache = getCache(maxKeys, listener, false);
+
+        List<ICacheKey<String>> keysAdded = new ArrayList<>();
+        int numAdded = maxKeys + numEvicted;
+        for (int i = 0; i < numAdded; i++) {
+            ICacheKey<String> key = getICacheKey(UUID.randomUUID().toString());
+            keysAdded.add(key);
+            cache.computeIfAbsent(key, getLoadAwareCacheLoader());
+
+            assertEquals(Math.min(maxKeys, i + 1), cache.count());
+            assertZeroStats(cache.stats());
+        }
+    }
+
+    private void assertZeroStats(ImmutableCacheStatsHolder stats) {
+        assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), stats.getTotalStats());
+    }
+
+    private OpenSearchOnHeapCache<String, String> getCache(
+        int maxSizeKeys,
+        MockRemovalListener<String, String> listener,
+        boolean pluggableCachesSetting
+    ) {
         ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
         Settings settings = Settings.builder()
             .put(
@@ -86,6 +117,7 @@ private OpenSearchOnHeapCache<String, String> getCache(int maxSizeKeys, MockRemovalListener<String, String> listener) {
                     .getKey(),
                 maxSizeKeys * keyValueSize + "b"
             )
+            .put(FeatureFlags.PLUGGABLE_CACHE, pluggableCachesSetting)
             .build();

         CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class)
@@ -102,7 +134,7 @@ private OpenSearchOnHeapCache<String, String> getCache(int maxSizeKeys, MockRemovalListener<String, String> listener) {
     public void testInvalidateWithDropDimensions() throws Exception {
         MockRemovalListener<String, String> listener = new MockRemovalListener<>();
         int maxKeys = 50;
-        OpenSearchOnHeapCache<String, String> cache = getCache(maxKeys, listener);
+        OpenSearchOnHeapCache<String, String> cache = getCache(maxKeys, listener, true);

         List<ICacheKey<String>> keysAdded = new ArrayList<>();
diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
index 71eb30fca2385..fc306f7c595d6 100644
--- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
+++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
@@ -51,6 +51,7 @@
 import org.opensearch.common.cache.RemovalReason;
 import org.opensearch.common.cache.module.CacheModule;
 import org.opensearch.common.cache.stats.ImmutableCacheStats;
+import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
 import org.opensearch.common.settings.Settings;
@@ -812,8 +813,12 @@ public void testClosingIndexWipesStats() throws Exception {
             assertNotNull(indexToKeep.getShard(i));
             assertNotNull(indexToClose.getShard(i));
         }
+
         threadPool = getThreadPool();
-        Settings settings = Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.001%").build();
+        Settings settings = Settings.builder()
+            .put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), "0.001%")
+            .put(FeatureFlags.PLUGGABLE_CACHE, true)
+            .build();
         cache = new IndicesRequestCache(settings, (shardId -> {
             IndexService indexService = null;
             try {
@@ -868,6 +873,7 @@ public void testClosingIndexWipesStats() throws Exception {
                 ShardId shardId = indexService.getShard(i).shardId();
                 List<String> dimensionValues = List.of(shardId.getIndexName(), shardId.toString());
                 initialDimensionValues.add(dimensionValues);
+                ImmutableCacheStatsHolder holder = cache.stats();
                 ImmutableCacheStats snapshot = cache.stats().getStatsForDimensionValues(dimensionValues);
                 assertNotNull(snapshot);
                 // check the values are not empty by confirming entries != 0, this should always be true since the missed value is loaded
@@ -954,13 +960,14 @@ public void testEviction() throws Exception {
             assertEquals("foo", value1.streamInput().readString());
             BytesReference value2 = cache.getOrCompute(getEntity(indexShard), getLoader(secondReader), secondReader, getTermBytes());
             assertEquals("bar", value2.streamInput().readString());
-            size = new ByteSizeValue(cache.getSizeInBytes());
+            size = indexShard.requestCache().stats().getMemorySize(); // Value from old API
             IOUtils.close(reader, secondReader, writer, dir, cache);
         }
         indexShard = createIndex("test1").getShard(0);
         IndicesRequestCache cache = new IndicesRequestCache(
-            // Add 5 instead of 1; the key size now depends on the length of dimension names and values so there's more variation
-            Settings.builder().put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.getBytes() + 5 + "b").build(),
+            // TODO: Add wiggle room to the max size to allow for the overhead of ICacheKey. This can be removed once the API PR goes
+            // in, as it updates the old API to account for the ICacheKey overhead.
+            Settings.builder().put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), (int) (size.getBytes() * 1.2) + "b").build(),
             (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))),
             new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(),
             threadPool,

From fc81a906da88d99c7ea09ffd9fec92ff81925cf6 Mon Sep 17 00:00:00 2001
From: Stephen Crawford <65832608+scrawfor99@users.noreply.github.com>
Date: Sun, 28 Apr 2024 19:44:34 -0400
Subject: [PATCH 5/5] Restore deleted section (#13415)

Signed-off-by: Stephen Crawford
---
 CONTRIBUTING.md | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index bce6ca0d49294..0ec0abe535dd0 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -9,6 +9,7 @@
   - [Changelog](#changelog)
   - [Review Process](#review-process)
 - [Tips for Success](#tips)
+- [Troubleshooting Failing Builds](#troubleshooting-failing-builds)

 # Contributing to OpenSearch

@@ -180,3 +181,13 @@ We have a lot of mechanisms to help expedite towards an accepted PR. Here are so
 In general, adding more guardrails to your changes increases the likelihood of swift PR acceptance. We can always relax these guard rails in smaller followup PRs. Reverting a GA feature is much more difficult. Check out the [DEVELOPER_GUIDE](./DEVELOPER_GUIDE.md#submitting-changes) for more useful tips.

+## Troubleshooting Failing Builds
+
+The OpenSearch testing framework offers many capabilities but exhibits significant complexity (it does a lot of randomization internally to cover as many edge cases and variations as possible). Unfortunately, this poses a challenge by making it harder to discover important issues/bugs in a straightforward way, and it may lead to so-called flaky tests - tests which flip randomly from success to failure without any code changes.
+
+If your pull request reports one or more failing tests on one of the checks, please:
+- check whether an existing [issue](https://github.com/opensearch-project/OpenSearch/issues) has already been reported for the test in question
+- if not, make sure the failure is not caused by your changes by running the failing test(s) locally a number of times (see the example below)
+- if you are sure the failure is not related to your changes, please open a new [bug](https://github.com/opensearch-project/OpenSearch/issues/new?assignees=&labels=bug%2C+untriaged&projects=&template=bug_template.md&title=%5BBUG%5D) with the `flaky-test` label
+- add a comment to your pull request referencing the issue(s) or bug report(s), explaining the failing build(s)
+- as a bonus, try to contribute a fix for the flaky test(s)
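+
+For example, a minimal way to check whether a test is flaky is to re-run it repeatedly in isolation, optionally pinning the randomization seed printed in the failing CI log (the module, test class, and iteration count below are illustrative; adjust them to your failure):
+
+```sh
+# Re-run a single test class 10 times with a fixed randomized-testing seed
+./gradlew ':server:test' --tests "org.opensearch.indices.IndicesRequestCacheTests" -Dtests.iters=10 -Dtests.seed=ABC123
+```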