diff --git a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/MemoryCache.kt b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/MemoryCache.kt
index 81bd3e2ea6f..35e7250c41f 100644
--- a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/MemoryCache.kt
+++ b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/MemoryCache.kt
@@ -1,13 +1,9 @@
 package com.apollographql.apollo3.cache.normalized.api
 
-import com.apollographql.apollo3.cache.normalized.api.internal.ConcurrentMap
 import com.apollographql.apollo3.cache.normalized.api.internal.Lock
 import com.apollographql.apollo3.cache.normalized.api.internal.LruCache
-import com.apollographql.apollo3.cache.normalized.api.internal.OptimisticNormalizedCache
 import com.apollographql.apollo3.cache.normalized.api.internal.patternToRegex
-import com.benasher44.uuid.Uuid
 import kotlin.jvm.JvmOverloads
-import kotlin.math.max
 import kotlin.reflect.KClass
 
 /**
@@ -23,12 +19,10 @@ class MemoryCache(
     private val nextCache: NormalizedCache? = null,
     private val maxSizeBytes: Int = Int.MAX_VALUE,
     private val expireAfterMillis: Long = -1,
-) : OptimisticNormalizedCache {
+) : NormalizedCache {
   // A lock is only needed if there is a nextCache
   private val lock = nextCache?.let { Lock() }
 
-  private val recordJournals = ConcurrentMap<String, RecordJournal>()
-
   private fun <T> lockWrite(block: () -> T): T {
     return lock?.write { block() } ?: block()
   }
@@ -54,7 +48,7 @@
       record ?: nextCache?.loadRecord(key, cacheHeaders)?.also { nextCachedRecord ->
         lruCache[key] = nextCachedRecord
       }
-    }.mergeJournalRecord(key)
+    }
   }
 
   override fun loadRecords(keys: Collection<String>, cacheHeaders: CacheHeaders): Collection<Record> {
     return keys.mapNotNull { key -> loadRecord(key, cacheHeaders) }
@@ -65,7 +59,6 @@
       lruCache.clear()
      nextCache?.clearAll()
     }
-    recordJournals.clear()
   }
 
   override fun remove(cacheKey: CacheKey, cascade: Boolean): Boolean {
@@ -79,24 +72,10 @@
       }
 
       val chainRemoved = nextCache?.remove(cacheKey, cascade) ?: false
-      val journalRemoved = removeFromJournal(cacheKey, cascade)
-      record != null || chainRemoved || journalRemoved
+      record != null || chainRemoved
     }
   }
 
-  private fun removeFromJournal(cacheKey: CacheKey, cascade: Boolean): Boolean {
-    val recordJournal = recordJournals[cacheKey.key]
-    if (recordJournal != null) {
-      recordJournals.remove(cacheKey.key)
-      if (cascade) {
-        for (cacheReference in recordJournal.current.referencedFields()) {
-          removeFromJournal(CacheKey(cacheReference.key), true)
-        }
-      }
-    }
-    return recordJournal != null
-  }
-
   override fun remove(pattern: String): Int {
     val regex = patternToRegex(pattern)
     return lockWrite {
@@ -141,121 +120,13 @@
   override fun dump(): Map<KClass<*>, Map<String, Record>> {
     return lockRead {
-      mapOf(OptimisticNormalizedCache::class to recordJournals.mapValues { (_, journal) -> journal.current }) +
-          mapOf(this::class to lruCache.asMap().mapValues { (_, record) -> record }) +
+      mapOf(this::class to lruCache.asMap().mapValues { (_, record) -> record }) +
           nextCache?.dump().orEmpty()
     }
   }
 
   internal fun clearCurrentCache() {
     lruCache.clear()
-    recordJournals.clear()
-  }
-
-  override fun addOptimisticUpdates(recordSet: Collection<Record>): Set<String> {
-    return recordSet.flatMap {
-      addOptimisticUpdate(it)
-    }.toSet()
-  }
-
-  override fun addOptimisticUpdate(record: Record): Set<String> {
-    val journal = recordJournals[record.key]
-    return if (journal == null) {
-      recordJournals[record.key] = RecordJournal(record)
-      record.fieldKeys()
-    } else {
-      journal.addPatch(record)
-    }
-  }
-
-  override fun removeOptimisticUpdates(mutationId: Uuid): Set<String> {
-    val changedCacheKeys = mutableSetOf<String>()
-    val keys = HashSet(recordJournals.keys) // local copy to avoid concurrent modification
-    keys.forEach {
-      val recordJournal = recordJournals[it] ?: return@forEach
-      val result = recordJournal.removePatch(mutationId)
-      changedCacheKeys.addAll(result.changedKeys)
-      if (result.isEmpty) {
-        recordJournals.remove(it)
-      }
-    }
-    return changedCacheKeys
-  }
-
-  private fun Record?.mergeJournalRecord(key: String): Record? {
-    val journal = recordJournals[key]
-    return if (journal != null) {
-      this?.mergeWith(journal.current)?.first ?: journal.current
-    } else {
-      this
-    }
-  }
-
-  private class RemovalResult(
-      val changedKeys: Set<String>,
-      val isEmpty: Boolean,
-  )
-
-  private class RecordJournal(record: Record) {
-    /**
-     * The latest value of the record made by applying all the patches.
-     */
-    var current: Record = record
-
-    /**
-     * A list of chronological patches applied to the record.
-     */
-    private val patches = mutableListOf(record)
-
-    /**
-     * Adds a new patch on top of all the previous ones.
-     */
-    fun addPatch(record: Record): Set<String> {
-      val (mergedRecord, changedKeys) = current.mergeWith(record)
-      current = mergedRecord
-      patches.add(record)
-      return changedKeys
-    }
-
-    /**
-     * Lookup record by mutation id, if it's found removes it from the history and
-     * computes the new current record.
-     */
-    fun removePatch(mutationId: Uuid): RemovalResult {
-      val recordIndex = patches.indexOfFirst { mutationId == it.mutationId }
-      if (recordIndex == -1) {
-        // The mutation did not impact this Record
-        return RemovalResult(emptySet(), false)
-      }
-
-      if (patches.size == 1) {
-        // The mutation impacted this Record and it was the only one in the history
-        return RemovalResult(current.fieldKeys(), true)
-      }
-
-      /**
-       * There are multiple patches, go over them and compute the new current value
-       * Remember the oldRecord so that we can compute the changed keys
-       */
-      val oldRecord = current
-
-      patches.removeAt(recordIndex).key
-
-      var cur: Record? = null
-      val start = max(0, recordIndex - 1)
-      for (i in start until patches.size) {
-        val record = patches[i]
-        if (cur == null) {
-          cur = record
-        } else {
-          val (mergedRecord, _) = cur.mergeWith(record)
-          cur = mergedRecord
-        }
-      }
-      current = cur!!
-
-      return RemovalResult(Record.changedKeys(oldRecord, current), false)
-    }
-  }
 }
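With this change MemoryCache is a plain NormalizedCache again: the journal bookkeeping deleted above moves into the OptimisticNormalizedCache decorator introduced in the next file. A minimal sketch of the resulting composition, assuming only the constructors visible in this diff (the 10 MB limit is an arbitrary illustration value):

import com.apollographql.apollo3.cache.normalized.api.MemoryCache
import com.apollographql.apollo3.cache.normalized.api.internal.OptimisticNormalizedCache

// Optimistic support is layered on top of any NormalizedCache instead of
// being baked into MemoryCache itself.
val cache = OptimisticNormalizedCache(MemoryCache(maxSizeBytes = 10 * 1024 * 1024))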
diff --git a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticNormalizedCache.kt b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticNormalizedCache.kt
index 3957d6caa5f..c683f2d3f77 100644
--- a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticNormalizedCache.kt
+++ b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticNormalizedCache.kt
@@ -1,13 +1,185 @@
 package com.apollographql.apollo3.cache.normalized.api.internal
 
+import com.apollographql.apollo3.annotations.ApolloInternal
+import com.apollographql.apollo3.cache.normalized.api.CacheHeaders
+import com.apollographql.apollo3.cache.normalized.api.CacheKey
 import com.apollographql.apollo3.cache.normalized.api.NormalizedCache
 import com.apollographql.apollo3.cache.normalized.api.Record
+import com.apollographql.apollo3.cache.normalized.api.Record.Companion.changedKeys
+import com.apollographql.apollo3.cache.normalized.api.RecordMerger
 import com.benasher44.uuid.Uuid
+import kotlin.math.max
+import kotlin.reflect.KClass
 
-interface OptimisticNormalizedCache : NormalizedCache {
-  fun addOptimisticUpdates(recordSet: Collection<Record>): Set<String>
+@ApolloInternal
+class OptimisticNormalizedCache(private val wrapped: NormalizedCache) : NormalizedCache {
+  private val recordJournals = ConcurrentMap<String, RecordJournal>()
 
-  fun addOptimisticUpdate(record: Record): Set<String>
+  override fun loadRecord(key: String, cacheHeaders: CacheHeaders): Record? {
+    val nonOptimisticRecord = wrapped.loadRecord(key, cacheHeaders)
+    return nonOptimisticRecord.mergeJournalRecord(key)
+  }
 
-  fun removeOptimisticUpdates(mutationId: Uuid): Set<String>
+  override fun loadRecords(keys: Collection<String>, cacheHeaders: CacheHeaders): Collection<Record> {
+    val nonOptimisticRecords = wrapped.loadRecords(keys, cacheHeaders).associateBy { it.key }
+    return keys.mapNotNull { key ->
+      nonOptimisticRecords[key].mergeJournalRecord(key)
+    }
+  }
+
+  override fun merge(record: Record, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set<String> {
+    return wrapped.merge(record, cacheHeaders, recordMerger)
+  }
+
+  override fun merge(records: Collection<Record>, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set<String> {
+    return wrapped.merge(records, cacheHeaders, recordMerger)
+  }
+
+  override fun clearAll() {
+    wrapped.clearAll()
+    recordJournals.clear()
+  }
+
+  override fun remove(cacheKey: CacheKey, cascade: Boolean): Boolean {
+    var removed = wrapped.remove(cacheKey, cascade)
+
+    val recordJournal = recordJournals[cacheKey.key]
+    if (recordJournal != null) {
+      recordJournals.remove(cacheKey.key)
+      removed = true
+      if (cascade) {
+        for (cacheReference in recordJournal.current.referencedFields()) {
+          remove(CacheKey(cacheReference.key), true)
+        }
+      }
+    }
+    return removed
+  }
+
+  override fun remove(pattern: String): Int {
+    var removed = wrapped.remove(pattern)
+
+    val regex = patternToRegex(pattern)
+    val keys = HashSet(recordJournals.keys) // local copy to avoid concurrent modification
+    keys.forEach { key ->
+      if (regex.matches(key)) {
+        recordJournals.remove(key)
+        removed++
+      }
+    }
+
+    return removed
+  }
+
+  fun addOptimisticUpdates(recordSet: Collection<Record>): Set<String> {
+    return recordSet.flatMap {
+      addOptimisticUpdate(it)
+    }.toSet()
+  }
+
+  fun addOptimisticUpdate(record: Record): Set<String> {
+    val journal = recordJournals[record.key]
+    return if (journal == null) {
+      recordJournals[record.key] = RecordJournal(record)
+      record.fieldKeys()
+    } else {
+      journal.addPatch(record)
+    }
+  }
+
+  fun removeOptimisticUpdates(mutationId: Uuid): Set<String> {
+    val changedCacheKeys = mutableSetOf<String>()
+    val keys = HashSet(recordJournals.keys) // local copy to avoid concurrent modification
+    keys.forEach {
+      val recordJournal = recordJournals[it] ?: return@forEach
+      val result = recordJournal.removePatch(mutationId)
+      changedCacheKeys.addAll(result.changedKeys)
+      if (result.isEmpty) {
+        recordJournals.remove(it)
+      }
+    }
+    return changedCacheKeys
+  }
+
+  override fun dump(): Map<KClass<*>, Map<String, Record>> {
+    return mapOf(this::class to recordJournals.mapValues { (_, journal) -> journal.current }) + wrapped.dump()
+  }
+
+  private fun Record?.mergeJournalRecord(key: String): Record? {
+    val journal = recordJournals[key]
+    return if (journal != null) {
+      this?.mergeWith(journal.current)?.first ?: journal.current
+    } else {
+      this
+    }
+  }
+
+  private class RemovalResult(
+      val changedKeys: Set<String>,
+      val isEmpty: Boolean,
+  )
+
+  private class RecordJournal(record: Record) {
+    /**
+     * The latest value of the record made by applying all the patches.
+     */
+    var current: Record = record
+
+    /**
+     * A list of chronological patches applied to the record.
+     */
+    private val patches = mutableListOf(record)
+
+    /**
+     * Adds a new patch on top of all the previous ones.
+     */
+    fun addPatch(record: Record): Set<String> {
+      val (mergedRecord, changedKeys) = current.mergeWith(record)
+      current = mergedRecord
+      patches.add(record)
+      return changedKeys
+    }
+
+    /**
+     * Lookup record by mutation id, if it's found removes it from the history and
+     * computes the new current record.
+     *
+     * @return the changed keys or null if
+     */
+    fun removePatch(mutationId: Uuid): RemovalResult {
+      val recordIndex = patches.indexOfFirst { mutationId == it.mutationId }
+      if (recordIndex == -1) {
+        // The mutation did not impact this Record
+        return RemovalResult(emptySet(), false)
+      }
+
+      if (patches.size == 1) {
+        // The mutation impacted this Record and it was the only one in the history
+        return RemovalResult(current.fieldKeys(), true)
+      }
+
+      /**
+       * There are multiple patches, go over them and compute the new current value
+       * Remember the oldRecord so that we can compute the changed keys
+       */
+      val oldRecord = current
+
+      patches.removeAt(recordIndex).key
+
+      var cur: Record? = null
+      val start = max(0, recordIndex - 1)
+      for (i in start until patches.size) {
+        val record = patches[i]
+        if (cur == null) {
+          cur = record
+        } else {
+          val (mergedRecord, _) = cur.mergeWith(record)
+          cur = mergedRecord
+        }
+      }
+      current = cur!!
+
+      return RemovalResult(changedKeys(oldRecord, current), false)
+    }
+  }
 }
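The journal semantics are easiest to see end to end. A sketch under the API shown in this diff; Record's (key, fields, mutationId) constructor and CacheHeaders.NONE are assumed from the surrounding apollo3 cache API, and the commented values are illustrative:

import com.apollographql.apollo3.cache.normalized.api.CacheHeaders
import com.apollographql.apollo3.cache.normalized.api.MemoryCache
import com.apollographql.apollo3.cache.normalized.api.Record
import com.apollographql.apollo3.cache.normalized.api.internal.OptimisticNormalizedCache
import com.benasher44.uuid.uuid4

fun main() {
  val cache = OptimisticNormalizedCache(MemoryCache())
  val mutationId = uuid4()

  // Start a journal for "hero"; the patch is tagged with the mutation id
  // so it can be rolled back later.
  cache.addOptimisticUpdate(Record(key = "hero", fields = mapOf("name" to "R2-D2"), mutationId = mutationId))

  // Reads merge the journal on top of whatever the wrapped cache holds.
  check(cache.loadRecord("hero", CacheHeaders.NONE)?.fields?.get("name") == "R2-D2")

  // Rolling the mutation back drops the patch and reports the changed field keys.
  println(cache.removeOptimisticUpdates(mutationId)) // e.g. [hero.name]
  check(cache.loadRecord("hero", CacheHeaders.NONE) == null)
}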
diff --git a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticNormalizedCacheWrapper.kt b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticNormalizedCacheWrapper.kt
deleted file mode 100644
index f53b9fbb929..00000000000
--- a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticNormalizedCacheWrapper.kt
+++ /dev/null
@@ -1,209 +0,0 @@
-package com.apollographql.apollo3.cache.normalized.api.internal
-
-import com.apollographql.apollo3.annotations.ApolloInternal
-import com.apollographql.apollo3.cache.normalized.api.CacheHeaders
-import com.apollographql.apollo3.cache.normalized.api.CacheKey
-import com.apollographql.apollo3.cache.normalized.api.NormalizedCache
-import com.apollographql.apollo3.cache.normalized.api.Record
-import com.apollographql.apollo3.cache.normalized.api.Record.Companion.changedKeys
-import com.apollographql.apollo3.cache.normalized.api.RecordMerger
-import com.benasher44.uuid.Uuid
-import kotlin.math.max
-import kotlin.reflect.KClass
-
-@ApolloInternal
-class OptimisticNormalizedCacheWrapper(private val wrapped: NormalizedCache) : OptimisticNormalizedCache {
-  private val lock = Lock()
-  private val recordJournals = mutableMapOf<String, RecordJournal>()
-
-  override fun loadRecord(key: String, cacheHeaders: CacheHeaders): Record? {
-    return lock.read {
-      try {
-        val nonOptimisticRecord = wrapped.loadRecord(key, cacheHeaders)
-        nonOptimisticRecord.mergeJournalRecord(key)
-      } catch (ignore: Exception) {
-        null
-      }
-    }
-  }
-
-  override fun loadRecords(keys: Collection<String>, cacheHeaders: CacheHeaders): Collection<Record> {
-    return lock.read {
-      val nonOptimisticRecords = wrapped.loadRecords(keys, cacheHeaders).associateBy { it.key }
-      keys.mapNotNull { key ->
-        nonOptimisticRecords[key].mergeJournalRecord(key)
-      }
-    }
-  }
-
-  override fun merge(record: Record, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set<String> {
-    return lock.write { wrapped.merge(record, cacheHeaders, recordMerger) }
-  }
-
-  override fun merge(records: Collection<Record>, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set<String> {
-    return lock.write { wrapped.merge(records, cacheHeaders, recordMerger) }
-  }
-
-  override fun clearAll() {
-    lock.write {
-      recordJournals.clear()
-      wrapped.clearAll()
-    }
-  }
-
-  override fun remove(cacheKey: CacheKey, cascade: Boolean): Boolean {
-    return lock.write {
-      var result: Boolean = wrapped.remove(cacheKey, cascade)
-
-      val recordJournal = recordJournals[cacheKey.key]
-      if (recordJournal != null) {
-        recordJournals.remove(cacheKey.key)
-        result = true
-        if (cascade) {
-          for (cacheReference in recordJournal.current.referencedFields()) {
-            remove(CacheKey(cacheReference.key), true)
-          }
-        }
-      }
-      result
-    }
-  }
-
-  override fun remove(pattern: String): Int {
-    val regex = patternToRegex(pattern)
-    var total = 0
-    return lock.write {
-      val iterator = recordJournals.iterator()
-      while (iterator.hasNext()) {
-        val entry = iterator.next()
-        if (regex.matches(entry.key)) {
-          iterator.remove()
-          total++
-        }
-      }
-
-      val chainRemoved = wrapped.remove(pattern)
-      total + chainRemoved
-    }
-  }
-
-  override fun addOptimisticUpdates(recordSet: Collection<Record>): Set<String> {
-    return lock.write {
-      recordSet.flatMap {
-        addOptimisticUpdate(it)
-      }
-    }.toSet()
-  }
-
-  override fun addOptimisticUpdate(record: Record): Set<String> {
-    return lock.write {
-      val journal = recordJournals[record.key]
-      if (journal == null) {
-        recordJournals[record.key] = RecordJournal(record)
-        record.fieldKeys()
-      } else {
-        journal.addPatch(record)
-      }
-    }
-  }
-
-  override fun removeOptimisticUpdates(mutationId: Uuid): Set<String> {
-    return lock.write {
-      val changedCacheKeys = mutableSetOf<String>()
-      val iterator = recordJournals.iterator()
-      while (iterator.hasNext()) {
-        val entry = iterator.next()
-        val result = entry.value.removePatch(mutationId)
-        changedCacheKeys.addAll(result.changedKeys)
-        if (result.isEmpty) {
-          iterator.remove()
-        }
-      }
-      changedCacheKeys
-    }
-  }
-
-  override fun dump(): Map<KClass<*>, Map<String, Record>> {
-    return lock.read {
-      mapOf(this::class to recordJournals.mapValues { (_, journal) -> journal.current }) + wrapped.dump()
-    }
-  }
-
-  private fun Record?.mergeJournalRecord(key: String): Record? {
-    val journal = recordJournals[key]
-    return if (journal != null) {
-      this?.mergeWith(journal.current)?.first ?: journal.current
-    } else {
-      this
-    }
-  }
-
-  private class RemovalResult(
-      val changedKeys: Set<String>,
-      val isEmpty: Boolean,
-  )
-
-  private class RecordJournal(record: Record) {
-    /**
-     * The latest value of the record made by applying all the patches.
-     */
-    var current: Record = record
-
-    /**
-     * A list of chronological patches applied to the record.
-     */
-    private val patches = mutableListOf(record)
-
-    /**
-     * Adds a new patch on top of all the previous ones.
-     */
-    fun addPatch(record: Record): Set<String> {
-      val (mergedRecord, changedKeys) = current.mergeWith(record)
-      current = mergedRecord
-      patches.add(record)
-      return changedKeys
-    }
-
-    /**
-     * Lookup record by mutation id, if it's found removes it from the history and
-     * computes the new current record.
-     *
-     * @return the changed keys or null if
-     */
-    fun removePatch(mutationId: Uuid): RemovalResult {
-      val recordIndex = patches.indexOfFirst { mutationId == it.mutationId }
-      if (recordIndex == -1) {
-        // The mutation did not impact this Record
-        return RemovalResult(emptySet(), false)
-      }
-
-      if (patches.size == 1) {
-        // The mutation impacted this Record and it was the only one in the history
-        return RemovalResult(current.fieldKeys(), true)
-      }
-
-      /**
-       * There are multiple patches, go over them and compute the new current value
-       * Remember the oldRecord so that we can compute the changed keys
-       */
-      val oldRecord = current
-
-      patches.removeAt(recordIndex).key
-
-      var cur: Record? = null
-      val start = max(0, recordIndex - 1)
-      for (i in start until patches.size) {
-        val record = patches[i]
-        if (cur == null) {
-          cur = record
-        } else {
-          val (mergedRecord, _) = cur.mergeWith(record)
-          cur = mergedRecord
-        }
-      }
-      current = cur!!
-
-      return RemovalResult(changedKeys(oldRecord, current), false)
-    }
-  }
-}
diff --git a/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/DefaultApolloStore.kt b/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/DefaultApolloStore.kt
index 8f85800f5e9..0e137a3db1c 100644
--- a/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/DefaultApolloStore.kt
+++ b/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/DefaultApolloStore.kt
@@ -19,7 +19,6 @@ import com.apollographql.apollo3.cache.normalized.api.ReadOnlyNormalizedCache
 import com.apollographql.apollo3.cache.normalized.api.Record
 import com.apollographql.apollo3.cache.normalized.api.RecordMerger
 import com.apollographql.apollo3.cache.normalized.api.internal.OptimisticNormalizedCache
-import com.apollographql.apollo3.cache.normalized.api.internal.OptimisticNormalizedCacheWrapper
 import com.apollographql.apollo3.cache.normalized.api.normalize
 import com.apollographql.apollo3.cache.normalized.api.readDataFromCacheInternal
 import com.apollographql.apollo3.cache.normalized.api.toData
@@ -49,11 +48,7 @@ internal class DefaultApolloStore(
 
   // Keeping this as lazy to avoid accessing the disk at initialization which usually happens on the main thread
   private val cache: OptimisticNormalizedCache by lazy {
-    val normalizedCache = normalizedCacheFactory.create()
-    if (normalizedCache is OptimisticNormalizedCache) {
-      normalizedCache
-    } else
-      OptimisticNormalizedCacheWrapper(normalizedCache)
+    OptimisticNormalizedCache(normalizedCacheFactory.create())
   }
 
   override fun publish(keys: Set<String>) {
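Because DefaultApolloStore now wraps unconditionally, any cache a factory produces gets optimistic-update support without implementing a dedicated interface. A sketch, assuming the ApolloStore(factory) entry point and MemoryCacheFactory from the surrounding apollo3 API:

import com.apollographql.apollo3.cache.normalized.ApolloStore
import com.apollographql.apollo3.cache.normalized.api.MemoryCacheFactory

// The store lazily decorates whatever the factory creates with
// OptimisticNormalizedCache; callers never see the wrapper type.
val store = ApolloStore(MemoryCacheFactory(maxSizeBytes = 10 * 1024 * 1024))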
diff --git a/tests/pagination/src/commonTest/kotlin/OffsetBasedWithPagePaginationTest.kt b/tests/pagination/src/commonTest/kotlin/OffsetBasedWithPagePaginationTest.kt
index 50592b445f3..cecb9acadb2 100644
--- a/tests/pagination/src/commonTest/kotlin/OffsetBasedWithPagePaginationTest.kt
+++ b/tests/pagination/src/commonTest/kotlin/OffsetBasedWithPagePaginationTest.kt
@@ -11,7 +11,6 @@ import com.apollographql.apollo3.cache.normalized.api.NormalizedCacheFactory
 import com.apollographql.apollo3.cache.normalized.api.Record
 import com.apollographql.apollo3.cache.normalized.api.TypePolicyCacheKeyGenerator
 import com.apollographql.apollo3.cache.normalized.api.internal.OptimisticNormalizedCache
-import com.apollographql.apollo3.cache.normalized.api.internal.OptimisticNormalizedCacheWrapper
 import com.apollographql.apollo3.cache.normalized.sql.SqlNormalizedCacheFactory
 import com.apollographql.apollo3.testing.internal.runTest
 import pagination.offsetBasedWithPage.UsersQuery
@@ -235,7 +234,7 @@ class OffsetBasedWithPagePaginationTest {
 internal fun assertChainedCachesAreEqual(apolloStore: ApolloStore) {
   val dump = apolloStore.dump().filterKeys {
     // Ignore optimistic cache for comparison
-    it != OptimisticNormalizedCache::class && it != OptimisticNormalizedCacheWrapper::class
+    it != OptimisticNormalizedCache::class
   }
   if (dump.size < 2) return
   val caches = dump.values.toList()
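For reference, a sketch of the dump() shape the updated filter relies on: the wrapper contributes exactly one entry (its journal snapshots) keyed by OptimisticNormalizedCache::class, followed by one entry per cache in the wrapped chain (e.g. MemoryCache, then SqlNormalizedCache), which is why a single exclusion is now enough:

import com.apollographql.apollo3.cache.normalized.ApolloStore
import com.apollographql.apollo3.cache.normalized.api.internal.OptimisticNormalizedCache

internal fun chainDumps(apolloStore: ApolloStore) =
    // Drop the single optimistic layer; what remains is one map per chained cache.
    apolloStore.dump().filterKeys { it != OptimisticNormalizedCache::class }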