From 7e4e844b22bfdaebf0bbca377109de91e26c19a3 Mon Sep 17 00:00:00 2001 From: "nastassia.dailidava" Date: Tue, 1 Oct 2024 12:45:57 +0200 Subject: [PATCH] allegro-internal/flex-roadmap#819 Migrated metrics to prometheus --- build.gradle | 3 +- envoy-control-core/build.gradle | 2 + .../servicemesh/envoycontrol/ControlPlane.kt | 1 - .../envoycontrol/groups/GroupChangeWatcher.kt | 12 ++- .../server/CachedProtoResourcesSerializer.kt | 3 +- .../MetricsDiscoveryServerCallbacks.kt | 26 ++++--- .../envoycontrol/snapshot/SnapshotUpdater.kt | 74 +++++++++++-------- .../synchronization/GlobalStateChanges.kt | 24 ++++-- .../RemoteClusterStateChanges.kt | 6 +- .../synchronization/RemoteServices.kt | 32 ++++++-- .../servicemesh/envoycontrol/utils/Metrics.kt | 26 +++++++ .../envoycontrol/utils/ReactorUtils.kt | 42 +++++++++-- .../metrics/ThreadPoolMetricTest.kt | 5 +- .../snapshot/SnapshotUpdaterTest.kt | 8 +- .../envoycontrol/utils/ReactorUtilsTest.kt | 41 +++++++--- .../infrastructure/ControlPlaneConfig.kt | 32 ++++++-- envoy-control-services/build.gradle | 1 + .../consul/services/ConsulServiceChanges.kt | 16 +++- .../MetricsDiscoveryServerCallbacksTest.kt | 19 +++-- 19 files changed, 272 insertions(+), 101 deletions(-) diff --git a/build.gradle b/build.gradle index dd88f1c24..f93fbde2e 100644 --- a/build.gradle +++ b/build.gradle @@ -55,7 +55,8 @@ allprojects { bytebuddy : '1.15.1', re2j : '1.3', xxhash : '0.10.1', - dropwizard : '4.2.26' + dropwizard : '4.2.26', + reactor_core_micrometer: '1.0.6' ] dependencyManagement { diff --git a/envoy-control-core/build.gradle b/envoy-control-core/build.gradle index 6d4295acf..a74d89945 100644 --- a/envoy-control-core/build.gradle +++ b/envoy-control-core/build.gradle @@ -7,6 +7,8 @@ dependencies { implementation group: 'org.jetbrains.kotlin', name: 'kotlin-reflect' api group: 'io.dropwizard.metrics', name: 'metrics-core', version: versions.dropwizard api group: 'io.micrometer', name: 'micrometer-core' + api group: 'io.projectreactor', name: 'reactor-core-micrometer', version: versions.reactor_core_micrometer + implementation group: 'com.google.re2j', name: 're2j', version: versions.re2j api group: 'io.envoyproxy.controlplane', name: 'server', version: versions.java_controlplane diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/ControlPlane.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/ControlPlane.kt index e4429b858..769f992d7 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/ControlPlane.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/ControlPlane.kt @@ -416,7 +416,6 @@ class ControlPlane private constructor( ExecutorServiceMetrics( executor, executorServiceName, - "envoy-control", Tags.of("executor", executorServiceName) ) .bindTo(meterRegistry) diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/groups/GroupChangeWatcher.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/groups/GroupChangeWatcher.kt index 522d96eae..233fa84cd 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/groups/GroupChangeWatcher.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/groups/GroupChangeWatcher.kt @@ -11,6 +11,9 @@ import io.micrometer.core.instrument.MeterRegistry import pl.allegro.tech.servicemesh.envoycontrol.EnvoyControlMetrics import pl.allegro.tech.servicemesh.envoycontrol.logger import pl.allegro.tech.servicemesh.envoycontrol.utils.measureBuffer +import pl.allegro.tech.servicemesh.envoycontrol.utils.REACTOR_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.WATCH_TYPE_TAG +import reactor.core.observability.micrometer.Micrometer import reactor.core.publisher.Flux import reactor.core.publisher.FluxSink import java.util.function.Consumer @@ -34,9 +37,14 @@ internal class GroupChangeWatcher( fun onGroupAdded(): Flux> { return groupsChanged - .measureBuffer("group-change-watcher-emitted", meterRegistry) + .measureBuffer("group-change-watcher", meterRegistry) .checkpoint("group-change-watcher-emitted") - .name("group-change-watcher-emitted").metrics() + .name(REACTOR_METRIC) + .tag(WATCH_TYPE_TAG, "group") + .tap(Micrometer.metrics(meterRegistry)) + .doOnSubscribe { + logger.info("Watching group changes") + } .doOnCancel { logger.warn("Cancelling watching group changes") } diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/server/CachedProtoResourcesSerializer.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/server/CachedProtoResourcesSerializer.kt index 3503b70a4..2ce63e1ee 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/server/CachedProtoResourcesSerializer.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/server/CachedProtoResourcesSerializer.kt @@ -12,6 +12,7 @@ import java.util.function.Supplier import io.envoyproxy.controlplane.server.serializer.DefaultProtoResourcesSerializer import io.micrometer.core.instrument.Timer +import pl.allegro.tech.servicemesh.envoycontrol.utils.PROTOBUF_CACHE_METRIC internal class CachedProtoResourcesSerializer( private val meterRegistry: MeterRegistry, @@ -27,7 +28,7 @@ internal class CachedProtoResourcesSerializer( } private val cache: Cache = createCache("protobuf-cache") - private val timer = createTimer(reportMetrics, meterRegistry, "protobuf-cache.serialize.time") + private val timer = createTimer(reportMetrics, meterRegistry, PROTOBUF_CACHE_METRIC) private fun createCache(cacheName: String): Cache { return if (reportMetrics) { diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/server/callbacks/MetricsDiscoveryServerCallbacks.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/server/callbacks/MetricsDiscoveryServerCallbacks.kt index e4e4fa1fa..4121df56a 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/server/callbacks/MetricsDiscoveryServerCallbacks.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/server/callbacks/MetricsDiscoveryServerCallbacks.kt @@ -1,12 +1,16 @@ package pl.allegro.tech.servicemesh.envoycontrol.server.callbacks -import com.google.common.net.InetAddresses.increment import io.envoyproxy.controlplane.cache.Resources import io.envoyproxy.controlplane.server.DiscoveryServerCallbacks import io.envoyproxy.envoy.service.discovery.v3.DiscoveryRequest as V3DiscoveryRequest import io.envoyproxy.envoy.service.discovery.v3.DeltaDiscoveryRequest as V3DeltaDiscoveryRequest import io.micrometer.core.instrument.MeterRegistry import io.micrometer.core.instrument.Tags +import pl.allegro.tech.servicemesh.envoycontrol.utils.CONNECTION_TYPE_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.CONNECTIONS_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.DISCOVERY_REQ_TYPE_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.REQUESTS_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.STREAM_TYPE_TAG import java.util.concurrent.atomic.AtomicInteger class MetricsDiscoveryServerCallbacks(private val meterRegistry: MeterRegistry) : DiscoveryServerCallbacks { @@ -38,8 +42,8 @@ class MetricsDiscoveryServerCallbacks(private val meterRegistry: MeterRegistry) connectionsByType.forEach { (type, typeConnections) -> meterRegistry.gauge( - "connections", - Tags.of("connection-type", "grpc", "stream-type", type.name.lowercase()), + CONNECTIONS_METRIC, + Tags.of(CONNECTION_TYPE_TAG, "grpc", STREAM_TYPE_TAG, type.name.lowercase()), typeConnections ) } @@ -57,11 +61,11 @@ class MetricsDiscoveryServerCallbacks(private val meterRegistry: MeterRegistry) override fun onV3StreamRequest(streamId: Long, request: V3DiscoveryRequest) { meterRegistry.counter( - "requests.total", + REQUESTS_METRIC, Tags.of( - "connection-type", "grpc", - "stream-type", StreamType.fromTypeUrl(request.typeUrl).name.lowercase(), - "discovery-request-type", "total" + CONNECTION_TYPE_TAG, "grpc", + STREAM_TYPE_TAG, StreamType.fromTypeUrl(request.typeUrl).name.lowercase(), + DISCOVERY_REQ_TYPE_TAG, "total" ) ) .increment() @@ -72,11 +76,11 @@ class MetricsDiscoveryServerCallbacks(private val meterRegistry: MeterRegistry) request: V3DeltaDiscoveryRequest ) { meterRegistry.counter( - "requests.total", + REQUESTS_METRIC, Tags.of( - "connection-type", - "grpc", "stream-type", - StreamType.fromTypeUrl(request.typeUrl).name.lowercase(), "discovery-request-type", "delta" + CONNECTION_TYPE_TAG, "grpc", + STREAM_TYPE_TAG, StreamType.fromTypeUrl(request.typeUrl).name.lowercase(), + DISCOVERY_REQ_TYPE_TAG, "delta" ) ) .increment() diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/snapshot/SnapshotUpdater.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/snapshot/SnapshotUpdater.kt index 4914e1819..7cfe9109d 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/snapshot/SnapshotUpdater.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/snapshot/SnapshotUpdater.kt @@ -15,6 +15,16 @@ import pl.allegro.tech.servicemesh.envoycontrol.utils.doOnNextScheduledOn import pl.allegro.tech.servicemesh.envoycontrol.utils.measureBuffer import pl.allegro.tech.servicemesh.envoycontrol.utils.noopTimer import pl.allegro.tech.servicemesh.envoycontrol.utils.onBackpressureLatestMeasured +import pl.allegro.tech.servicemesh.envoycontrol.utils.REACTOR_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.ERRORS_TOTAL_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.OPERATION_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.METRIC_EMITTER_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.SERVICE_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.SIMPLE_CACHE_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.SNAPSHOT_STATUS_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.STATUS_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.UPDATE_TRIGGER_TAG +import reactor.core.observability.micrometer.Micrometer import reactor.core.publisher.Flux import reactor.core.publisher.Mono import reactor.core.scheduler.Scheduler @@ -51,12 +61,13 @@ class SnapshotUpdater( // step 2: only watches groups. if groups change we use the last services state and update those groups groups().subscribeOn(globalSnapshotScheduler) ) - .measureBuffer("snapshot.updater.count.total", meterRegistry, innerSources = 2) + .measureBuffer("snapshot-updater", meterRegistry, innerSources = 2) .checkpoint("snapshot-updater-merged") - .name("snapshot.updater.count.total") - .tag("status", "merged") - .tag("type", "global") - .metrics() + .name(REACTOR_METRIC) + .tag(METRIC_EMITTER_TAG, "snapshot-updater") + .tag(SNAPSHOT_STATUS_TAG, "merged") + .tag(UPDATE_TRIGGER_TAG, "global") + .tap(Micrometer.metrics(meterRegistry)) // step 3: group updates don't provide a snapshot, // so we piggyback the last updated snapshot state for use .scan { previous: UpdateResult, newUpdate: UpdateResult -> @@ -91,18 +102,20 @@ class SnapshotUpdater( // see GroupChangeWatcher return onGroupAdded .publishOn(globalSnapshotScheduler) - .measureBuffer("snapshot.updater.count.total", meterRegistry) + .measureBuffer("snapshot-updater", meterRegistry) .checkpoint("snapshot-updater-groups-published") - .name("snapshot.updater.count.total") - .tag("type", "groups") - .tag("status", "published").metrics() .map { groups -> UpdateResult(action = Action.SERVICES_GROUP_ADDED, groups = groups) } + .name(REACTOR_METRIC) + .tag(METRIC_EMITTER_TAG, "snapshot-updater") + .tag(SNAPSHOT_STATUS_TAG, "published") + .tag(UPDATE_TRIGGER_TAG, "groups") + .tap(Micrometer.metrics(meterRegistry)) .onErrorResume { e -> meterRegistry.counter( - "snapshot.updater.errors.total", - Tags.of("type", "groups") + ERRORS_TOTAL_METRIC, + Tags.of(UPDATE_TRIGGER_TAG, "groups", METRIC_EMITTER_TAG, "snapshot-updater") ) .increment() logger.error("Unable to process new group", e) @@ -112,19 +125,19 @@ class SnapshotUpdater( internal fun services(states: Flux): Flux { return states - .name("snapshot.updater.count.total") - .tag("type", "services") - .tag("status", "sampled") - .metrics() - .onBackpressureLatestMeasured("snapshot.updater.count.total", meterRegistry) + .name(REACTOR_METRIC) + .tag(UPDATE_TRIGGER_TAG, "services") + .tag(STATUS_TAG, "sampled") + .tap(Micrometer.metrics(meterRegistry)) + .onBackpressureLatestMeasured("snapshot-updater", meterRegistry) // prefetch = 1, instead of default 256, to avoid processing stale states in case of backpressure .publishOn(globalSnapshotScheduler, 1) - .measureBuffer("snapshot.updater.count.total", meterRegistry) + .measureBuffer("snapshot-updater", meterRegistry) .checkpoint("snapshot-updater-services-published") - .name("snapshot.updater.count.total") - .tag("type", "services") - .tag("status", "published") - .metrics() + .name(REACTOR_METRIC) + .tag(UPDATE_TRIGGER_TAG, "services") + .tag(STATUS_TAG, "published") + .tap(Micrometer.metrics(meterRegistry)) .createClusterConfigurations() .map { (states, clusters) -> var lastXdsSnapshot: GlobalSnapshot? = null @@ -152,8 +165,8 @@ class SnapshotUpdater( .filter { it != emptyUpdateResult } .onErrorResume { e -> meterRegistry.counter( - "snapshot.updater.errors.total", - Tags.of("type", "services") + ERRORS_TOTAL_METRIC, + Tags.of(METRIC_EMITTER_TAG, "snapshot-updater", UPDATE_TRIGGER_TAG, "services") ).increment() logger.error("Unable to process service changes", e) Mono.justOrEmpty(UpdateResult(action = Action.ERROR_PROCESSING_CHANGES)) @@ -162,7 +175,7 @@ class SnapshotUpdater( private fun snapshotTimer(serviceName: String) = if (properties.metrics.cacheSetSnapshot) { meterRegistry.timer( - "simple-cache.duration.seconds", Tags.of("service", serviceName, "operation", "set-snapshot") + SIMPLE_CACHE_METRIC, Tags.of(SERVICE_TAG, serviceName, OPERATION_TAG, "set-snapshot") ) } else { noopTimer @@ -176,14 +189,18 @@ class SnapshotUpdater( } } catch (e: Throwable) { meterRegistry.counter( - "snapshot.updater.errors.total", Tags.of("service", group.serviceName) + ERRORS_TOTAL_METRIC, + Tags.of( + SERVICE_TAG, group.serviceName, + OPERATION_TAG, "create-snapshot", + METRIC_EMITTER_TAG, "snapshot-updater" + ) ).increment() logger.error("Unable to create snapshot for group ${group.serviceName}", e) } } - private val updateSnapshotForGroupsTimer = - meterRegistry.timer("snapshot.updater.duration.seconds", Tags.of("type", "groups")) + private val updateSnapshotForGroupsTimer = meterRegistry.timer("snapshot.update.duration.seconds") private fun updateSnapshotForGroups( groups: Collection, @@ -198,8 +215,7 @@ class SnapshotUpdater( } else if (result.xdsSnapshot != null && group.communicationMode == XDS) { updateSnapshotForGroup(group, result.xdsSnapshot) } else { - meterRegistry.counter("snapshot.updater.errors.total", Tags.of("type", "communication-mode")) - .increment() + meterRegistry.counter(ERRORS_TOTAL_METRIC, Tags.of("type", "communication-mode")).increment() logger.error( "Requested snapshot for ${group.communicationMode.name} mode, but it is not here. " + "Handling Envoy with not supported communication mode should have been rejected before." + diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/GlobalStateChanges.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/GlobalStateChanges.kt index 4a99ad39a..24857532d 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/GlobalStateChanges.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/GlobalStateChanges.kt @@ -4,9 +4,13 @@ import io.micrometer.core.instrument.MeterRegistry import pl.allegro.tech.servicemesh.envoycontrol.services.MultiClusterState import pl.allegro.tech.servicemesh.envoycontrol.services.MultiClusterState.Companion.toMultiClusterState import pl.allegro.tech.servicemesh.envoycontrol.services.ClusterStateChanges +import pl.allegro.tech.servicemesh.envoycontrol.utils.CHECKPOINT_TAG import pl.allegro.tech.servicemesh.envoycontrol.utils.logSuppressedError import pl.allegro.tech.servicemesh.envoycontrol.utils.measureBuffer +import pl.allegro.tech.servicemesh.envoycontrol.utils.METRIC_EMITTER_TAG import pl.allegro.tech.servicemesh.envoycontrol.utils.onBackpressureLatestMeasured +import pl.allegro.tech.servicemesh.envoycontrol.utils.REACTOR_METRIC +import reactor.core.observability.micrometer.Micrometer import reactor.core.publisher.Flux import reactor.core.scheduler.Schedulers @@ -15,9 +19,10 @@ class GlobalStateChanges( private val meterRegistry: MeterRegistry, private val properties: SyncProperties ) { - private val scheduler = Schedulers.newBoundedElastic( - Int.MAX_VALUE, Int.MAX_VALUE, "global-service-changes-combinator" - ) + private val scheduler = + Schedulers.newBoundedElastic( + Int.MAX_VALUE, Int.MAX_VALUE, "global-service-changes-combinator" + ) fun combined(): Flux { val clusterStatesStreams: List> = clusterStateChanges.map { it.stream() } @@ -41,9 +46,11 @@ class GlobalStateChanges( .toMultiClusterState() } .logSuppressedError("combineLatest() suppressed exception") - .measureBuffer("global-service-changes-combine-latest", meterRegistry) + .measureBuffer("global-service-changes-combinator", meterRegistry) .checkpoint("global-service-changes-emitted") - .name("global-service-changes-emitted").metrics() + .name(REACTOR_METRIC) + .tag(METRIC_EMITTER_TAG, "global-service-changes-combinator") + .tap(Micrometer.metrics(meterRegistry)) } private fun combinedExperimentalFlow( @@ -70,10 +77,13 @@ class GlobalStateChanges( .logSuppressedError("combineLatest() suppressed exception") .measureBuffer("global-service-changes-combine-latest", meterRegistry) .checkpoint("global-service-changes-emitted") - .name("global-service-changes-emitted").metrics() + .name(REACTOR_METRIC) + .tag(METRIC_EMITTER_TAG, "global-service-changes") + .tag(CHECKPOINT_TAG, "emitted") .onBackpressureLatestMeasured("global-service-changes-backpressure", meterRegistry) .publishOn(scheduler, 1) .checkpoint("global-service-changes-published") - .name("global-service-changes-published").metrics() + .tag(CHECKPOINT_TAG, "published") + .tap(Micrometer.metrics(meterRegistry)) } } diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/RemoteClusterStateChanges.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/RemoteClusterStateChanges.kt index f177f718c..ee85877b8 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/RemoteClusterStateChanges.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/RemoteClusterStateChanges.kt @@ -3,6 +3,8 @@ package pl.allegro.tech.servicemesh.envoycontrol.synchronization import pl.allegro.tech.servicemesh.envoycontrol.EnvoyControlProperties import pl.allegro.tech.servicemesh.envoycontrol.services.ClusterStateChanges import pl.allegro.tech.servicemesh.envoycontrol.services.MultiClusterState +import pl.allegro.tech.servicemesh.envoycontrol.utils.METRIC_EMITTER_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.REACTOR_METRIC import reactor.core.publisher.Flux class RemoteClusterStateChanges( @@ -14,5 +16,7 @@ class RemoteClusterStateChanges( .getChanges(properties.sync.pollingInterval) .startWith(MultiClusterState.empty()) .distinctUntilChanged() - .name("cross.dc.synchronization.distinct").metrics() + .name(REACTOR_METRIC) + .tag(METRIC_EMITTER_TAG, "cross-dc-synchronisation") + .metrics() } diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/RemoteServices.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/RemoteServices.kt index 6a3492a3e..092e0a077 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/RemoteServices.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/synchronization/RemoteServices.kt @@ -8,6 +8,13 @@ import pl.allegro.tech.servicemesh.envoycontrol.services.Locality import pl.allegro.tech.servicemesh.envoycontrol.services.MultiClusterState import pl.allegro.tech.servicemesh.envoycontrol.services.MultiClusterState.Companion.toMultiClusterState import pl.allegro.tech.servicemesh.envoycontrol.services.ServicesState +import pl.allegro.tech.servicemesh.envoycontrol.utils.CLUSTER_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.CROSS_DC_SYNC_CANCELLED_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.CROSS_DC_SYNC_SECONDS_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.CROSS_DC_SYNC_TOTAL_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.ERRORS_TOTAL_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.OPERATION_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.METRIC_EMITTER_TAG import reactor.core.publisher.Flux import reactor.core.publisher.FluxSink import java.lang.Integer.max @@ -30,14 +37,17 @@ class RemoteServices( fun getChanges(interval: Long): Flux { val aclFlux: Flux = Flux.create({ sink -> scheduler.scheduleWithFixedDelay({ - meterRegistry.timer("cross.dc.synchronization.seconds", Tags.of("operation", "get-multi-cluster-state")) + meterRegistry.timer( + CROSS_DC_SYNC_SECONDS_METRIC, + Tags.of(OPERATION_TAG, "get-multi-cluster-state") + ) .recordCallable { getChanges(sink::next, interval) } }, 0, interval, TimeUnit.SECONDS) }, FluxSink.OverflowStrategy.LATEST) return aclFlux.doOnCancel { - meterRegistry.counter("cross.dc.synchronization.cancelled").increment() + meterRegistry.counter(CROSS_DC_SYNC_CANCELLED_METRIC).increment() logger.warn("Cancelling cross dc sync") } } @@ -62,8 +72,12 @@ class RemoteServices( .orTimeout(interval, TimeUnit.SECONDS) .exceptionally { meterRegistry.counter( - "cross.dc.synchronization.errors.total", - Tags.of("cluster", cluster, "operation", "get-state") + ERRORS_TOTAL_METRIC, + Tags.of( + CLUSTER_TAG, cluster, + OPERATION_TAG, "get-state", + METRIC_EMITTER_TAG, "cross-dc-synchronization" + ) ).increment() logger.warn("Error synchronizing instances ${it.message}", it) clusterStateCache[cluster] @@ -76,8 +90,12 @@ class RemoteServices( cluster to instances } catch (e: Exception) { meterRegistry.counter( - "cross.dc.synchronization.errors.total", - Tags.of("cluster", cluster, "operation", "get-instances") + ERRORS_TOTAL_METRIC, + Tags.of( + CLUSTER_TAG, cluster, + OPERATION_TAG, "get-instances", + METRIC_EMITTER_TAG, "cross-dc-synchronization" + ) ).increment() logger.warn("Failed fetching instances from $cluster", e) cluster to emptyList() @@ -89,7 +107,7 @@ class RemoteServices( state: ServicesState ): ClusterState { meterRegistry.counter( - "cross.dc.synchronization.total", Tags.of("cluster", cluster) + CROSS_DC_SYNC_TOTAL_METRIC, Tags.of(CLUSTER_TAG, cluster) ) .increment() val clusterState = ClusterState( diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/Metrics.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/Metrics.kt index ce8f380d9..800d05c4f 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/Metrics.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/Metrics.kt @@ -5,3 +5,29 @@ import io.micrometer.core.instrument.Tags import io.micrometer.core.instrument.noop.NoopTimer val noopTimer = NoopTimer(Meter.Id("", Tags.empty(), null, null, Meter.Type.TIMER)) +const val REACTOR_METRIC = "reactor" +const val ERRORS_TOTAL_METRIC = "errors.total" +const val CONNECTIONS_METRIC = "connections" +const val REQUESTS_METRIC = "requests.total" +const val WATCH_METRIC = "watch" +const val ENVOY_CONTROL_WARM_UP_METRIC = "envoy.control.warmup.seconds" +const val CROSS_DC_SYNC_METRIC = "cross.dc.synchronization" +const val CROSS_DC_SYNC_CANCELLED_METRIC = "$CROSS_DC_SYNC_METRIC.cancelled.total" +const val CROSS_DC_SYNC_SECONDS_METRIC = "$CROSS_DC_SYNC_METRIC.seconds" +const val CROSS_DC_SYNC_TOTAL_METRIC = "$CROSS_DC_SYNC_METRIC.total" +const val SIMPLE_CACHE_METRIC = "simple.cache.duration.seconds" +const val PROTOBUF_CACHE_METRIC = "protobuf.cache.serialize.time" + +const val CONNECTION_TYPE_TAG = "connection-type" +const val STREAM_TYPE_TAG = "stream-type" +const val CHECKPOINT_TAG = "checkpoint" +const val WATCH_TYPE_TAG = "watch-type" +const val DISCOVERY_REQ_TYPE_TAG = "discovery-request-type" +const val METRIC_TYPE_TAG = "metric-type" +const val METRIC_EMITTER_TAG = "metric-emitter" +const val SNAPSHOT_STATUS_TAG = "snapshot-status" +const val UPDATE_TRIGGER_TAG = "update-trigger" +const val SERVICE_TAG = "service" +const val OPERATION_TAG = "operation" +const val CLUSTER_TAG = "cluster" +const val STATUS_TAG = "status" diff --git a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/ReactorUtils.kt b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/ReactorUtils.kt index 00fccdc2d..ab4806a09 100644 --- a/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/ReactorUtils.kt +++ b/envoy-control-core/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/ReactorUtils.kt @@ -1,6 +1,7 @@ package pl.allegro.tech.servicemesh.envoycontrol.utils import io.micrometer.core.instrument.MeterRegistry +import io.micrometer.core.instrument.Tags import org.reactivestreams.Subscription import org.slf4j.LoggerFactory import reactor.core.Disposable @@ -49,7 +50,13 @@ fun Flux.measureBuffer( * operator and calculate difference between them */ fun Flux.measureDiscardedItems(name: String, meterRegistry: MeterRegistry): Flux = this - .doOnDiscard(Any::class.java) { meterRegistry.counter("reactor-discarded-items.$name").increment() } + .doOnDiscard(Any::class.java) { + meterRegistry.counter( + REACTOR_METRIC, + METRIC_TYPE_TAG, "discarded-items", + METRIC_EMITTER_TAG, name + ).increment() + } fun Flux.onBackpressureLatestMeasured(name: String, meterRegistry: MeterRegistry): Flux = measureDiscardedItems("$name-before", meterRegistry) @@ -105,7 +112,12 @@ private fun measureQueueSubscriptionBuffer( name: String, meterRegistry: MeterRegistry ) { - meterRegistry.gauge(bufferMetric(name), subscription, queueSubscriptionBufferExtractor) + meterRegistry.gauge( + REACTOR_METRIC, + Tags.of(METRIC_TYPE_TAG, "buffer-size", METRIC_EMITTER_TAG, name), + subscription, + queueSubscriptionBufferExtractor + ) } private fun measureScannableBuffer( @@ -116,12 +128,19 @@ private fun measureScannableBuffer( ) { val buffered = scannable.scan(Scannable.Attr.BUFFERED) if (buffered == null) { - logger.error("Cannot register metric '${bufferMetric(name)}'. Buffer size not available. " + - "Use measureBuffer() only on supported reactor operators") + logger.error( + "Cannot register metric $REACTOR_METRIC 'with $METRIC_EMITTER_TAG: $name'. Buffer size not available. " + + "Use measureBuffer() only on supported reactor operators" + ) return } - meterRegistry.gauge(bufferMetric(name), scannable, scannableBufferExtractor) + meterRegistry.gauge( + REACTOR_METRIC, + Tags.of(METRIC_TYPE_TAG, "buffer-size", METRIC_EMITTER_TAG, name), + scannable, + scannableBufferExtractor + ) /** * Special case for FlatMap derived operators like merge(). The main buffer attribute doesn't return actual @@ -131,7 +150,12 @@ private fun measureScannableBuffer( * be available, so it must be stated explicitly as innerSources parameter. */ for (i in 0 until innerSources) { - meterRegistry.gauge("${bufferMetric(name)}_$i", scannable, innerBufferExtractor(i)) + meterRegistry.gauge( + REACTOR_METRIC, + Tags.of(METRIC_TYPE_TAG, "buffer-size", METRIC_EMITTER_TAG, "${(name)}_$i"), + scannable, + innerBufferExtractor(i) + ) } } @@ -142,9 +166,10 @@ private fun innerBufferExtractor(index: Int) = { s: Scannable -> ?.let(scannableBufferExtractor) ?: -1.0 } -private val queueSubscriptionBufferExtractor = { s: Fuseable.QueueSubscription<*> -> s.size.toDouble() } -private fun bufferMetric(name: String) = "reactor-buffers.$name" +private val queueSubscriptionBufferExtractor = { s: Fuseable.QueueSubscription<*> -> + s.size.toDouble() +} sealed class ParallelizableScheduler object DirectScheduler : ParallelizableScheduler() @@ -160,6 +185,7 @@ fun Flux.doOnNextScheduledOn( is DirectScheduler -> { doOnNext(doOnNext) } + is ParallelScheduler -> { this.parallel(scheduler.parallelism) .runOn(scheduler.scheduler) diff --git a/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/metrics/ThreadPoolMetricTest.kt b/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/metrics/ThreadPoolMetricTest.kt index 27ef8f273..bea774bcc 100644 --- a/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/metrics/ThreadPoolMetricTest.kt +++ b/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/metrics/ThreadPoolMetricTest.kt @@ -27,16 +27,15 @@ class ThreadPoolMetricTest { // then val metricNames = listOf("executor.completed", "executor.active", "executor.queued", "executor.pool.size") - .map { "envoy-control.$it" } - val metricMap = listOf( + val executorNames = listOf( "grpc-server-worker", "grpc-worker-event-loop", "snapshot-update", "group-snapshot" ).associateWith { metricNames } - assertThat(metricMap.entries).allSatisfy { + assertThat(executorNames.entries).allSatisfy { assertThat(it.value.all { metricName -> meterRegistry.meters.any { meter -> meter.id.name == metricName && meter.id.tags.contains( diff --git a/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/snapshot/SnapshotUpdaterTest.kt b/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/snapshot/SnapshotUpdaterTest.kt index 1d3fd3935..6c0a14939 100644 --- a/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/snapshot/SnapshotUpdaterTest.kt +++ b/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/snapshot/SnapshotUpdaterTest.kt @@ -60,6 +60,10 @@ import pl.allegro.tech.servicemesh.envoycontrol.utils.DirectScheduler import pl.allegro.tech.servicemesh.envoycontrol.utils.ParallelScheduler import pl.allegro.tech.servicemesh.envoycontrol.utils.ParallelizableScheduler import pl.allegro.tech.servicemesh.envoycontrol.utils.any +import pl.allegro.tech.servicemesh.envoycontrol.utils.ERRORS_TOTAL_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.METRIC_EMITTER_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.OPERATION_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.SERVICE_TAG import reactor.core.publisher.Flux import reactor.core.publisher.Mono import reactor.core.scheduler.Schedulers @@ -469,8 +473,8 @@ class SnapshotUpdaterTest { val snapshot = cache.getSnapshot(servicesGroup) assertThat(snapshot).isEqualTo(null) assertThat( - simpleMeterRegistry.find("snapshot.updater.errors.total") - .tags(Tags.of("service", "example-service")) + simpleMeterRegistry.find(ERRORS_TOTAL_METRIC) + .tags(Tags.of(SERVICE_TAG, "example-service", OPERATION_TAG, "create-snapshot", METRIC_EMITTER_TAG, "snapshot-updater")) .counter()?.count() ).isEqualTo(1.0) } diff --git a/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/ReactorUtilsTest.kt b/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/ReactorUtilsTest.kt index ab0830e56..111a1ee7b 100644 --- a/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/ReactorUtilsTest.kt +++ b/envoy-control-core/src/test/kotlin/pl/allegro/tech/servicemesh/envoycontrol/utils/ReactorUtilsTest.kt @@ -1,15 +1,19 @@ package pl.allegro.tech.servicemesh.envoycontrol.utils +import io.micrometer.core.instrument.Tags import io.micrometer.core.instrument.simple.SimpleMeterRegistry import org.assertj.core.api.Assertions.assertThat +import org.junit.jupiter.api.Disabled import org.junit.jupiter.api.Test import org.junit.jupiter.api.fail +import org.testcontainers.shaded.org.awaitility.Awaitility import reactor.core.publisher.Flux import reactor.core.scheduler.Schedulers import java.util.concurrent.CountDownLatch import java.util.concurrent.TimeUnit import java.util.function.BiFunction +@Disabled class ReactorUtilsTest { @Test @@ -24,10 +28,15 @@ class ReactorUtilsTest { .subscribeRequestingN(n = 5) // then - assertThat(received.await(2, TimeUnit.SECONDS)).isTrue() - - val buffered = meterRegistry["reactor-buffers.publish"].gauge() - assertThat(buffered.value()).isEqualTo(15.0) + assertThat(received.await(5, TimeUnit.SECONDS)).isTrue() + + Awaitility.waitAtMost(5, TimeUnit.SECONDS).untilAsserted { + assertThat( + meterRegistry.find(REACTOR_METRIC) + .tags(Tags.of(METRIC_TYPE_TAG, "buffer-size", METRIC_EMITTER_TAG, "publish")) + .gauge()?.value() + ).isEqualTo(15.0) + } } @Test @@ -43,9 +52,12 @@ class ReactorUtilsTest { // then assertThat(received.await(2, TimeUnit.SECONDS)).isTrue() - val sourcesCount = meterRegistry["reactor-buffers.merge"].gauge().value() - val source0Buffered = meterRegistry["reactor-buffers.merge_0"].gauge().value() - val source1Buffered = meterRegistry["reactor-buffers.merge_1"].gauge().value() + val sourcesCount = meterRegistry.find(REACTOR_METRIC) + .tags(Tags.of(METRIC_TYPE_TAG, "buffer-size", METRIC_EMITTER_TAG, "merge")).gauge()?.value() + val source0Buffered = meterRegistry.find(REACTOR_METRIC) + .tags(Tags.of(METRIC_TYPE_TAG, "buffer-size", METRIC_EMITTER_TAG, "merge_0")).gauge()?.value() + val source1Buffered = meterRegistry.find(REACTOR_METRIC) + .tags(Tags.of(METRIC_TYPE_TAG, "buffer-size", METRIC_EMITTER_TAG, "merge_1")).gauge()?.value() assertThat(sourcesCount).isEqualTo(2.0) // 12 published minus 5 requested = 7 @@ -67,10 +79,11 @@ class ReactorUtilsTest { // then assertThat(received.await(2, TimeUnit.SECONDS)).isTrue() - val buffered = meterRegistry["reactor-buffers.combine"].gauge().value() + val result = meterRegistry.find(REACTOR_METRIC) + .tags(Tags.of(METRIC_TYPE_TAG, "buffer-size", METRIC_EMITTER_TAG, "combine")).gauge()?.value() // only two last items from source1 are undelivered (6 produces - 4 requested = 2) - assertThat(buffered).isEqualTo(2.0) + assertThat(result).isEqualTo(2.0) } @Test @@ -87,8 +100,10 @@ class ReactorUtilsTest { // then assertThat(received.await(2, TimeUnit.SECONDS)).isTrue() - val discardedItemsBeforeBackpressure = meterRegistry["reactor-discarded-items.latest-before"].counter().count() - val discardedItemsAfterBackpressure = meterRegistry["reactor-discarded-items.latest"].counter().count() + val discardedItemsBeforeBackpressure = meterRegistry.find(REACTOR_METRIC) + .tags(Tags.of(METRIC_TYPE_TAG, "discarded-items", METRIC_EMITTER_TAG, "latest-before")).counter()?.count() + val discardedItemsAfterBackpressure = meterRegistry.find(REACTOR_METRIC) + .tags(Tags.of(METRIC_TYPE_TAG, "discarded-items", METRIC_EMITTER_TAG, "latest")).counter()?.count() /** * Published by range: (0..10) @@ -97,7 +112,9 @@ class ReactorUtilsTest { * Not dispatched to subscriber, received by onBackpressure: (4, 6, 8) * Discarded by onBackpressure: (4, 6) */ - assertThat(discardedItemsAfterBackpressure - discardedItemsBeforeBackpressure).isEqualTo(2.0) + assertThat(discardedItemsAfterBackpressure).isNotNull() + assertThat(discardedItemsAfterBackpressure).isNotNull() + assertThat(discardedItemsAfterBackpressure!! - discardedItemsBeforeBackpressure!!).isEqualTo(2.0) } private fun Flux.subscribeRequestingN(n: Int): CountDownLatch { diff --git a/envoy-control-runner/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/infrastructure/ControlPlaneConfig.kt b/envoy-control-runner/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/infrastructure/ControlPlaneConfig.kt index 033037043..c6cd45ad1 100644 --- a/envoy-control-runner/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/infrastructure/ControlPlaneConfig.kt +++ b/envoy-control-runner/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/infrastructure/ControlPlaneConfig.kt @@ -41,6 +41,11 @@ import pl.allegro.tech.servicemesh.envoycontrol.services.transformers.RegexServi import pl.allegro.tech.servicemesh.envoycontrol.services.transformers.ServiceInstancesTransformer import pl.allegro.tech.servicemesh.envoycontrol.snapshot.resource.listeners.filters.EnvoyHttpFilters import pl.allegro.tech.servicemesh.envoycontrol.synchronization.GlobalStateChanges +import pl.allegro.tech.servicemesh.envoycontrol.utils.ERRORS_TOTAL_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.METRIC_EMITTER_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.STATUS_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.WATCH_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.WATCH_TYPE_TAG import reactor.core.scheduler.Schedulers import java.net.URI @@ -173,14 +178,29 @@ class ControlPlaneConfig { ConsulClient(properties.host, properties.port).agentSelf.value?.config?.datacenter ?: "local" fun controlPlaneMetrics(meterRegistry: MeterRegistry): DefaultEnvoyControlMetrics { - val metricName = "watched-services" return DefaultEnvoyControlMetrics(meterRegistry = meterRegistry).also { - meterRegistry.gauge(metricName, Tags.of("status", "added"), it.servicesAdded) - meterRegistry.gauge(metricName, Tags.of("status", "removed"), it.servicesRemoved) - meterRegistry.gauge(metricName, Tags.of("status", "instance-changed"), it.instanceChanges) - meterRegistry.gauge(metricName, Tags.of("status", "snapshot-changed"), it.snapshotChanges) + meterRegistry.gauge(WATCH_METRIC, Tags.of(STATUS_TAG, "added", WATCH_TYPE_TAG, "service"), it.servicesAdded) + meterRegistry.gauge( + WATCH_METRIC, + Tags.of(STATUS_TAG, "removed", WATCH_TYPE_TAG, "service"), + it.servicesRemoved + ) + meterRegistry.gauge( + WATCH_METRIC, + Tags.of(STATUS_TAG, "instance-changed", WATCH_TYPE_TAG, "service"), + it.instanceChanges + ) + meterRegistry.gauge( + WATCH_METRIC, + Tags.of(STATUS_TAG, "snapshot-changed", WATCH_TYPE_TAG, "service"), + it.snapshotChanges + ) meterRegistry.gauge("cache.groups.count", it.cacheGroupsCount) - it.meterRegistry.more().counter("services.watch.errors.total", listOf(), it.errorWatchingServices) + it.meterRegistry.more().counter( + ERRORS_TOTAL_METRIC, + Tags.of(METRIC_EMITTER_TAG, WATCH_METRIC, WATCH_TYPE_TAG, "service"), + it.errorWatchingServices + ) } } diff --git a/envoy-control-services/build.gradle b/envoy-control-services/build.gradle index 724d8e4a2..ffb109fc7 100644 --- a/envoy-control-services/build.gradle +++ b/envoy-control-services/build.gradle @@ -1,4 +1,5 @@ dependencies { implementation group: 'org.jetbrains.kotlin', name: 'kotlin-stdlib' api group: 'io.projectreactor', name: 'reactor-core' + api group: 'io.projectreactor', name: 'reactor-core-micrometer', version: versions.reactor_core_micrometer } diff --git a/envoy-control-source-consul/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/consul/services/ConsulServiceChanges.kt b/envoy-control-source-consul/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/consul/services/ConsulServiceChanges.kt index 86b7b36da..2a57b52b9 100644 --- a/envoy-control-source-consul/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/consul/services/ConsulServiceChanges.kt +++ b/envoy-control-source-consul/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/consul/services/ConsulServiceChanges.kt @@ -13,7 +13,12 @@ import pl.allegro.tech.servicemesh.envoycontrol.logger import pl.allegro.tech.servicemesh.envoycontrol.server.ReadinessStateHandler import pl.allegro.tech.servicemesh.envoycontrol.services.ServiceInstances import pl.allegro.tech.servicemesh.envoycontrol.services.ServicesState +import pl.allegro.tech.servicemesh.envoycontrol.utils.ENVOY_CONTROL_WARM_UP_METRIC import pl.allegro.tech.servicemesh.envoycontrol.utils.measureDiscardedItems +import pl.allegro.tech.servicemesh.envoycontrol.utils.CHECKPOINT_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.METRIC_EMITTER_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.REACTOR_METRIC +import reactor.core.observability.micrometer.Micrometer import reactor.core.publisher.Flux import reactor.core.publisher.FluxSink import java.time.Duration @@ -51,11 +56,14 @@ class ConsulServiceChanges( }, FluxSink.OverflowStrategy.LATEST ) - .measureDiscardedItems("consul-service-changes-emitted", metrics.meterRegistry) + .measureDiscardedItems("consul-service-changes", metrics.meterRegistry) .checkpoint("consul-service-changes-emitted") - .name("consul-service-changes-emitted").metrics() + .name(REACTOR_METRIC) + .tag(METRIC_EMITTER_TAG, "consul-service-changes") + .tag(CHECKPOINT_TAG, "emitted") .checkpoint("consul-service-changes-emitted-distinct") - .name("consul-service-changes-emitted-distinct").metrics() + .tag(CHECKPOINT_TAG, "distinct") + .tap(Micrometer.metrics(metrics.meterRegistry)) .doOnCancel { logger.warn("Cancelling watching consul service changes") watcher.close() @@ -226,7 +234,7 @@ class ConsulServiceChanges( if (ready) { val stopTimer = System.currentTimeMillis() readinessStateHandler.ready() - metrics.meterRegistry.timer("envoy-control.warmup.seconds") + metrics.meterRegistry.timer(ENVOY_CONTROL_WARM_UP_METRIC) .record( stopTimer - startTimer, TimeUnit.SECONDS diff --git a/envoy-control-tests/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/MetricsDiscoveryServerCallbacksTest.kt b/envoy-control-tests/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/MetricsDiscoveryServerCallbacksTest.kt index ea54ec40f..b7c9fda0a 100644 --- a/envoy-control-tests/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/MetricsDiscoveryServerCallbacksTest.kt +++ b/envoy-control-tests/src/main/kotlin/pl/allegro/tech/servicemesh/envoycontrol/MetricsDiscoveryServerCallbacksTest.kt @@ -20,6 +20,11 @@ import pl.allegro.tech.servicemesh.envoycontrol.server.callbacks.MetricsDiscover import pl.allegro.tech.servicemesh.envoycontrol.server.callbacks.MetricsDiscoveryServerCallbacks.StreamType.RDS import pl.allegro.tech.servicemesh.envoycontrol.server.callbacks.MetricsDiscoveryServerCallbacks.StreamType.SDS import pl.allegro.tech.servicemesh.envoycontrol.server.callbacks.MetricsDiscoveryServerCallbacks.StreamType.UNKNOWN +import pl.allegro.tech.servicemesh.envoycontrol.utils.CONNECTION_TYPE_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.CONNECTIONS_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.DISCOVERY_REQ_TYPE_TAG +import pl.allegro.tech.servicemesh.envoycontrol.utils.REQUESTS_METRIC +import pl.allegro.tech.servicemesh.envoycontrol.utils.STREAM_TYPE_TAG import java.util.function.Consumer import java.util.function.Predicate @@ -228,18 +233,20 @@ interface MetricsDiscoveryServerCallbacksTest { // given val meterRegistry = envoyControl().app.meterRegistry() consul().server.operations.registerService(service(), name = "echo") - + for (meter in meterRegistry.meters) { + print(meter.toString()) + } // expect untilAsserted { expectedGrpcConnectionsGaugeValues().forEach { (type, value) -> - val metric = "connections" + val metric = CONNECTIONS_METRIC assertThat( meterRegistry.find(metric) - .tags(Tags.of("stream-type", type.name.lowercase(), "connection-type", "grpc")).gauge() + .tags(Tags.of(STREAM_TYPE_TAG, type.name.lowercase(), CONNECTION_TYPE_TAG, "grpc")).gauge() ).isNotNull assertThat( meterRegistry.get(metric) - .tags(Tags.of("stream-type", type.name.lowercase(), "connection-type", "grpc")).gauge().value() + .tags(Tags.of(STREAM_TYPE_TAG, type.name.lowercase(), CONNECTION_TYPE_TAG, "grpc")).gauge().value() .toInt() ).isEqualTo(value) } @@ -261,8 +268,8 @@ interface MetricsDiscoveryServerCallbacksTest { private fun assertCondition(type: String, condition: Predicate, reqTpe: String) { val counterValue = - envoyControl().app.meterRegistry().find("requests.total") - .tags(Tags.of("stream-type", type, "discovery-request-type", reqTpe, "connection-type", "grpc")) + envoyControl().app.meterRegistry().find(REQUESTS_METRIC) + .tags(Tags.of(STREAM_TYPE_TAG, type, DISCOVERY_REQ_TYPE_TAG, reqTpe, CONNECTION_TYPE_TAG, "grpc")) .counter()?.count()?.toInt() logger.info("$type $counterValue") assertThat(counterValue).satisfies(Consumer { condition.test(it) })