diff --git a/.github/jsonnetfile.json b/.github/jsonnetfile.json
index 92b8f0db112b4..2eb94153d1d1d 100644
--- a/.github/jsonnetfile.json
+++ b/.github/jsonnetfile.json
@@ -8,7 +8,7 @@
"subdir": "workflows"
}
},
- "version": "124c4d996f9625478a79f1884465e29ea082d224"
+ "version": "adca1c07a2199374e1646e62331926509699368b"
}
],
"legacyImports": true
diff --git a/.github/jsonnetfile.lock.json b/.github/jsonnetfile.lock.json
index 4ec13c9337380..3077ba9f2d55b 100644
--- a/.github/jsonnetfile.lock.json
+++ b/.github/jsonnetfile.lock.json
@@ -8,8 +8,8 @@
"subdir": "workflows"
}
},
- "version": "124c4d996f9625478a79f1884465e29ea082d224",
- "sum": "8wrJURq48ZBAtZcReO1W7AiXmvUyLqb932Q9sXyfFVo="
+ "version": "adca1c07a2199374e1646e62331926509699368b",
+ "sum": "/6NMt3DFr1mpaBxncbwBJVV5vBpAMIyP3XNOoFArz5Q="
}
],
"legacyImports": false
diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet
index 62f065b40288a..b77d8fe615ec9 100644
--- a/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet
+++ b/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet
@@ -176,7 +176,7 @@ local pullRequestFooter = 'Merging this PR will release the [artifacts](https://
step.new('download images')
+ step.withRun(|||
echo "downloading images to $(pwd)/images"
- gsutil cp -r gs://loki-build-artifacts/${{ needs.createRelease.outputs.sha }}/images .
+ gsutil cp -r gs://${BUILD_ARTIFACTS_BUCKET}/${{ needs.createRelease.outputs.sha }}/images .
|||),
step.new('publish docker images', './lib/actions/push-images')
+ step.with({
diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/validate.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/validate.libsonnet
index 28df05dd215d6..40bf097049e83 100644
--- a/.github/vendor/github.com/grafana/loki-release/workflows/validate.libsonnet
+++ b/.github/vendor/github.com/grafana/loki-release/workflows/validate.libsonnet
@@ -24,15 +24,6 @@ local setupValidationDeps = function(job) job {
smoke_test: '${binary} --version',
tar_args: 'xvf',
}),
- step.new('install jsonnetfmt', './lib/actions/install-binary')
- + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
- + step.with({
- binary: 'jsonnetfmt',
- version: '0.18.0',
- download_url: 'https://github.com/google/go-jsonnet/releases/download/v${version}/go-jsonnet_${version}_Linux_x86_64.tar.gz',
- tarball_binary_path: '${binary}',
- smoke_test: '${binary} --version',
- }),
] + job.steps,
};
@@ -44,45 +35,77 @@ local validationJob = _validationJob(false);
+ step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ step.withRun(common.makeTarget(target)),
- test: setupValidationDeps(
- validationJob
- + job.withSteps([
- validationMakeStep('test', 'test'),
- ])
- ),
+ // Test jobs
+ collectPackages: job.new()
+ + job.withSteps([
+ common.checkout,
+ common.fixDubiousOwnership,
+ step.new('gather packages')
+ + step.withId('gather-tests')
+ + step.withRun(|||
+ echo "packages=$(find . -path '*_test.go' -printf '%h\n' \
+ | grep -e "pkg/push" -e "integration" -e "operator" -e "lambda-promtail" -e "helm" -v \
+ | cut -d / -f 2,3 \
+ | uniq \
+ | sort \
+ | jq --raw-input --slurp --compact-output 'split("\n")[:-1]')" >> ${GITHUB_OUTPUT}
+ |||),
+ ])
+ + job.withOutputs({
+ packages: '${{ steps.gather-tests.outputs.packages }}',
+ }),
- integration: setupValidationDeps(
- validationJob
- + job.withSteps([
- validationMakeStep('integration', 'test-integration'),
- ])
- ),
+ integration: validationJob
+ + job.withSteps([
+ common.checkout,
+ common.fixDubiousOwnership,
+ validationMakeStep('integration', 'test-integration'),
+ ]),
- lint: setupValidationDeps(
- validationJob
- + job.withSteps(
- [
- validationMakeStep('lint', 'lint'),
- validationMakeStep('lint jsonnet', 'lint-jsonnet'),
- validationMakeStep('lint scripts', 'lint-scripts'),
- step.new('check format')
- + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
- + step.withRun(|||
- git fetch origin
- make check-format
- |||),
- ] + [
- step.new('golangci-lint', 'golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5')
- + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
- + step.with({
- version: '${{ inputs.golang_ci_lint_version }}',
- 'only-new-issues': true,
- }),
- ],
- )
- ),
+ testPackages: validationJob
+ + job.withNeeds(['collectPackages'])
+ + job.withStrategy({
+ matrix: {
+ package: '${{fromJson(needs.collectPackages.outputs.packages)}}',
+ },
+ })
+ + job.withSteps([
+ common.checkout,
+ common.fixDubiousOwnership,
+ step.new('test ${{ matrix.package }}')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.withRun(|||
+ gotestsum -- -covermode=atomic -coverprofile=coverage.txt -p=4 ./${{ matrix.package }}/...
+ |||),
+ ]),
- check: setupValidationDeps(
+
+ testLambdaPromtail: validationJob
+ + job.withSteps([
+ common.checkout,
+ common.fixDubiousOwnership,
+ step.new('test lambda-promtail')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.withWorkingDirectory('tools/lambda-promtail')
+ + step.withRun(|||
+ gotestsum -- -covermode=atomic -coverprofile=coverage.txt -p=4 ./...
+ |||),
+ ]),
+
+ testPushPackage: validationJob
+ + job.withSteps([
+ common.checkout,
+ common.fixDubiousOwnership,
+ step.new('test push package')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.withWorkingDirectory('pkg/push')
+ + step.withRun(|||
+ gotestsum -- -covermode=atomic -coverprofile=coverage.txt -p=4 ./...
+ |||),
+ ]),
+
+ // Check / lint jobs
+ checkFiles: setupValidationDeps(
validationJob
+ job.withSteps([
validationMakeStep('check generated files', 'check-generated-files'),
@@ -115,4 +138,94 @@ local validationJob = _validationJob(false);
],
}
),
+
+ faillint:
+ validationJob
+ + job.withSteps([
+ common.checkout,
+ common.fixDubiousOwnership,
+ step.new('faillint')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.withRun(|||
+ faillint -paths "sync/atomic=go.uber.org/atomic" ./...
+
+ |||),
+ ]),
+
+ golangciLint: setupValidationDeps(
+ validationJob
+ + job.withSteps(
+ [
+ step.new('golangci-lint', 'golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.with({
+ version: '${{ inputs.golang_ci_lint_version }}',
+ 'only-new-issues': true,
+ }),
+ ],
+ )
+ ),
+
+ lintFiles: setupValidationDeps(
+ validationJob
+ + job.withSteps(
+ [
+ validationMakeStep('lint scripts', 'lint-scripts'),
+ step.new('check format')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.withRun(|||
+ git fetch origin
+ make check-format
+ |||),
+ ]
+ )
+ ),
+
+ failCheck: job.new()
+ + job.withNeeds([
+ 'checkFiles',
+ 'faillint',
+ 'golangciLint',
+ 'lintFiles',
+ 'integration',
+ 'testLambdaPromtail',
+ 'testPackages',
+ 'testPushPackage',
+ ])
+ + job.withEnv({
+ SKIP_VALIDATION: '${{ inputs.skip_validation }}',
+ })
+ + job.withIf("${{ !fromJSON(inputs.skip_validation) && (cancelled() || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'failure')) }}")
+ + job.withSteps([
+ common.checkout,
+ step.new('verify checks passed')
+ + step.withRun(|||
+ echo "Some checks have failed!"
+ exit 1
+ |||),
+ ]),
+
+ check: job.new()
+ + job.withNeeds([
+ 'checkFiles',
+ 'faillint',
+ 'golangciLint',
+ 'lintFiles',
+ 'integration',
+ 'testLambdaPromtail',
+ 'testPackages',
+ 'testPushPackage',
+ ])
+ + job.withEnv({
+ SKIP_VALIDATION: '${{ inputs.skip_validation }}',
+ })
+ + job.withSteps([
+ common.checkout,
+ step.new('checks passed')
+ + step.withIf('${{ !fromJSON(env.SKIP_VALIDATION) }}')
+ + step.withRun(|||
+ echo "All checks passed"
+ |||),
+ ]),
+
}
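The `faillint` job above enforces the use of `go.uber.org/atomic` instead of the standard `sync/atomic` package. A minimal sketch of the preferred style, assuming the `go.uber.org/atomic` module is available; this is an illustration, not code from this PR:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic" // faillint steers code here instead of "sync/atomic"
)

func main() {
	requests := atomic.NewInt64(0)

	requests.Inc()
	requests.Add(2)

	fmt.Println(requests.Load()) // prints 3
}
```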
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 0bbf7eb48465f..9a87b37917757 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -138,7 +138,7 @@ jobs:
- name: "download images"
run: |
echo "downloading images to $(pwd)/images"
- gsutil cp -r gs://loki-build-artifacts/${{ needs.createRelease.outputs.sha }}/images .
+ gsutil cp -r gs://${BUILD_ARTIFACTS_BUCKET}/${{ needs.createRelease.outputs.sha }}/images .
- name: "publish docker images"
uses: "./lib/actions/push-images"
with:
diff --git a/Makefile b/Makefile
index 538ca8ea287ad..dbfc32b660b36 100644
--- a/Makefile
+++ b/Makefile
@@ -906,6 +906,13 @@ release-workflows:
.PHONY: release-workflows-check
release-workflows-check:
+ifeq ($(BUILD_IN_CONTAINER),true)
+ $(SUDO) docker run $(RM) $(TTY) -i \
+ -v $(shell go env GOPATH)/pkg:/go/pkg$(MOUNT_FLAGS) \
+ -v $(shell pwd):/src/loki$(MOUNT_FLAGS) \
+ $(IMAGE_PREFIX)/loki-build-image:$(BUILD_IMAGE_VERSION) $@;
+else
@$(MAKE) release-workflows
@echo "Checking diff"
@git diff --exit-code -- ".github/workflows/*release*" || (echo "Please build release workflows by running 'make release-workflows'" && false)
+endif
diff --git a/clients/pkg/promtail/discovery/consulagent/consul.go b/clients/pkg/promtail/discovery/consulagent/consul.go
index 2a08498efea69..89e69dfe59eb6 100644
--- a/clients/pkg/promtail/discovery/consulagent/consul.go
+++ b/clients/pkg/promtail/discovery/consulagent/consul.go
@@ -316,7 +316,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
ticker := time.NewTicker(d.refreshInterval)
// Watched services and their cancellation functions.
- services := make(map[string]func())
+ services := make(map[string]func(error))
for {
select {
@@ -340,7 +340,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
// Watch the catalog for new services we would like to watch. This is called only
// when we don't know yet the names of the services and need to ask Consul the
// entire list of services.
-func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, services map[string]func()) {
+func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, services map[string]func(error)) {
agent := d.client.Agent()
level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ","))
@@ -378,7 +378,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
continue // We are already watching the service.
}
- wctx, cancel := context.WithCancel(ctx)
+ wctx, cancel := context.WithCancelCause(ctx)
d.watchService(wctx, ch, name)
services[name] = cancel
}
@@ -390,7 +390,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
"msg", "removing service since consul no longer has a record of it",
"name", name)
// Call the watch cancellation function.
- cancel()
+ cancel(errors.New("canceling service since consul no longer has a record of it"))
delete(services, name)
// Send clearing target group.
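For context on the `context.WithCancelCause` change above, here is a small standalone Go sketch (not Loki code) showing how a cause passed to `cancel(err)` can later be read back with `context.Cause`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Cancel with an explanatory error instead of a bare cancellation.
	cancel(errors.New("canceling service since consul no longer has a record of it"))

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // canceling service since consul no longer has a record of it
}
```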
diff --git a/cmd/loki/loki-local-config.yaml b/cmd/loki/loki-local-config.yaml
index 03b579647753a..e2c54d5452790 100644
--- a/cmd/loki/loki-local-config.yaml
+++ b/cmd/loki/loki-local-config.yaml
@@ -3,6 +3,7 @@ auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
+ log_level: debug
common:
instance_addr: 127.0.0.1
@@ -33,6 +34,12 @@ schema_config:
prefix: index_
period: 24h
+pattern_ingester:
+ enabled: true
+ metric_aggregation:
+ enabled: true
+ log_push_observations: true
+
ruler:
alertmanager_url: http://localhost:9093
diff --git a/cmd/loki/main.go b/cmd/loki/main.go
index 401085b3aab11..bb839c6cf3ec8 100644
--- a/cmd/loki/main.go
+++ b/cmd/loki/main.go
@@ -60,6 +60,10 @@ func main() {
serverCfg := &config.Server
serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.DefaultRegisterer, false)
+ if config.InternalServer.Enable {
+ config.InternalServer.Log = serverCfg.Log
+ }
+
// Validate the config once both the config file has been loaded
// and CLI flags parsed.
if err := config.Validate(); err != nil {
@@ -102,6 +106,8 @@ func main() {
}()
}
+ setProfilingOptions(config.Profiling)
+
// Allocate a block of memory to reduce the frequency of garbage collection.
// The larger the ballast, the lower the garbage collection frequency.
// https://github.com/grafana/loki/issues/781
@@ -123,3 +129,15 @@ func main() {
err = t.Run(loki.RunOpts{StartTime: startTime})
util_log.CheckFatal("running loki", err, util_log.Logger)
}
+
+func setProfilingOptions(cfg loki.ProfilingConfig) {
+ if cfg.BlockProfileRate > 0 {
+ runtime.SetBlockProfileRate(cfg.BlockProfileRate)
+ }
+ if cfg.CPUProfileRate > 0 {
+ runtime.SetCPUProfileRate(cfg.CPUProfileRate)
+ }
+ if cfg.MutexProfileFraction > 0 {
+ runtime.SetMutexProfileFraction(cfg.MutexProfileFraction)
+ }
+}
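As a reminder of what the new profiling options control, here is a minimal standalone Go sketch (not Loki code) showing how the runtime knobs gate the corresponding `pprof` profiles:

```go
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func main() {
	// Without these calls the block and mutex profiles stay empty.
	runtime.SetBlockProfileRate(1)      // a rate of 1 records every blocking event
	runtime.SetMutexProfileFraction(10) // report roughly 1 in 10 contention events

	var mu sync.Mutex
	mu.Lock()
	go func() { time.Sleep(10 * time.Millisecond); mu.Unlock() }()
	mu.Lock() // contended lock, recorded by the profiles enabled above
	mu.Unlock()

	f, _ := os.Create("block.pprof")
	defer f.Close()
	pprof.Lookup("block").WriteTo(f, 0) // inspect with `go tool pprof block.pprof`
}
```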
diff --git a/docs/sources/_index.md b/docs/sources/_index.md
index 3428c662cd50f..822c01fd6a7ce 100644
--- a/docs/sources/_index.md
+++ b/docs/sources/_index.md
@@ -1,19 +1,48 @@
---
-title: Grafana Loki documentation
-description: "Technical documentation for Grafana Loki"
+title: Grafana Loki
+description: Grafana Loki is a set of open source components that can be composed into a fully featured logging stack.
aliases:
- /docs/loki/
weight: 100
+hero:
+ title: Grafana Loki
+ level: 1
+ image: /media/docs/loki/logo-grafana-loki.png
+ width: 110
+ height: 110
+ description: Grafana Loki is a set of open source components that can be composed into a fully featured logging stack. A small index and highly compressed chunks simplify the operation and significantly lower the cost of Loki.
+cards:
+ title_class: pt-0 lh-1
+ items:
+ - title: Learn about Loki
+ href: /docs/loki/latest/get-started/
+ description: Learn about the Loki architecture and components, the various deployment modes, and best practices for labels.
+ - title: Set up Loki
+ href: /docs/loki/latest/setup/
+ description: View instructions for how to configure and install Loki, migrate from previous deployments, and upgrade your Loki environment.
+ - title: Configure Loki
+ href: /docs/loki/latest/configure/
+ description: View the Loki configuration reference and configuration examples.
+ - title: Send logs to Loki
+ href: /docs/loki/latest/send-data/
+ description: Select one or more clients to use to send your logs to Loki.
+ - title: Manage Loki
+ href: /docs/loki/latest/operations/
+ description: Learn how to manage tenants, log ingestion, storage, queries, and more.
+ - title: Query with LogQL
+ href: /docs/loki/latest/query/
+ description: Inspired by PromQL, LogQL is Grafana Loki’s query language. LogQL uses labels and operators for filtering.
---
-# Grafana Loki documentation
+{{< docs/hero-simple key="hero" >}}
-
+---
-Grafana Loki is a set of components that can be composed into a fully featured logging stack.
+## Overview
-Unlike other logging systems, Loki is built around the idea of only indexing metadata about your logs: labels (just like Prometheus labels).
+Unlike other logging systems, Loki is built around the idea of only indexing metadata about your logs, such as labels (just like Prometheus labels).
Log data itself is then compressed and stored in chunks in object stores such as Amazon Simple Storage Service (S3) or Google Cloud Storage (GCS), or even locally on the filesystem.
-A small index and highly compressed chunks simplifies the operation and significantly lowers the cost of Loki.
-For more information, see the [Loki overview]({{< relref "./get-started/overview" >}}).
+## Explore
+
+{{< card-grid key="cards" type="simple" >}}
diff --git a/docs/sources/get-started/labels/_index.md b/docs/sources/get-started/labels/_index.md
index e33f36d91f419..db918450bd9e1 100644
--- a/docs/sources/get-started/labels/_index.md
+++ b/docs/sources/get-started/labels/_index.md
@@ -37,7 +37,7 @@ Loki places the same restrictions on label naming as [Prometheus](https://promet
This series of examples will illustrate basic use cases and concepts for labeling in Loki.
-Let's take an example:
+Let's take an example Promtail/Alloy config file:
```yaml
scrape_configs:
diff --git a/docs/sources/get-started/labels/bp-labels.md b/docs/sources/get-started/labels/bp-labels.md
index aa10867e1e61b..ade25d4fbaa32 100644
--- a/docs/sources/get-started/labels/bp-labels.md
+++ b/docs/sources/get-started/labels/bp-labels.md
@@ -50,7 +50,7 @@ As a general rule, you should try to keep any single tenant in Loki to less than
## Be aware of dynamic labels applied by clients
-Loki has several client options: [Grafana Alloy](https://grafana.com/docs/alloy/latest/), [Promtail](https://grafana.com/docs/loki//send-data/promtail/) (which also supports systemd journal ingestion and TCP-based syslog ingestion), [Fluentd](https://grafana.com/docs/loki//send-data/fluentd/), [Fluent Bit](https://grafana.com/docs/loki//send-data/fluentbit/), a [Docker plugin](https://grafana.com/docs/loki/MLOKI_VERSION>/send-data/docker-driver/), and more.
+Loki has several client options: [Grafana Alloy](https://grafana.com/docs/alloy/latest/), [Promtail](https://grafana.com/docs/loki//send-data/promtail/) (which also supports systemd journal ingestion and TCP-based syslog ingestion), [Fluentd](https://grafana.com/docs/loki//send-data/fluentd/), [Fluent Bit](https://grafana.com/docs/loki//send-data/fluentbit/), a [Docker plugin](https://grafana.com/docs/loki/send-data/docker-driver/), and more.
Each of these come with ways to configure what labels are applied to create log streams. But be aware of what dynamic labels might be applied.
Use the Loki series API to get an idea of what your log streams look like and see if there might be ways to reduce streams and cardinality.
diff --git a/docs/sources/query/log_queries/_index.md b/docs/sources/query/log_queries/_index.md
index 8f0e436713c6d..3457f9637147e 100644
--- a/docs/sources/query/log_queries/_index.md
+++ b/docs/sources/query/log_queries/_index.md
@@ -223,9 +223,9 @@ For example with `cluster="namespace"` the cluster is the label identifier, the
We support multiple **value** types which are automatically inferred from the query input.
- **String** is double quoted or backticked such as `"200"` or \``us-central1`\`.
-- **[Duration](https://golang.org/pkg/time/#ParseDuration)** is a sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+- **[Duration](https://golang.org/pkg/time/#ParseDuration)** is a sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value of the label identifier used for comparison must be a string with a unit suffix to be parsed correctly, such as "0.10ms" or "1h30m". Optionally, `label_format` can be used to modify the value and append the unit before making the comparison.
- **Number** are floating-point number (64bits), such as`250`, `89.923`.
-- **Bytes** is a sequence of decimal numbers, each with optional fraction and a unit suffix, such as "42MB", "1.5Kib" or "20b". Valid bytes units are "b", "kib", "kb", "mib", "mb", "gib", "gb", "tib", "tb", "pib", "pb", "eib", "eb".
+- **Bytes** is a sequence of decimal numbers, each with optional fraction and a unit suffix, such as "42MB", "1.5KiB" or "20B". Valid bytes units are "B", "kB", "MB", "GB", "TB", "KB", "KiB", "MiB", "GiB", "TiB".
String type work exactly like Prometheus label matchers use in [log stream selector](#log-stream-selector). This means you can use the same operations (`=`,`!=`,`=~`,`!~`).
@@ -247,10 +247,10 @@ You can chain multiple predicates using `and` and `or` which respectively expres
This means that all the following expressions are equivalent:
```logql
-| duration >= 20ms or size == 20kb and method!~"2.."
-| duration >= 20ms or size == 20kb | method!~"2.."
-| duration >= 20ms or size == 20kb , method!~"2.."
-| duration >= 20ms or size == 20kb method!~"2.."
+| duration >= 20ms or size == 20KB and method!~"2.."
+| duration >= 20ms or size == 20KB | method!~"2.."
+| duration >= 20ms or size == 20KB , method!~"2.."
+| duration >= 20ms or size == 20KB method!~"2.."
```
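The duration note added above says the compared label value must itself carry a unit suffix; a short Go sketch using `time.ParseDuration` (the parser the docs link to) illustrates why:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A unit suffix makes the value a valid duration.
	d, err := time.ParseDuration("0.10ms")
	fmt.Println(d, err) // 100µs <nil>

	// A bare number cannot be parsed as a duration.
	_, err = time.ParseDuration("0.10")
	fmt.Println(err) // time: missing unit in duration "0.10"
}
```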
diff --git a/docs/sources/reference/loki-http-api.md b/docs/sources/reference/loki-http-api.md
index c9df1a2896d74..53dba2a6ab1b3 100644
--- a/docs/sources/reference/loki-http-api.md
+++ b/docs/sources/reference/loki-http-api.md
@@ -267,10 +267,16 @@ curl -H "Content-Type: application/json" \
POST /otlp/v1/logs
```
-`/otlp/v1/logs` lets the OpenTelemetry Collector send logs to Loki using `otlphttp` procotol.
+`/otlp/v1/logs` lets the OpenTelemetry Collector send logs to Loki using `otlphttp` protocol.
For information on how to configure Loki, refer to the [OTel Collector topic](https://grafana.com/docs/loki//send-data/otel/).
+
+{{< admonition type="note" >}}
+When configuring the OpenTelemetry Collector, you must use `endpoint: http://:3100/otlp`, as the collector automatically appends the signal-specific path (for example, `/v1/logs`) to the endpoint. Entering the full endpoint will generate an error.
+{{< /admonition >}}
+
+
## Query logs at a single point in time
```bash
@@ -627,7 +633,7 @@ It accepts the following query parameters in the URL:
- `start`: The start time for the query as a nanosecond Unix epoch. Defaults to 6 hours ago.
- `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now.
- `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter.
-- `query`: A set of log stream selector that selects the streams to match and return label names. Example: `{"app": "myapp", "environment": "dev"}`
+- `query`: Log stream selector that selects the streams to match and return label names. Example: `{app="myapp", environment="dev"}`
In microservices mode, `/loki/api/v1/labels` is exposed by the querier.
@@ -677,7 +683,7 @@ It accepts the following query parameters in the URL:
- `start`: The start time for the query as a nanosecond Unix epoch. Defaults to 6 hours ago.
- `end`: The end time for the query as a nanosecond Unix epoch. Defaults to now.
- `since`: A `duration` used to calculate `start` relative to `end`. If `end` is in the future, `start` is calculated as this duration before now. Any value specified for `start` supersedes this parameter.
-- `query`: A set of log stream selector that selects the streams to match and return label values for ``. Example: `{"app": "myapp", "environment": "dev"}`
+- `query`: Log stream selector that selects the streams to match and return label values for ``. Example: `{app="myapp", environment="dev"}`
In microservices mode, `/loki/api/v1/label//values` is exposed by the querier.
@@ -798,7 +804,7 @@ gave this response:
## Query log statistics
```bash
-GET `/loki/api/v1/index/stats`
+GET /loki/api/v1/index/stats
```
The `/loki/api/v1/index/stats` endpoint can be used to query the index for the number of `streams`, `chunks`, `entries`, and `bytes` that a query resolves to.
diff --git a/docs/sources/send-data/docker-driver/configuration.md b/docs/sources/send-data/docker-driver/configuration.md
index 64195544b014d..afa982b0b57cb 100644
--- a/docs/sources/send-data/docker-driver/configuration.md
+++ b/docs/sources/send-data/docker-driver/configuration.md
@@ -110,7 +110,7 @@ Stack name and service name for each swarm service and project name and service
## Labels
-Loki can received a set of labels along with log line. These labels are used to index log entries and query back logs using [LogQL stream selector]({{< relref "../../query/log_queries#log-stream-selector" >}}).
+Loki can receive a set of labels along with each log line. These labels are used to index log entries and query back logs using [LogQL stream selector]({{< relref "../../query/log_queries#log-stream-selector" >}}).
By default, the Docker driver will add the following labels to each log line:
@@ -128,7 +128,7 @@ next section for all supported options.
## Pipeline stages
While you can provide `loki-pipeline-stage-file` it can be hard to mount the configuration file to the driver root filesystem.
-This is why another option `loki-pipeline-stages` is available allowing your to pass a list of stages inlined. Pipeline stages are run at last on every lines.
+This is why another option `loki-pipeline-stages` is available, allowing you to pass a list of stages inline. Pipeline stages are run last on every line.
The example [docker-compose](https://github.com/grafana/loki/blob/main/clients/cmd/docker-driver/docker-compose.yaml) below configures 2 stages, one to extract level values and one to set it as a label:
diff --git a/docs/sources/send-data/k6/_index.md b/docs/sources/send-data/k6/_index.md
index 44e2f15ce17a9..54cf0ada97d6d 100644
--- a/docs/sources/send-data/k6/_index.md
+++ b/docs/sources/send-data/k6/_index.md
@@ -9,8 +9,8 @@ weight: 900
# Using k6 for load testing
-Grafana [k6](https://k6.io) is a modern load-testing tool.
-Its clean and approachable scripting [API](https://k6.io/docs/javascript-api/)
+Grafana [k6](https://grafana.com/oss/k6/) is a modern load-testing tool.
+Its clean and approachable scripting [API](https://grafana.com/docs/k6/latest/javascript-api/)
works locally or in the cloud.
Its configuration makes it flexible.
@@ -55,7 +55,7 @@ Use the custom-built k6 binary in the same way as a non-custom k6 binary:
```
`test.js` is a Javascript load test.
-Refer to the [k6 documentation](https://k6.io/docs/) to get started.
+Refer to the [k6 documentation](https://grafana.com/docs/k6/latest/) to get started.
### Scripting API
@@ -75,7 +75,7 @@ Classes of this module are:
| `Client` | client for writing and reading logs from Loki |
`Config` and `Client` must be called on the k6 init context (see
-[Test life cycle](https://k6.io/docs/using-k6/test-life-cycle/)) outside of the
+[Test life cycle](https://grafana.com/docs/k6/latest/using-k6/test-lifecycle/)) outside of the
default function so the client is only configured once and shared between all
VU iterations.
diff --git a/docs/sources/send-data/k6/query-scenario.md b/docs/sources/send-data/k6/query-scenario.md
index d5b8d38275d4f..95889b2b07c9d 100644
--- a/docs/sources/send-data/k6/query-scenario.md
+++ b/docs/sources/send-data/k6/query-scenario.md
@@ -96,7 +96,7 @@ export default () => {
## Metrics
The extension collects metrics that are printed in the
-[end-of-test summary](https://k6.io/docs/results-visualization/end-of-test-summary/) in addition to the built-in metrics.
+[end-of-test summary](https://grafana.com/docs/k6/latest/results-output/end-of-test/) in addition to the built-in metrics.
These metrics are collected only for instant and range queries.
| name | description |
diff --git a/docs/sources/send-data/k6/write-scenario.md b/docs/sources/send-data/k6/write-scenario.md
index 49425594e2c50..a68d2d62e9615 100644
--- a/docs/sources/send-data/k6/write-scenario.md
+++ b/docs/sources/send-data/k6/write-scenario.md
@@ -59,17 +59,17 @@ These parameters can be adjusted in the load test:
* The way to run k6
- k6 supports three [execution modes](https://k6.io/docs/get-started/running-k6/#execution-modes) to run a test: local, distributed, and cloud.
+ k6 supports three [execution modes](https://grafana.com/docs/k6/latest/get-started/running-k6/#execution-modes) to run a test: local, distributed, and cloud.
Whereas running your k6 load test from a single (local
or remote) machine is easy to set up and fine for smaller Loki clusters,
the single machine does not load test large Loki installations,
because it cannot create the data to saturate the write path.
- For larger tests, consider [these optimizations](https://k6.io/docs/testing-guides/running-large-tests/), or run them in [Grafana Cloud k6](/products/cloud/k6) or a Kubernetes cluster with the [k6 Operator](https://github.com/grafana/k6-operator).
+ For larger tests, consider [these optimizations](https://grafana.com/docs/k6/latest/testing-guides/running-large-tests/), or run them in [Grafana Cloud k6](/products/cloud/k6) or a Kubernetes cluster with the [k6 Operator](https://github.com/grafana/k6-operator).
## Metrics
The extension collects two metrics that are printed in the
-[end-of-test summary](https://k6.io/docs/results-visualization/end-of-test-summary/) in addition to the built-in metrics.
+[end-of-test summary](https://grafana.com/docs/k6/latest/results-output/end-of-test/) in addition to the built-in metrics.
| name | description |
| ---- | ----------- |
@@ -80,7 +80,7 @@ The extension collects two metrics that are printed in the
An HTTP request that successfully pushes logs to Loki
responds with status `204 No Content`.
-The status code should be checked explicitly with a [k6 check](https://k6.io/docs/javascript-api/k6/check-val-sets-tags/).
+The status code should be checked explicitly with a [k6 check](https://grafana.com/docs/k6/latest/javascript-api/k6/check/).
## Javascript example
diff --git a/docs/sources/send-data/otel/_index.md b/docs/sources/send-data/otel/_index.md
index 4b28cbf16c7c2..2d5ee499cdd4c 100644
--- a/docs/sources/send-data/otel/_index.md
+++ b/docs/sources/send-data/otel/_index.md
@@ -12,6 +12,8 @@ weight: 250
Loki natively supports ingesting OpenTelemetry logs over HTTP.
For ingesting logs to Loki using the OpenTelemetry Collector, you must use the [`otlphttp` exporter](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/otlphttpexporter).
+{{< youtube id="snXhe1fDDa8" >}}
+
## Loki configuration
When logs are ingested by Loki using an OpenTelemetry protocol (OTLP) ingestion endpoint, some of the data is stored as [Structured Metadata]({{< relref "../../get-started/labels/structured-metadata" >}}).
@@ -30,7 +32,7 @@ You need to make the following changes to the [OpenTelemetry Collector config](h
```yaml
exporters:
otlphttp:
- endpoint: http://:3100/otlp/v1/logs
+ endpoint: http://:3100/otlp
```
And enable it in `service.pipelines`:
@@ -57,7 +59,7 @@ exporters:
otlphttp:
auth:
authenticator: basicauth/otlp
- endpoint: http://:3100/otlp/v1/logs
+ endpoint: http://:3100/otlp
service:
extensions: [basicauth/otlp]
diff --git a/docs/sources/send-data/promtail/stages/timestamp.md b/docs/sources/send-data/promtail/stages/timestamp.md
index 2512fedaa60f0..f1b08143fd579 100644
--- a/docs/sources/send-data/promtail/stages/timestamp.md
+++ b/docs/sources/send-data/promtail/stages/timestamp.md
@@ -89,6 +89,11 @@ should be used in the custom format.
| Timezone offset | `-0700`, `-070000` (with seconds), `-07`, `07:00`, `-07:00:00` (with seconds) |
| Timezone ISO-8601 | `Z0700` (Z for UTC or time offset), `Z070000`, `Z07`, `Z07:00`, `Z07:00:00` |
+To correctly format the time for the example timestamp `2006/01/02 03:04:05.000`:
+
+- If you want a 24-hour format, use `15:04:05.000`.
+- If you want a 12-hour format, use either `3:04:05.000 PM` or `03:04:05.000 PM`.
+
### Action on Failure
The `action_on_failure` setting defines which action should be taken by the
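Since the custom formats in the timestamp stage follow Go's reference time, here is a small Go sketch (illustrative only, with made-up input timestamps) of the 24-hour and 12-hour layouts noted above:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// 24-hour layout for timestamps like 2024/07/01 18:30:45.123.
	t1, err := time.Parse("2006/01/02 15:04:05.000", "2024/07/01 18:30:45.123")
	fmt.Println(t1, err)

	// 12-hour layout with an explicit AM/PM marker.
	t2, err := time.Parse("2006/01/02 03:04:05.000 PM", "2024/07/01 06:30:45.123 PM")
	fmt.Println(t2, err)
}
```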
diff --git a/docs/sources/setup/install/helm/install-microservices/_index.md b/docs/sources/setup/install/helm/install-microservices/_index.md
index 71f94673fe53c..4afca42d10b3e 100644
--- a/docs/sources/setup/install/helm/install-microservices/_index.md
+++ b/docs/sources/setup/install/helm/install-microservices/_index.md
@@ -48,73 +48,73 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu
3. Create the configuration file `values.yaml`. The example below illustrates how to deploy Loki in test mode using MinIO as storage:
```yaml
- loki:
- schemaConfig:
- configs:
- - from: 2024-04-01
- store: tsdb
- object_store: s3
- schema: v13
- index:
- prefix: loki_index_
- period: 24h
- ingester:
- chunk_encoding: snappy
- tracing:
- enabled: true
- querier:
- # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing
- max_concurrent: 4
-
- #gateway:
- # ingress:
- # enabled: true
- # hosts:
- # - host: FIXME
- # paths:
- # - path: /
- # pathType: Prefix
-
- deploymentMode: Distributed
-
- ingester:
- replicas: 3
- querier:
- replicas: 3
- maxUnavailable: 2
- queryFrontend:
- replicas: 2
- maxUnavailable: 1
- queryScheduler:
- replicas: 2
- distributor:
- replicas: 3
- maxUnavailable: 2
- compactor:
- replicas: 1
- indexGateway:
- replicas: 2
- maxUnavailable: 1
-
- bloomCompactor:
- replicas: 0
- bloomGateway:
- replicas: 0
-
- # Enable minio for storage
- minio:
- enabled: true
-
- # Zero out replica counts of other deployment modes
- backend:
- replicas: 0
- read:
- replicas: 0
- write:
- replicas: 0
-
- singleBinary:
- replicas: 0
+ loki:
+ schemaConfig:
+ configs:
+ - from: 2024-04-01
+ store: tsdb
+ object_store: s3
+ schema: v13
+ index:
+ prefix: loki_index_
+ period: 24h
+ ingester:
+ chunk_encoding: snappy
+ tracing:
+ enabled: true
+ querier:
+ # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing
+ max_concurrent: 4
+
+ #gateway:
+ # ingress:
+ # enabled: true
+ # hosts:
+ # - host: FIXME
+ # paths:
+ # - path: /
+ # pathType: Prefix
+
+ deploymentMode: Distributed
+
+ ingester:
+ replicas: 3
+ querier:
+ replicas: 3
+ maxUnavailable: 2
+ queryFrontend:
+ replicas: 2
+ maxUnavailable: 1
+ queryScheduler:
+ replicas: 2
+ distributor:
+ replicas: 3
+ maxUnavailable: 2
+ compactor:
+ replicas: 1
+ indexGateway:
+ replicas: 2
+ maxUnavailable: 1
+
+ bloomCompactor:
+ replicas: 0
+ bloomGateway:
+ replicas: 0
+
+ # Enable minio for storage
+ minio:
+ enabled: true
+
+ # Zero out replica counts of other deployment modes
+ backend:
+ replicas: 0
+ read:
+ replicas: 0
+ write:
+ replicas: 0
+
+ singleBinary:
+ replicas: 0
```
4. Install or upgrade the Loki deployment.
@@ -167,6 +167,10 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu
After testing Loki with MinIO, it is recommended to configure Loki with an object storage provider. The following examples shows how to configure Loki with different object storage providers:
+{{< admonition type="caution" >}}
+When deploying Loki using S3 storage, **DO NOT** use the default bucket names: `chunks`, `ruler`, and `admin`. Choose a unique name for each bucket. For more information, see the [security update](https://grafana.com/blog/2024/06/27/grafana-security-update-grafana-loki-and-unintended-data-write-attempts-to-amazon-s3-buckets/). This caution does not apply when you are using MinIO; with MinIO we recommend using the default bucket names.
+{{< /admonition >}}
+
{{< code >}}
```s3
@@ -192,9 +196,9 @@ After testing Loki with MinIO, it is recommended to configure Loki with an objec
storage:
type: s3
bucketNames:
- chunks: "chunks"
- ruler: "ruler"
- admin: "admin"
+ chunks: ""
+ ruler: ""
+ admin: ""
s3:
# s3 URL can be used to specify the endpoint, access key, secret key, and bucket name
s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name
@@ -343,4 +347,4 @@ To configure other storage providers, refer to the [Helm Chart Reference]({{< re
## Next Steps
* Configure an agent to [send log data to Loki](/docs/loki//send-data/).
-* Monitor the Loki deployment using the [Meta Monitoring Healm chart](/docs/loki//setup/install/helm/monitor-and-alert/)
+* Monitor the Loki deployment using the [Meta Monitoring Helm chart](/docs/loki//setup/install/helm/monitor-and-alert/)
diff --git a/docs/sources/setup/install/helm/install-scalable/_index.md b/docs/sources/setup/install/helm/install-scalable/_index.md
index e27f544b28f0c..a39b6580a90b2 100644
--- a/docs/sources/setup/install/helm/install-scalable/_index.md
+++ b/docs/sources/setup/install/helm/install-scalable/_index.md
@@ -50,68 +50,68 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu
3. Create the configuration file `values.yaml`. The example below illustrates how to deploy Loki in test mode using MinIO as storage:
```yaml
- loki:
- schemaConfig:
- configs:
- - from: 2024-04-01
- store: tsdb
- object_store: s3
- schema: v13
- index:
- prefix: loki_index_
- period: 24h
- ingester:
- chunk_encoding: snappy
- tracing:
- enabled: true
- querier:
- # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing
- max_concurrent: 4
-
- #gateway:
- # ingress:
- # enabled: true
- # hosts:
- # - host: FIXME
- # paths:
- # - path: /
- # pathType: Prefix
-
- deploymentMode: SimpleScalable
-
- backend:
- replicas: 3
- read:
- replicas: 3
- write:
- replicas: 3
-
- # Enable minio for storage
- minio:
- enabled: true
-
- # Zero out replica counts of other deployment modes
- singleBinary:
- replicas: 0
-
- ingester:
- replicas: 0
- querier:
- replicas: 0
- queryFrontend:
- replicas: 0
- queryScheduler:
- replicas: 0
- distributor:
- replicas: 0
- compactor:
- replicas: 0
- indexGateway:
- replicas: 0
- bloomCompactor:
- replicas: 0
- bloomGateway:
- replicas: 0
+ loki:
+ schemaConfig:
+ configs:
+ - from: 2024-04-01
+ store: tsdb
+ object_store: s3
+ schema: v13
+ index:
+ prefix: loki_index_
+ period: 24h
+ ingester:
+ chunk_encoding: snappy
+ tracing:
+ enabled: true
+ querier:
+ # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing
+ max_concurrent: 4
+
+ #gateway:
+ # ingress:
+ # enabled: true
+ # hosts:
+ # - host: FIXME
+ # paths:
+ # - path: /
+ # pathType: Prefix
+
+ deploymentMode: SimpleScalable
+
+ backend:
+ replicas: 3
+ read:
+ replicas: 3
+ write:
+ replicas: 3
+
+ # Enable minio for storage
+ minio:
+ enabled: true
+
+ # Zero out replica counts of other deployment modes
+ singleBinary:
+ replicas: 0
+
+ ingester:
+ replicas: 0
+ querier:
+ replicas: 0
+ queryFrontend:
+ replicas: 0
+ queryScheduler:
+ replicas: 0
+ distributor:
+ replicas: 0
+ compactor:
+ replicas: 0
+ indexGateway:
+ replicas: 0
+ bloomCompactor:
+ replicas: 0
+ bloomGateway:
+ replicas: 0
```
4. Install or upgrade the Loki deployment.
@@ -128,165 +128,169 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu
After testing Loki with MinIO, it is recommended to configure Loki with an object storage provider. The following examples shows how to configure Loki with different object storage providers:
+{{< admonition type="caution" >}}
+When deploying Loki using S3 storage, **DO NOT** use the default bucket names: `chunks`, `ruler`, and `admin`. Choose a unique name for each bucket. For more information, see the [security update](https://grafana.com/blog/2024/06/27/grafana-security-update-grafana-loki-and-unintended-data-write-attempts-to-amazon-s3-buckets/). This caution does not apply when you are using MinIO; with MinIO we recommend using the default bucket names.
+{{< /admonition >}}
+
{{< code >}}
```s3
- loki:
- schemaConfig:
- configs:
- - from: 2024-04-01
- store: tsdb
- object_store: s3
- schema: v13
- index:
- prefix: loki_index_
- period: 24h
- ingester:
- chunk_encoding: snappy
- tracing:
- enabled: true
- querier:
- max_concurrent: 4
-
- storage:
- type: s3
- bucketNames:
- chunks: "chunks"
- ruler: "ruler"
- admin: "admin"
- s3:
- # s3 URL can be used to specify the endpoint, access key, secret key, and bucket name
- s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name
- # AWS endpoint URL
- endpoint:
- # AWS region where the S3 bucket is located
- region:
- # AWS secret access key
- secretAccessKey:
- # AWS access key ID
- accessKeyId:
- # AWS signature version (e.g., v2 or v4)
- signatureVersion:
- # Forces the path style for S3 (true/false)
- s3ForcePathStyle: false
- # Allows insecure (HTTP) connections (true/false)
- insecure: false
- # HTTP configuration settings
- http_config: {}
-
- deploymentMode: SimpleScalable
-
- backend:
- replicas: 3
- read:
- replicas: 3
- write:
- replicas: 3
-
- # Disable minio storage
- minio:
- enabled: false
-
- # Zero out replica counts of other deployment modes
- singleBinary:
- replicas: 0
-
- ingester:
- replicas: 0
- querier:
- replicas: 0
- queryFrontend:
- replicas: 0
- queryScheduler:
- replicas: 0
- distributor:
- replicas: 0
- compactor:
- replicas: 0
- indexGateway:
- replicas: 0
- bloomCompactor:
- replicas: 0
- bloomGateway:
- replicas: 0
+loki:
+ schemaConfig:
+ configs:
+ - from: 2024-04-01
+ store: tsdb
+ object_store: s3
+ schema: v13
+ index:
+ prefix: loki_index_
+ period: 24h
+ ingester:
+ chunk_encoding: snappy
+ tracing:
+ enabled: true
+ querier:
+ max_concurrent: 4
+
+ storage:
+ type: s3
+ bucketNames:
+ chunks: ""
+ ruler: ""
+ admin: ""
+ s3:
+ # s3 URL can be used to specify the endpoint, access key, secret key, and bucket name
+ s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name
+ # AWS endpoint URL
+ endpoint:
+ # AWS region where the S3 bucket is located
+ region:
+ # AWS secret access key
+ secretAccessKey:
+ # AWS access key ID
+ accessKeyId:
+ # AWS signature version (e.g., v2 or v4)
+ signatureVersion:
+ # Forces the path style for S3 (true/false)
+ s3ForcePathStyle: false
+ # Allows insecure (HTTP) connections (true/false)
+ insecure: false
+ # HTTP configuration settings
+ http_config: {}
+
+deploymentMode: SimpleScalable
+
+backend:
+ replicas: 3
+read:
+ replicas: 3
+write:
+ replicas: 3
+
+# Disable minio storage
+minio:
+ enabled: false
+
+# Zero out replica counts of other deployment modes
+singleBinary:
+ replicas: 0
+
+ingester:
+ replicas: 0
+querier:
+ replicas: 0
+queryFrontend:
+ replicas: 0
+queryScheduler:
+ replicas: 0
+distributor:
+ replicas: 0
+compactor:
+ replicas: 0
+indexGateway:
+ replicas: 0
+bloomCompactor:
+ replicas: 0
+bloomGateway:
+ replicas: 0
```
```azure
- loki:
- schemaConfig:
- configs:
- - from: 2024-04-01
- store: tsdb
- object_store: azure
- schema: v13
- index:
- prefix: loki_index_
- period: 24h
- ingester:
- chunk_encoding: snappy
- tracing:
- enabled: true
- querier:
- max_concurrent: 4
-
- storage:
- type: azure
- azure:
- # Name of the Azure Blob Storage account
- accountName:
- # Key associated with the Azure Blob Storage account
- accountKey:
- # Comprehensive connection string for Azure Blob Storage account (Can be used to replace endpoint, accountName, and accountKey)
- connectionString:
- # Flag indicating whether to use Azure Managed Identity for authentication
- useManagedIdentity: false
- # Flag indicating whether to use a federated token for authentication
- useFederatedToken: false
- # Client ID of the user-assigned managed identity (if applicable)
- userAssignedId:
- # Timeout duration for requests made to the Azure Blob Storage account (in seconds)
- requestTimeout:
- # Domain suffix of the Azure Blob Storage service endpoint (e.g., core.windows.net)
- endpointSuffix:
- bucketNames:
- chunks: "chunks"
- ruler: "ruler"
- admin: "admin"
-
- deploymentMode: SimpleScalable
-
- backend:
- replicas: 3
- read:
- replicas: 3
- write:
- replicas: 3
-
- # Disable minio storage
- minio:
- enabled: false
-
- # Zero out replica counts of other deployment modes
- singleBinary:
- replicas: 0
-
- ingester:
- replicas: 0
- querier:
- replicas: 0
- queryFrontend:
- replicas: 0
- queryScheduler:
- replicas: 0
- distributor:
- replicas: 0
- compactor:
- replicas: 0
- indexGateway:
- replicas: 0
- bloomCompactor:
- replicas: 0
- bloomGateway:
- replicas: 0
+loki:
+ schemaConfig:
+ configs:
+ - from: 2024-04-01
+ store: tsdb
+ object_store: azure
+ schema: v13
+ index:
+ prefix: loki_index_
+ period: 24h
+ ingester:
+ chunk_encoding: snappy
+ tracing:
+ enabled: true
+ querier:
+ max_concurrent: 4
+
+ storage:
+ type: azure
+ azure:
+ # Name of the Azure Blob Storage account
+ accountName:
+ # Key associated with the Azure Blob Storage account
+ accountKey:
+ # Comprehensive connection string for Azure Blob Storage account (Can be used to replace endpoint, accountName, and accountKey)
+ connectionString:
+ # Flag indicating whether to use Azure Managed Identity for authentication
+ useManagedIdentity: false
+ # Flag indicating whether to use a federated token for authentication
+ useFederatedToken: false
+ # Client ID of the user-assigned managed identity (if applicable)
+ userAssignedId:
+ # Timeout duration for requests made to the Azure Blob Storage account (in seconds)
+ requestTimeout:
+ # Domain suffix of the Azure Blob Storage service endpoint (e.g., core.windows.net)
+ endpointSuffix:
+ bucketNames:
+ chunks: "chunks"
+ ruler: "ruler"
+ admin: "admin"
+
+deploymentMode: SimpleScalable
+
+backend:
+ replicas: 3
+read:
+ replicas: 3
+write:
+ replicas: 3
+
+# Disable minio storage
+minio:
+ enabled: false
+
+# Zero out replica counts of other deployment modes
+singleBinary:
+ replicas: 0
+
+ingester:
+ replicas: 0
+querier:
+ replicas: 0
+queryFrontend:
+ replicas: 0
+queryScheduler:
+ replicas: 0
+distributor:
+ replicas: 0
+compactor:
+ replicas: 0
+indexGateway:
+ replicas: 0
+bloomCompactor:
+ replicas: 0
+bloomGateway:
+ replicas: 0
```
{{< /code >}}
@@ -295,4 +299,4 @@ To configure other storage providers, refer to the [Helm Chart Reference]({{< re
## Next Steps
* Configure an agent to [send log data to Loki](/docs/loki//send-data/).
-* Monitor the Loki deployment using the [Meta Monitoring Healm chart](/docs/loki//setup/install/helm/monitor-and-alert/)
\ No newline at end of file
+* Monitor the Loki deployment using the [Meta Monitoring Helm chart](/docs/loki//setup/install/helm/monitor-and-alert/)
diff --git a/docs/sources/setup/install/helm/monitor-and-alert/with-grafana-cloud.md b/docs/sources/setup/install/helm/monitor-and-alert/with-grafana-cloud.md
index 28aa4922bbd54..73303816db144 100644
--- a/docs/sources/setup/install/helm/monitor-and-alert/with-grafana-cloud.md
+++ b/docs/sources/setup/install/helm/monitor-and-alert/with-grafana-cloud.md
@@ -1,7 +1,7 @@
---
-title: Configure monitoring and alerting of Loki using Grafana Cloud
+title: Monitor Loki with Grafana Cloud
menuTitle: Monitor Loki with Grafana Cloud
-description: Configuring monitoring and alerts for Loki using Grafana Cloud.
+description: Configuring monitoring for Loki using Grafana Cloud.
aliases:
- ../../../../installation/helm/monitor-and-alert/with-grafana-cloud
weight: 200
@@ -12,89 +12,255 @@ keywords:
- grafana cloud
---
-# Configure monitoring and alerting of Loki using Grafana Cloud
+# Monitor Loki with Grafana Cloud
-This topic will walk you through using Grafana Cloud to monitor a Loki installation that is installed with the Helm chart. This approach leverages many of the chart's _self monitoring_ features, but instead of sending logs back to Loki itself, it sends them to a Grafana Cloud Logs instance. This approach also does not require the installation of the Prometheus Operator and instead sends metrics to a Grafana Cloud Metrics instance. Using Grafana Cloud to monitor Loki has the added benefit of being able to troubleshoot problems with Loki when the Helm installed Loki is down, as the logs will still be available in the Grafana Cloud Logs instance.
+This guide will walk you through using Grafana Cloud to monitor a Loki installation set up with the `meta-monitoring` Helm chart. This method takes advantage of many of the chart's self-monitoring features, sending metrics, logs, and traces from the Loki deployment to Grafana Cloud. Monitoring Loki with Grafana Cloud offers the added benefit of troubleshooting Loki issues even when the Helm-installed Loki is down, as the telemetry data will remain available in the Grafana Cloud instance.
-**Before you begin:**
+These instructions are based on the [meta-monitoring-chart repository](https://github.com/grafana/meta-monitoring-chart/tree/main).
+
+## Before you begin
- Helm 3 or above. See [Installing Helm](https://helm.sh/docs/intro/install/).
- A Grafana Cloud account and stack (including Cloud Grafana, Cloud Metrics, and Cloud Logs).
-- [Grafana Kubernetes Monitoring using Agent Flow](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/config-k8s-agent-flow/) configured for the Kubernetes cluster.
- A running Loki deployment installed in that Kubernetes cluster via the Helm chart.
-**Prequisites for Monitoring Loki:**
+## Configure the meta namespace
+
+The meta-monitoring stack will be installed in a separate namespace called `meta`. To create this namespace, run the following command:
+
+ ```bash
+ kubectl create namespace meta
+ ```
+
+## Grafana Cloud Connection Credentials
-You must setup the Grafana Kubernetes Integration following the instructions in [Grafana Kubernetes Monitoring using Agent Flow](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/config-k8s-agent-flow/) as this will install necessary components for collecting metrics about your Kubernetes cluster and sending them to Grafana Cloud. Many of the dashboards installed as a part of the Loki integration rely on these metrics.
+The meta-monitoring stack sends metrics, logs, and traces to Grafana Cloud. This requires that you know your connection credentials to Grafana Cloud. To obtain connection credentials, follow the steps below:
-Walking through this installation will create two Grafana Agent configurations, one for metrics and one for logs, that will add the external label `cluster: cloud`. In order for the Dashboards in the self-hosted Grafana Loki integration to work, the cluster name needs to match your Helm installation name. If you installed Loki using the command `helm install best-loki-cluster grafana/loki`, you would need to change the `cluster` value in both Grafana Agent configurations from `cloud` to `best-loki-cluster` when setting up the Grafana Kubernetes integration.
+1. Create a new Cloud Access Policy in Grafana Cloud.
+ 1. Sign into [Grafana Cloud](https://grafana.com/auth/sign-in/).
+ 1. In the main menu, select **Security > Access Policies**.
+ 1. Click **Create access policy**.
+ 1. Give the policy a **Name** and select the following permissions:
+ - Metrics: Write
+ - Logs: Write
+ - Traces: Write
+ 1. Click **Create**.
-**To set up the Loki integration in Grafana Cloud:**
-1. Get valid Push credentials for your Cloud Metrics and Cloud Logs instances.
-1. Create a secret in the same namespace as Loki to store your Cloud Logs credentials.
+1. Once the policy is created, select the policy and click **Add token**.
+1. Name the token, select an expiration date, then click **Create**.
+1. Copy the token to a secure location as it will not be displayed again.
+1. Navigate to the Grafana Cloud Portal **Overview** page.
+1. Click the **Details** button for your Prometheus or Mimir instance.
+ 1. From the **Using a self-hosted Grafana instance with Grafana Cloud Metrics** section, collect the instance **Name** and **URL**.
+ 1. Navigate back to the **Overview** page.
+1. Click the **Details** button for your Loki instance.
+ 1. From the **Using Grafana with Logs** section, collect the instance **Name** and **URL**.
+ 1. Navigate back to the **Overview** page.
+1. Click the **Details** button for your Tempo instance.
+ 1. From the **Using Grafana with Tempo** section, collect the instance **Name** and **URL**.
+
+1. Finally, generate the secrets to store your credentials for each telemetry type within your Kubernetes cluster:
```bash
- cat <<'EOF' | NAMESPACE=loki /bin/sh -c 'kubectl apply -n $NAMESPACE -f -'
- apiVersion: v1
- data:
- password:
- username:
- kind: Secret
- metadata:
- name: grafana-cloud-logs-credentials
- type: Opaque
- EOF
+ kubectl create secret generic logs -n meta \
+ --from-literal=username= \
+ --from-literal=password= \
+ --from-literal=endpoint='https:///loki/api/v1/push'
+
+ kubectl create secret generic metrics -n meta \
+ --from-literal=username= \
+ --from-literal=password= \
+ --from-literal=endpoint='https:///api/prom/push'
+
+ kubectl create secret generic traces -n meta \
+ --from-literal=username= \
+ --from-literal=password= \
+ --from-literal=endpoint='https:///otlp'
```
-1. Create a secret to store your Cloud Metrics credentials.
+## Configuration and Installation
+
+To install the `meta-monitoring` Helm chart, you must create a `values.yaml` file. At a minimum, this file should contain the following:
+ * The namespace to monitor
+ * Enablement of cloud monitoring
+
+This example `values.yaml` file provides the minimum configuration to monitor the `default` namespace, where Loki is installed in this guide:
+
+```yaml
+ namespacesToMonitor:
+ - default
+
+ cloud:
+ logs:
+ enabled: true
+ secret: "logs"
+ metrics:
+ enabled: true
+ secret: "metrics"
+ traces:
+ enabled: true
+ secret: "traces"
+```
+For further configuration options, refer to the [sample values.yaml file](https://github.com/grafana/meta-monitoring-chart/blob/main/charts/meta-monitoring/values.yaml).
+
+To install the `meta-monitoring` Helm chart, run the following commands:
+
+```bash
+helm repo add grafana https://grafana.github.io/helm-charts
+helm repo update
+helm install meta-monitoring grafana/meta-monitoring -n meta -f values.yaml
+```
+or when upgrading the configuration:
+```bash
+helm upgrade meta-monitoring grafana/meta-monitoring -n meta -f values.yaml
+```
+
+To verify the installation, run the following command:
+
+```bash
+kubectl get pods -n meta
+```
+It should return the following pods:
+```bash
+NAME READY STATUS RESTARTS AGE
+meta-alloy-0 2/2 Running 0 23h
+meta-alloy-1 2/2 Running 0 23h
+meta-alloy-2 2/2 Running 0 23h
+```
+
+
+## Enable Loki Tracing
+
+By default, Loki does not have tracing enabled. To enable tracing, modify the Loki configuration by editing the `values.yaml` file.
+
+Set the `tracing.enabled` configuration to `true`:
+```yaml
+loki:
+ tracing:
+ enabled: true
+```
+Next, instrument each of the Loki components to send traces to the meta-monitoring stack. Add the `extraEnv` configuration to each of the Loki components:
+
+```yaml
+ingester:
+ replicas: 3
+ extraEnv:
+ - name: JAEGER_ENDPOINT
+ value: "http://mmc-alloy-external.default.svc.cluster.local:14268/api/traces"
+ # This sets the Jaeger endpoint where traces will be sent.
+ # The endpoint points to the mmc-alloy service in the default namespace at port 14268.
+
+ - name: JAEGER_AGENT_TAGS
+ value: 'cluster="prod",namespace="default"'
+ # This specifies additional tags to attach to each span.
+ # Here, the cluster is labeled as "prod" and the namespace as "default".
+
+ - name: JAEGER_SAMPLER_TYPE
+ value: "ratelimiting"
+ # This sets the sampling strategy for traces.
+ # "ratelimiting" means that traces will be sampled at a fixed rate.
+
+ - name: JAEGER_SAMPLER_PARAM
+ value: "1.0"
+ # This sets the parameter for the sampler.
+ # For ratelimiting, "1.0" typically means one trace per second.
+```
+
+Since the meta-monitoring stack is installed in the `meta` namespace, the Loki components will need to be able to communicate with the meta-monitoring stack. To do this, create a new `externalname` service in the `default` namespace that points to the `meta` namespace by running the following command:
+
+```bash
+kubectl create service externalname mmc-alloy-external --external-name meta-alloy.meta.svc.cluster.local -n default
+```
+
+Finally, upgrade the Loki installation with the new configuration:
+
+```bash
+helm upgrade --values values.yaml loki grafana/loki
+```
+
+## Import the Loki Dashboards to Grafana Cloud
+
+The meta-monitoring stack includes a set of dashboards that can be imported into Grafana Cloud. These can be found in the [meta-monitoring repository](https://github.com/grafana/meta-monitoring-chart/tree/main/charts/meta-monitoring/src/dashboards).
+
+
+## Installing Rules
+
+The meta-monitoring stack includes a set of rules that can be installed to monitor the Loki installation. These rules can be found in the [meta-monitoring repository](https://github.com/grafana/meta-monitoring-chart/). To install the rules:
+
+1. Clone the repository:
+ ```bash
+ git clone https://github.com/grafana/meta-monitoring-chart/
+ ```
+1. Install `mimirtool` by following the instructions in the [Mimirtool documentation](https://grafana.com/docs/mimir/latest/manage/tools/mimirtool/).
+1. Create a new access policy in Grafana Cloud with the following permissions:
+ - Rules: Write
+ - Rules: Read
+1. Create a token for the access policy and copy it to a secure location.
+1. Install the rules:
```bash
- cat <<'EOF' | NAMESPACE=loki /bin/sh -c 'kubectl apply -n $NAMESPACE -f -'
- apiVersion: v1
- data:
- password:
- username:
- kind: Secret
- metadata:
- name: grafana-cloud-metrics-credentials
- type: Opaque
- EOF
+ mimirtool rules load --address= --id= --key= *.yaml
```
+1. Verify that the rules have been installed:
+ ```bash
+ mimirtool rules list --address= --id= --key=
+ ```
+ It should return a list of rules that have been installed.
+ ```bash
-1. Enable monitoring metrics and logs for the Loki installation to be sent your cloud database instances by adding the following to your Helm `values.yaml` file:
-
- ```yaml
- ---
- monitoring:
- dashboards:
- enabled: false
- rules:
- enabled: false
- selfMonitoring:
- logsInstance:
- clients:
- - url:
- basicAuth:
- username:
- name: grafana-cloud-logs-credentials
- key: username
- password:
- name: grafana-cloud-logs-credentials
- key: password
- serviceMonitor:
- metricsInstance:
- remoteWrite:
- - url:
- basicAuth:
- username:
- name: grafana-cloud-metrics-credentials
- key: username
- password:
- name: grafana-cloud-metrics-credentials
- key: password
+ loki-rules:
+ - name: loki_rules
+ rules:
+ - record: cluster_job:loki_request_duration_seconds:99quantile
+ expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[5m])) by (le, cluster, job))
+ - record: cluster_job:loki_request_duration_seconds:50quantile
+ expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[5m])) by (le, cluster, job))
+ - record: cluster_job:loki_request_duration_seconds:avg
+ expr: sum(rate(loki_request_duration_seconds_sum[5m])) by (cluster, job) / sum(rate(loki_request_duration_seconds_count[5m])) by (cluster, job)
+ - record: cluster_job:loki_request_duration_seconds_bucket:sum_rate
+ expr: sum(rate(loki_request_duration_seconds_bucket[5m])) by (le, cluster, job)
+ - record: cluster_job:loki_request_duration_seconds_sum:sum_rate
+ expr: sum(rate(loki_request_duration_seconds_sum[5m])) by (cluster, job)
+ - record: cluster_job:loki_request_duration_seconds_count:sum_rate
+ expr: sum(rate(loki_request_duration_seconds_count[5m])) by (cluster, job)
+ - record: cluster_job_route:loki_request_duration_seconds:99quantile
+ expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[5m])) by (le, cluster, job, route))
+ - record: cluster_job_route:loki_request_duration_seconds:50quantile
+ expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[5m])) by (le, cluster, job, route))
+ - record: cluster_job_route:loki_request_duration_seconds:avg
+ expr: sum(rate(loki_request_duration_seconds_sum[5m])) by (cluster, job, route) / sum(rate(loki_request_duration_seconds_count[5m])) by (cluster, job, route)
+ - record: cluster_job_route:loki_request_duration_seconds_bucket:sum_rate
+ expr: sum(rate(loki_request_duration_seconds_bucket[5m])) by (le, cluster, job, route)
+ - record: cluster_job_route:loki_request_duration_seconds_sum:sum_rate
+ expr: sum(rate(loki_request_duration_seconds_sum[5m])) by (cluster, job, route)
+ - record: cluster_job_route:loki_request_duration_seconds_count:sum_rate
+ expr: sum(rate(loki_request_duration_seconds_count[5m])) by (cluster, job, route)
+ - record: cluster_namespace_job_route:loki_request_duration_seconds:99quantile
+ expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[5m])) by (le, cluster, namespace, job, route))
+ - record: cluster_namespace_job_route:loki_request_duration_seconds:50quantile
+ expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[5m])) by (le, cluster, namespace, job, route))
+ - record: cluster_namespace_job_route:loki_request_duration_seconds:avg
+ expr: sum(rate(loki_request_duration_seconds_sum[5m])) by (cluster, namespace, job, route) / sum(rate(loki_request_duration_seconds_count[5m])) by (cluster, namespace, job, route)
+ - record: cluster_namespace_job_route:loki_request_duration_seconds_bucket:sum_rate
+ expr: sum(rate(loki_request_duration_seconds_bucket[5m])) by (le, cluster, namespace, job, route)
+ - record: cluster_namespace_job_route:loki_request_duration_seconds_sum:sum_rate
+ expr: sum(rate(loki_request_duration_seconds_sum[5m])) by (cluster, namespace, job, route)
+ - record: cluster_namespace_job_route:loki_request_duration_seconds_count:sum_rate
+ expr: sum(rate(loki_request_duration_seconds_count[5m])) by (cluster, namespace, job, route)
```
+## Install kube-state-metrics
+
+Metrics about Kubernetes objects are scraped from [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics). This needs to be installed in the cluster. The `kubeStateMetrics.endpoint` entry in the meta-monitoring `values.yaml` should be set to its address (without the `/metrics` part in the URL):
+
+```yaml
+kubeStateMetrics:
+ # Scrape https://github.com/kubernetes/kube-state-metrics by default
+ enabled: true
+ # This endpoint is created when the helm chart from
+ # https://artifacthub.io/packages/helm/prometheus-community/kube-state-metrics/
+ # is used. Change this if kube-state-metrics is installed somewhere else.
+ endpoint: kube-state-metrics.kube-state-metrics.svc.cluster.local:8080
+```
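+
+If kube-state-metrics is not already running in your cluster, one way to install it is the community Helm chart linked in the comment above. The following is a sketch that assumes the release name and namespace implied by the default endpoint (`kube-state-metrics` in the `kube-state-metrics` namespace):
+
+```bash
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo update
+helm install kube-state-metrics prometheus-community/kube-state-metrics \
+  --namespace kube-state-metrics --create-namespace
+```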
-1. Install the self-hosted Grafana Loki integration by going to your hosted Grafana instance, selecting **Connections** from the Home menu, then search for and install the **Self-hosted Grafana Loki** integration.
-1. Once the self-hosted Grafana Loki integration is installed, click the **View Dashboards** button to see the installed dashboards.
diff --git a/docs/sources/setup/install/helm/monitor-and-alert/with-local-monitoring.md b/docs/sources/setup/install/helm/monitor-and-alert/with-local-monitoring.md
index dfa491fe966fa..1c1fb6d244f30 100644
--- a/docs/sources/setup/install/helm/monitor-and-alert/with-local-monitoring.md
+++ b/docs/sources/setup/install/helm/monitor-and-alert/with-local-monitoring.md
@@ -1,7 +1,7 @@
---
-title: Configure monitoring and alerting
-menuTitle: Configure monitoring and alerting
-description: Configuring monitoring and alerts using the Helm chart.
+title: Monitor Loki using a local LGTM (Loki, Grafana, Tempo and Mimir) stack
+menuTitle: Monitor Loki using a local LGTM stack
+description: Monitor Loki using a local LGTM (Loki, Grafana, Tempo and Mimir) stack
aliases:
- ../../../../installation/helm/monitor-and-alert/with-local-monitoring/
weight: 100
@@ -11,203 +11,162 @@ keywords:
- alerting
---
-# Configure monitoring and alerting
+# Monitor Loki using a local LGTM (Loki, Grafana, Tempo and Mimir) stack
-By default this Helm Chart configures meta-monitoring of metrics (service monitoring) and logs (self monitoring). This topic will walk you through configuring monitoring using a monitoring solution local to the same cluster where Loki is installed.
+This topic will walk you through using the meta-monitoring Helm chart to deploy a local stack to monitor your production Loki installation. This approach leverages many of the chart's _self monitoring_ features, but instead of sending logs back to Loki itself, it sends them to a small Loki, Grafana, Tempo, Mimir (LGTM) stack running within the `meta` namespace.
-The `ServiceMonitor` resource works with either the Prometheus Operator or the Grafana Agent Operator, and defines how Loki's metrics should be scraped. Scraping this Loki cluster using the scrape config defined in the `SerivceMonitor` resource is required for the included dashboards to work. A `MetricsInstance` can be configured to write the metrics to a remote Prometheus instance such as Grafana Cloud Metrics.
-_Self monitoring_ is enabled by default. This will deploy a `GrafanaAgent`, `LogsInstance`, and `PodLogs` resource which will instruct the Grafana Agent Operator (installed separately) on how to scrape this Loki cluster's logs and send them back to itself. Scraping this Loki cluster using the scrape config defined in the `PodLogs` resource is required for the included dashboards to work.
-
-Rules and alerts are automatically deployed.
-
-**Before you begin:**
+## Before you begin
- Helm 3 or above. See [Installing Helm](https://helm.sh/docs/intro/install/).
- A running Kubernetes cluster with a running Loki deployment.
-- A running Grafana instance.
-- A running Prometheus Operator installed using the `kube-prometheus-stack` Helm chart.
-**Prometheus Operator Prequisites**
+## Configure the meta namespace
+
+The meta-monitoring stack will be installed in a separate namespace called `meta`. To create this namespace, run the following command:
+
+ ```bash
+ kubectl create namespace meta
+ ```
+
-The dashboards require certain metric labels to display Kubernetes metrics. The best way to accomplish this is to install the `kube-prometheus-stack` Helm chart with the following values file, replacing `CLUSTER_NAME` with the name of your cluster. The cluster name is what you specify during the helm installation, so a cluster installed with the command `helm install loki-cluster grafana/loki` would be called `loki-cluster`.
+## Configuration and Installation
+
+The meta-monitoring stack is installed using the `meta-monitoring` Helm chart. The local mode deploys a small LGTM stack that includes Alloy, Grafana, Mimir, Loki, and Tempo. To configure the meta-monitoring stack, create a `values.yaml` file with the following content:
```yaml
-kubelet:
- serviceMonitor:
- cAdvisorRelabelings:
- - action: replace
- replacement:
- targetLabel: cluster
- - targetLabel: metrics_path
- sourceLabels:
- - "__metrics_path__"
- - targetLabel: "instance"
- sourceLabels:
- - "node"
-
-defaultRules:
- additionalRuleLabels:
- cluster:
-
-"kube-state-metrics":
- prometheus:
- monitor:
- relabelings:
- - action: replace
- replacement:
- targetLabel: cluster
- - targetLabel: "instance"
- sourceLabels:
- - "__meta_kubernetes_pod_node_name"
-
-"prometheus-node-exporter":
- prometheus:
- monitor:
- relabelings:
- - action: replace
- replacement:
- targetLabel: cluster
- - targetLabel: "instance"
- sourceLabels:
- - "__meta_kubernetes_pod_node_name"
-
-prometheus:
- monitor:
- relabelings:
- - action: replace
- replacement:
- targetLabel: cluster
+namespacesToMonitor:
+- default
+
+cloud:
+ logs:
+ enabled: false
+ metrics:
+ enabled: false
+ traces:
+ enabled: false
+
+local:
+ grafana:
+ enabled: true
+ logs:
+ enabled: true
+ metrics:
+ enabled: true
+ traces:
+ enabled: true
+ minio:
+ enabled: true
+```
+
+For further configuration options, refer to the [sample values.yaml file](https://github.com/grafana/meta-monitoring-chart/blob/main/charts/meta-monitoring/values.yaml).
+
+By default, local mode also enables Minio, which acts as the object storage for the LGTM stack. To provide access to Minio, create a generic secret by running the following command:
+
+```bash
+kubectl create secret generic minio -n meta \
+ --from-literal= \
+ --from-literal=
+```
+{{< admonition type="note" >}}
+The username and password must each be at least 8 characters long.
+{{< /admonition >}}
+
+To install the meta-monitoring stack, run the following commands:
+
+```bash
+helm repo add grafana https://grafana.github.io/helm-charts
+helm repo update
+helm install meta-monitoring grafana/meta-monitoring -n meta -f values.yaml
+```
+
+Or, to upgrade an existing installation, run:
+```bash
+helm upgrade meta-monitoring grafana/meta-monitoring -n meta -f values.yaml
+```
+
+To verify the installation, run the following command:
+
+```bash
+kubectl get pods -n meta
+```
+It should return pods similar to the following:
+```bash
+grafana-59d664f55f-dtfqr 1/1 Running 2 (2m7s ago) 137m
+loki-backend-0 2/2 Running 2 (2m7s ago) 137m
+loki-backend-1 2/2 Running 4 (2m7s ago) 137m
+loki-backend-2 2/2 Running 3 (2m7s ago) 137m
+loki-read-6f775d8c5-6t749 1/1 Running 1 (2m7s ago) 137m
+loki-read-6f775d8c5-kdd8m 1/1 Running 1 (2m7s ago) 137m
+loki-read-6f775d8c5-tsw2r 1/1 Running 1 (2m7s ago) 137m
+loki-write-0 1/1 Running 1 (2m7s ago) 137m
+loki-write-1 1/1 Running 1 (2m7s ago) 137m
+loki-write-2 1/1 Running 1 (2m7s ago) 137m
+meta-alloy-0 2/2 Running 2 (2m7s ago) 137m
+meta-alloy-1 2/2 Running 2 (2m7s ago) 137m
+...
```
+## Enable Loki Tracing
+
+By default, Loki does not have tracing enabled. To enable tracing, edit the Loki `values.yaml` file and set `tracing.enabled` to `true`:
+```yaml
+loki:
+ tracing:
+ enabled: true
+```
+
+Next, instrument each Loki component to send traces to the meta-monitoring stack by adding the following `extraEnv` configuration to each one:
+
+```yaml
+ingester:
+ replicas: 3
+ extraEnv:
+ - name: JAEGER_ENDPOINT
+ value: "http://mmc-alloy-external.default.svc.cluster.local:14268/api/traces"
+ # This sets the Jaeger endpoint where traces will be sent.
+ # The endpoint points to the mmc-alloy service in the default namespace at port 14268.
+
+ - name: JAEGER_AGENT_TAGS
+ value: 'cluster="prod",namespace="default"'
+ # This specifies additional tags to attach to each span.
+ # Here, the cluster is labeled as "prod" and the namespace as "default".
+
+ - name: JAEGER_SAMPLER_TYPE
+ value: "ratelimiting"
+ # This sets the sampling strategy for traces.
+ # "ratelimiting" means that traces will be sampled at a fixed rate.
+
+ - name: JAEGER_SAMPLER_PARAM
+ value: "1.0"
+ # This sets the parameter for the sampler.
+ # For ratelimiting, "1.0" typically means one trace per second.
+```
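+
+The same environment variables need to be added to every other Loki component you run, for example the `querier` and `distributor`. As a purely illustrative sketch, a YAML anchor can avoid repeating the block in the values file; adjust the component keys to match your deployment mode:
+
+```yaml
+ingester:
+  replicas: 3
+  extraEnv: &tracingEnv     # anchor the list once on the ingester...
+    - name: JAEGER_ENDPOINT
+      value: "http://mmc-alloy-external.default.svc.cluster.local:14268/api/traces"
+    - name: JAEGER_AGENT_TAGS
+      value: 'cluster="prod",namespace="default"'
+    - name: JAEGER_SAMPLER_TYPE
+      value: "ratelimiting"
+    - name: JAEGER_SAMPLER_PARAM
+      value: "1.0"
+
+querier:
+  extraEnv: *tracingEnv     # ...and reuse it for the other components
+
+distributor:
+  extraEnv: *tracingEnv
+```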
+
+## Install kube-state-metrics
+
+Metrics about Kubernetes objects are scraped from [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics). This needs to be installed in the cluster. The `kubeStateMetrics.endpoint` entry in the meta-monitoring `values.yaml` should be set to its address (without the `/metrics` part in the URL):
+
+```yaml
+kubeStateMetrics:
+ # Scrape https://github.com/kubernetes/kube-state-metrics by default
+ enabled: true
+ # This endpoint is created when the helm chart from
+ # https://artifacthub.io/packages/helm/prometheus-community/kube-state-metrics/
+ # is used. Change this if kube-state-metrics is installed somewhere else.
+ endpoint: kube-state-metrics.kube-state-metrics.svc.cluster.local:8080
+```
+
+## Accessing the meta-monitoring stack
+
+To access the meta-monitoring stack, use port-forwarding to reach the Grafana dashboard. Run the following command:
+
+```bash
+kubectl port-forward -n meta svc/grafana 3000:3000
+```
+
+## Dashboards and Rules
-The `kube-prometheus-stack` installs `ServiceMonitor` and `PrometheusRule` resources for monitoring Kubernetes, and it depends on the `kube-state-metrics` and `prometheus-node-exporter` helm charts which also install `ServiceMonitor` resources for collecting `kubelet` and `node-exporter` metrics. The above values file adds the necessary additional labels required for these metrics to work with the included dashboards.
-
-If you are using this helm chart in an environment which does not allow for the installation of `kube-prometheus-stack` or custom CRDs, you should run `helm template` on the `kube-prometheus-stack` helm chart with the above values file, and review all generated `ServiceMonitor` and `PrometheusRule` resources. These resources may have to be modified with the correct ports and selectors to find the various services such as `kubelet` and `node-exporter` in your environment.
-
-**To install the dashboards:**
-
-1. Dashboards are enabled by default. Set `monitoring.dashboards.namespace` to the namespace of the Grafana instance if it is in a different namespace than this Loki cluster.
-1. Dashbards must be mounted to your Grafana container. The dashboards are in `ConfigMap`s named `loki-dashboards-1` and `loki-dashboards-2` for Loki, and `enterprise-logs-dashboards-1` and `enterprise-logs-dashboards-2` for GEL. Mount them to `/var/lib/grafana/dashboards/loki-1` and `/var/lib/grafana/dashboards/loki-2` in your Grafana container.
-1. Create a dashboard provisioning file called `dashboards.yaml` in `/etc/grafana/provisioning/dashboards` of your Grafana container with the following contents (_note_: you may need to edit the `orgId`):
-
- ```yaml
- ---
- apiVersion: 1
- providers:
- - disableDeletion: true
- editable: false
- folder: Loki
- name: loki-1
- options:
- path: /var/lib/grafana/dashboards/loki-1
- orgId: 1
- type: file
- - disableDeletion: true
- editable: false
- folder: Loki
- name: loki-2
- options:
- path: /var/lib/grafana/dashboards/loki-2
- orgId: 1
- type: file
- ```
-
-**To add add additional Prometheus rules:**
-
-1. Modify the configuration file `values.yaml`:
-
- ```yaml
- monitoring:
- rules:
- additionalGroups:
- - name: loki-rules
- rules:
- - record: job:loki_request_duration_seconds_bucket:sum_rate
- expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job)
- - record: job_route:loki_request_duration_seconds_bucket:sum_rate
- expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job, route)
- - record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate
- expr: sum(rate(container_cpu_usage_seconds_total[1m])) by (node, namespace, pod, container)
- ```
-
-**To disable monitoring:**
-
-1. Modify the configuration file `values.yaml`:
-
- ```yaml
- selfMonitoring:
- enabled: false
-
- serviceMonitor:
- enabled: false
- ```
-
-**To use a remote Prometheus and Loki instance such as Grafana Cloud**
-
-1. Create a `secrets.yaml` file with credentials to access the Grafana Cloud services:
-
- ```yaml
- ---
- apiVersion: v1
- kind: Secret
- metadata:
- name: primary-credentials-metrics
- namespace: default
- stringData:
- username: ""
- password: ""
- ---
- apiVersion: v1
- kind: Secret
- metadata:
- name: primary-credentials-logs
- namespace: default
- stringData:
- username: ""
- password: ""
- ```
-
-2. Add the secret to Kubernetes with `kubectl create -f secret.yaml`.
-
-3. Add a `remoteWrite` section to `serviceMonitor` in `values.yaml`:
-
- ```yaml
- monitoring:
- ...
- serviceMonitor:
- enabled: true
- ...
- metricsInstance:
- remoteWrite:
- - url:
- basicAuth:
- username:
- name: primary-credentials-metrics
- key: username
- password:
- name: primary-credentials-metrics
- key: password
- ```
-
-4. Add a client to `monitoring.selfMonitoring.logsInstance.clients`:
-
- ```yaml
- monitoring:
- ---
- selfMonitoring:
- enabled: true
- logsInstance:
- clients:
- - url:
- basicAuth:
- username:
- name: primary-credentials-logs
- key: username
- password:
- name: primary-credentials-logs
- key: password
- lokiCanary:
- enabled: false
- ```
-
-5. Install the `Loki meta-motoring` connection on Grafana Cloud.
+The local meta-monitoring stack comes with a set of pre-configured dashboards and alerting rules. These can be accessed via
+[http://localhost:3000](http://localhost:3000) using the default credentials `admin` and `admin`.
\ No newline at end of file
diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md
index 76b4936f20bfd..03f9d17f26984 100644
--- a/docs/sources/setup/install/helm/reference.md
+++ b/docs/sources/setup/install/helm/reference.md
@@ -3487,6 +3487,15 @@ null
null
+ |
+
+
+ gateway.containerPort |
+ int |
+ Default container port |
+
+8080
+
|
diff --git a/docs/sources/setup/install/local.md b/docs/sources/setup/install/local.md
index dbdeb8ca3a164..f8fbaecd1c388 100644
--- a/docs/sources/setup/install/local.md
+++ b/docs/sources/setup/install/local.md
@@ -1,23 +1,24 @@
---
-title: Local
-menuTitle: Install locally
+title: Install Grafana Loki locally
+menuTitle: Install locally
description: Describes how to install and run Grafana Loki locally.
-aliases:
+aliases:
- ../../installation/local/
weight: 500
---
-# Local
-In order to log events with Grafana Loki, download and install both Promtail and Loki.
+# Install Grafana Loki locally
+
+To log events with Grafana Loki, download and install both Promtail and Loki.
+
- Loki is the logging engine.
- Promtail sends logs to Loki.
-The configuration specifies running Loki as a single binary.
+The configuration runs Loki as a single binary.
## Install using APT or RPM package manager
-1. Add Granafa's Advanced Package Tool [APT](https://apt.grafana.com/) or RPM Package Manager [RPM](https://rpm.grafana.com/)
- package repository following the linked instructions.
+1. Add the Grafana [Advanced Package Tool (APT)](https://apt.grafana.com/) or [RPM Package Manager (RPM)](https://rpm.grafana.com/) package repository following the linked instructions.
1. Install Loki and Promtail
1. Using `dnf`
```
@@ -31,51 +32,76 @@ The configuration specifies running Loki as a single binary.
```
## Install manually
-1. Navigate to the [release page](https://github.com/grafana/loki/releases/).
-2. Scroll down to the Assets section under the version that you want to install.
-3. Download the Loki and Promtail .zip files that correspond to your system.
- Do not download LogCLI or Loki Canary at this time. `LogCLI` allows you to run Loki queries in a command line interface. [Loki Canary]({{< relref "../../operations/loki-canary" >}}) is a tool to audit Loki performance.
-4. Unzip the package contents into the same directory. This is where the two programs will run.
-5. In the command line, change directory (`cd` on most systems) to the directory with Loki and Promtail. Copy and paste the commands below into your command line to download generic configuration files.
- Use the corresponding Git refs that match your downloaded Loki version to get the correct configuration file. For example, if you are using Loki version 2.9.2, you need to use the `https://raw.githubusercontent.com/grafana/loki/v2.9.2/cmd/loki/loki-local-config.yaml` URL to download the configuration file that corresponds to the Loki version you aim to run.
-
- ```
- wget https://raw.githubusercontent.com/grafana/loki/main/cmd/loki/loki-local-config.yaml
- wget https://raw.githubusercontent.com/grafana/loki/main/clients/cmd/promtail/promtail-local-config.yaml
- ```
-6. Enter the following command to start Loki:
-
- **Windows**
-
- ```
- .\loki-windows-amd64.exe --config.file=loki-local-config.yaml
- ```
-
- **Linux**
- ```
- ./loki-linux-amd64 -config.file=loki-local-config.yaml
- ```
+
+1. Browse to the [release page](https://github.com/grafana/loki/releases/).
+1. Find the **Assets** section for the version that you want to install.
+1. Download the Loki and Promtail archive files that correspond to your system.
+
+ Don't download LogCLI or Loki Canary at this time.
+ LogCLI allows you to run Loki queries in a command line interface.
+ [Loki Canary]({{< relref "../../operations/loki-canary" >}}) is a tool to audit Loki performance.
+
+1. Extract the package contents into the same directory. This is where the two programs will run.
+1. In the command line, change directory (`cd` on most systems) to the directory with Loki and Promtail.
+
+ Copy and paste the following commands into your command line to download generic configuration files.
+
+ Use the Git references that match your downloaded Loki version to get the correct configuration file.
+ For example, if you are using Loki version 2.9.2, you need to use the `https://raw.githubusercontent.com/grafana/loki/v2.9.2/cmd/loki/loki-local-config.yaml` URL to download the configuration file.
+
+ ```
+ wget https://raw.githubusercontent.com/grafana/loki/main/cmd/loki/loki-local-config.yaml
+ wget https://raw.githubusercontent.com/grafana/loki/main/clients/cmd/promtail/promtail-local-config.yaml
+ ```
+
+1. Run the following command to start Loki:
+
+ **Windows**
+
+ ```
+ .\loki-windows-amd64.exe --config.file=loki-local-config.yaml
+ ```
+
+ **Linux**
+
+ ```
+ ./loki-linux-amd64 -config.file=loki-local-config.yaml
+ ```
Loki runs and displays Loki logs in your command line and on http://localhost:3100/metrics.
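+
+To confirm that Loki is up and ready, you can also query its readiness endpoint (a quick check that assumes the default HTTP listen port of 3100):
+
+```
+curl http://localhost:3100/ready
+```
+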
-The next step will be running an agent to send logs to Loki.
+The next step is running an agent to send logs to Loki.
To do so with Promtail, refer to the [Promtail configuration]({{< relref "../../send-data/promtail" >}}).
## Release binaries - openSUSE Linux only
-Every release includes binaries for Loki which can be found on the
-[Releases page](https://github.com/grafana/loki/releases).
+Every release includes binaries for Loki.
+You can find them on the [Releases page](https://github.com/grafana/loki/releases).
## Community openSUSE Linux packages
-The community provides packages of Loki for openSUSE Linux. To install:
-
-1. Add the repository `https://download.opensuse.org/repositories/security:/logging/`
- to your system. For example, if you are using Leap 15.1, run
- `sudo zypper ar https://download.opensuse.org/repositories/security:/logging/openSUSE_Leap_15.1/security:logging.repo ; sudo zypper ref`
-2. Install the Loki package with `zypper in loki`
-3. Enable the Loki and Promtail services:
- - `systemd start loki && systemd enable loki`
- - `systemd start promtail && systemd enable promtail`
-4. Modify the configuration files as needed: `/etc/loki/promtail.yaml` and
- `/etc/loki/loki.yaml`.
+The community provides packages of Loki for openSUSE Linux.
+To install them:
+
+1. Add the repository `https://download.opensuse.org/repositories/security:/logging/` to your system.
+ For example, if you are using Leap 15.1, run:
+
+ ```
+ sudo zypper ar https://download.opensuse.org/repositories/security:/logging/openSUSE_Leap_15.1/security:logging.repo
+ sudo zypper ref
+ ```
+
+1. Install the Loki package:
+
+ ```
+ zypper in loki
+ ```
+
+1. Start and enable the Loki and Promtail services:
+ ```
+   systemctl start loki
+   systemctl enable loki
+   systemctl start promtail
+   systemctl enable promtail
+ ```
+1. Modify the `/etc/loki/promtail.yaml` and `/etc/loki/loki.yaml` configuration files as needed.
diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md
index e5abde43173d7..547da559bb1fa 100644
--- a/docs/sources/setup/upgrade/_index.md
+++ b/docs/sources/setup/upgrade/_index.md
@@ -36,6 +36,8 @@ The output is incredibly verbose as it shows the entire internal config struct u
## Main / Unreleased
+Loki changes the default value of `-ruler.alertmanager-use-v2` from `false` to `true`. Alertmanager APIv1 was deprecated in Alertmanager 0.16.0 and is removed as of 0.27.0.
+
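+If your Alertmanager still only serves APIv1, you can keep the previous behavior by setting the ruler configuration back explicitly. A minimal sketch using the `enable_alertmanager_v2` setting from the configuration reference:
+
+```yaml
+ruler:
+  # Revert to the previous default and use the Alertmanager APIv1 endpoints.
+  enable_alertmanager_v2: false
+```
+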
## 3.0.0
{{% admonition type="note" %}}
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index cae0094873a84..145ab85144a06 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -316,6 +316,17 @@ pattern_ingester:
# CLI flag: -pattern-ingester.flush-check-period
[flush_check_period: | default = 30s]
+ # Configures the metric aggregation and storage behavior of the pattern
+ # ingester.
+ metric_aggregation:
+ # Whether the pattern ingester metric aggregation is enabled.
+ # CLI flag: -pattern-ingester.metric-aggregation.enabled
+ [enabled: | default = false]
+
+ # Whether to log push observations.
+ # CLI flag: -pattern-ingester.metric-aggregation.log-push-observations
+ [log_push_observations: | default = false]
+
# The index_gateway block configures the Loki index gateway server, responsible
# for serving index queries without the need to constantly interact with the
# object store.
@@ -366,6 +377,19 @@ bloom_build:
# CLI flag: -bloom-build.builder.planner-address
[planner_address: | default = ""]
+ backoff_config:
+ # Minimum delay when backing off.
+ # CLI flag: -bloom-build.builder.backoff.backoff-min-period
+ [min_period: | default = 100ms]
+
+ # Maximum delay when backing off.
+ # CLI flag: -bloom-build.builder.backoff.backoff-max-period
+ [max_period: | default = 10s]
+
+ # Number of times to backoff and retry before failing.
+ # CLI flag: -bloom-build.builder.backoff.backoff-retries
+ [max_retries: | default = 10]
+
# Experimental: The bloom_gateway block configures the Loki bloom gateway
# server, responsible for serving queries for filtering chunks based on filter
# expressions.
@@ -556,6 +580,9 @@ compactor_grpc_client:
# Configuration for analytics.
[analytics: ]
+# Configuration for profiling options.
+[profiling: ]
+
# Common configuration to be shared between multiple modules. If a more specific
# configuration is given in other sections, the related configuration within
# this section will be ignored.
@@ -2752,7 +2779,23 @@ lifecycler:
# CLI flag: -ingester.flush-check-period
[flush_check_period: | default = 30s]
-# The timeout before a flush is cancelled.
+flush_op_backoff:
+ # Minimum backoff period when a flush fails. Each concurrent flush has its own
+ # backoff, see `ingester.concurrent-flushes`.
+ # CLI flag: -ingester.flush-op-backoff-min-period
+ [min_period: | default = 10s]
+
+ # Maximum backoff period when a flush fails. Each concurrent flush has its own
+ # backoff, see `ingester.concurrent-flushes`.
+ # CLI flag: -ingester.flush-op-backoff-max-period
+ [max_period: | default = 1m]
+
+ # Maximum retries for failed flushes.
+ # CLI flag: -ingester.flush-op-backoff-retries
+ [max_retries: | default = 10]
+
+# The timeout for an individual flush. Will be retried up to
+# `flush-op-backoff-retries` times.
# CLI flag: -ingester.flush-op-timeout
[flush_op_timeout: | default = 10m]
@@ -2863,6 +2906,11 @@ wal:
# common.path_prefix is set then common.path_prefix will be used.
# CLI flag: -ingester.shutdown-marker-path
[shutdown_marker_path: | default = ""]
+
+# Interval at which the ingester ownedStreamService checks for changes in the
+# ring to recalculate owned streams.
+# CLI flag: -ingester.owned-streams-check-interval
+[owned_streams_check_interval: | default = 30s]
```
### ingester_client
@@ -3524,7 +3572,7 @@ When a memberlist config with atleast 1 join_members is defined, kvstore of type
# The timeout for establishing a connection with a remote node, and for
# read/write operations.
# CLI flag: -memberlist.stream-timeout
-[stream_timeout: | default = 10s]
+[stream_timeout: | default = 2s]
# Multiplication factor used when sending out messages (factor * log(N+1)).
# CLI flag: -memberlist.retransmit-factor
@@ -3757,6 +3805,14 @@ These are values which allow you to control aspects of Loki's operation, most co
# CLI flag: -operation-config.log-push-request-streams
[log_push_request_streams: | default = false]
+# Log metrics for duplicate lines received.
+# CLI flag: -operation-config.log-duplicate-metrics
+[log_duplicate_metrics: | default = false]
+
+# Log stream info for duplicate lines received
+# CLI flag: -operation-config.log-duplicate-stream-info
+[log_duplicate_stream_info: | default = false]
+
# Log push errors with a rate limited logger, will show client push errors
# without overly spamming logs.
# CLI flag: -operation-config.limited-log-push-errors
@@ -3818,6 +3874,24 @@ chunks:
[row_shards: | default = 16]
```
+### profiling
+
+Configuration for `profiling` options.
+
+```yaml
+# Sets the value for runtime.SetBlockProfilingRate
+# CLI flag: -profiling.block-profile-rate
+[block_profile_rate: | default = 0]
+
+# Sets the value for runtime.SetCPUProfileRate
+# CLI flag: -profiling.cpu-profile-rate
+[cpu_profile_rate: | default = 0]
+
+# Sets the value for runtime.SetMutexProfileFraction
+# CLI flag: -profiling.mutex-profile-fraction
+[mutex_profile_fraction: | default = 0]
+```
+
### querier
Configures the `querier`. Only appropriate when running all modules or just the querier.
@@ -4212,9 +4286,10 @@ storage:
# CLI flag: -ruler.alertmanager-refresh-interval
[alertmanager_refresh_interval: | default = 1m]
-# If enabled requests to Alertmanager will utilize the V2 API.
+# Use Alertmanager APIv2. APIv1 was deprecated in Alertmanager 0.16.0 and is
+# removed as of 0.27.0.
# CLI flag: -ruler.alertmanager-use-v2
-[enable_alertmanager_v2: | default = false]
+[enable_alertmanager_v2: | default = true]
# List of alert relabel configs.
[alert_relabel_configs: ]
@@ -4739,6 +4814,10 @@ Configures the `server` of the launched module(s).
# CLI flag: -server.grpc-conn-limit
[grpc_listen_conn_limit: | default = 0]
+# Enables PROXY protocol.
+# CLI flag: -server.proxy-protocol-enabled
+[proxy_protocol_enabled: | default = false]
+
# Comma-separated list of cipher suites to use. If blank, the default Go cipher
# suites is used.
# CLI flag: -server.tls-cipher-suites
@@ -4893,6 +4972,21 @@ grpc_tls_config:
# CLI flag: -server.grpc.num-workers
[grpc_server_num_workers: | default = 0]
+# If true, the request_message_bytes, response_message_bytes, and
+# inflight_requests metrics will be tracked. Enabling this option prevents the
+# use of memory pools for parsing gRPC request bodies and may lead to more
+# memory allocations.
+# CLI flag: -server.grpc.stats-tracking-enabled
+[grpc_server_stats_tracking_enabled: | default = true]
+
+# If true, gRPC's buffer pools will be used to handle incoming requests.
+# Enabling this feature can reduce memory allocation, but also requires
+# disabling GRPC server stats tracking by setting
+# `server.grpc.stats-tracking-enabled=false`. This is an experimental gRPC
+# feature, so it might be removed in a future version of the gRPC library.
+# CLI flag: -server.grpc.recv-buffer-pools-enabled
+[grpc_server_recv_buffer_pools_enabled: | default = false]
+
# Output log messages in the given format. Valid formats: [logfmt, json]
# CLI flag: -log.format
[log_format: | default = "logfmt"]
@@ -4906,6 +5000,11 @@ grpc_tls_config:
# CLI flag: -server.log-source-ips-enabled
[log_source_ips_enabled: | default = false]
+# Log all source IPs instead of only the originating one. Only used if
+# server.log-source-ips-enabled is true
+# CLI flag: -server.log-source-ips-full
+[log_source_ips_full: | default = false]
+
# Header field storing the source IPs. Only used if
# server.log-source-ips-enabled is true. If not set the default Forwarded,
# X-Real-IP and X-Forwarded-For headers are used
diff --git a/go.mod b/go.mod
index 051a18d2292f7..4a49b95b102bc 100644
--- a/go.mod
+++ b/go.mod
@@ -50,9 +50,9 @@ require (
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.5.0
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2
- github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb
+ github.com/grafana/dskit v0.0.0-20240626184720-35810fdf1c6d
github.com/grafana/go-gelf/v2 v2.0.1
- github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0
+ github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
@@ -96,7 +96,7 @@ require (
github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448
github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/xdg-go/scram v1.1.2
- go.etcd.io/bbolt v1.3.6
+ go.etcd.io/bbolt v1.3.8
go.uber.org/atomic v1.11.0
go.uber.org/goleak v1.3.0
golang.org/x/crypto v0.21.0
@@ -151,8 +151,10 @@ require (
require (
github.com/dlclark/regexp2 v1.4.0 // indirect
+ github.com/go-kit/kit v0.12.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+ github.com/pires/go-proxyproto v0.7.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
@@ -229,7 +231,6 @@ require (
github.com/envoyproxy/go-control-plane v0.12.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/go-kit/kit v0.12.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.22.2 // indirect
@@ -312,7 +313,6 @@ require (
github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/sirupsen/logrus v1.9.3
- github.com/soheilhy/cmux v0.1.5 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
diff --git a/go.sum b/go.sum
index 37ea77d492150..170ca1df4672f 100644
--- a/go.sum
+++ b/go.sum
@@ -1017,14 +1017,14 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I=
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw=
-github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb h1:AWE6+kvtE18HP+lRWNUCyvymyrFSXs6TcS2vXIXGIuw=
-github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb/go.mod h1:kkWM4WUV230bNG3urVRWPBnSJHs64y/0RmWjftnnn0c=
+github.com/grafana/dskit v0.0.0-20240626184720-35810fdf1c6d h1:CD8PWWX+9lYdgeMquSofmLErvCtk7jb+3/W/SH6oo/k=
+github.com/grafana/dskit v0.0.0-20240626184720-35810fdf1c6d/go.mod h1:HvSf3uf8Ps2vPpzHeAFyZTdUcbVr+Rxpq1xcx7J/muc=
github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak=
github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90=
github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY=
github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I=
-github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 h1:aLBiDMjTtXx2800iCIp+8kdjIlvGX0MF/zICQMQO2qU=
-github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU=
+github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE=
+github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU=
github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d h1:YwbJJ/PrVWVdnR+j/EAVuazdeP+Za5qbiH1Vlr+wFXs=
github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU=
@@ -1541,6 +1541,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi
github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs=
+github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1697,8 +1699,6 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg=
@@ -1840,8 +1840,8 @@ go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
+go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc=
@@ -2206,7 +2206,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/integration/bloom_building_test.go b/integration/bloom_building_test.go
new file mode 100644
index 0000000000000..0a96ee5702ace
--- /dev/null
+++ b/integration/bloom_building_test.go
@@ -0,0 +1,266 @@
+//go:build integration
+
+package integration
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/grafana/dskit/flagext"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/integration/client"
+ "github.com/grafana/loki/v3/integration/cluster"
+ "github.com/grafana/loki/v3/pkg/storage"
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/cache"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client/local"
+ "github.com/grafana/loki/v3/pkg/storage/config"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ bloomshipperconfig "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config"
+ "github.com/grafana/loki/v3/pkg/storage/types"
+ "github.com/grafana/loki/v3/pkg/util/mempool"
+)
+
+func TestBloomBuilding(t *testing.T) {
+ const (
+ nSeries = 10 //1000
+ nLogsPerSeries = 50
+ nBuilders = 5
+ )
+
+ clu := cluster.New(nil, cluster.SchemaWithTSDB, func(c *cluster.Cluster) {
+ c.SetSchemaVer("v13")
+ })
+ defer func() {
+ require.NoError(t, clu.Cleanup())
+ }()
+
+ // First run distributor and ingester and write some data across many series.
+ tDistributor := clu.AddComponent(
+ "distributor",
+ "-target=distributor",
+ )
+ tIngester := clu.AddComponent(
+ "ingester",
+ "-target=ingester",
+ "-ingester.flush-on-shutdown=true",
+ )
+ require.NoError(t, clu.Run())
+
+ tenantID := "fake"
+ now := time.Now()
+ cliDistributor := client.New(tenantID, "", tDistributor.HTTPURL())
+ cliIngester := client.New(tenantID, "", tIngester.HTTPURL())
+ cliIngester.Now = now
+
+ // We now ingest some logs across many series.
+ series := make([]labels.Labels, 0, nSeries)
+ for i := 0; i < nSeries; i++ {
+ lbs := labels.FromStrings("job", fmt.Sprintf("job-%d", i))
+ series = append(series, lbs)
+
+ for j := 0; j < nLogsPerSeries; j++ {
+ require.NoError(t, cliDistributor.PushLogLine(fmt.Sprintf("log line %d", j), now, nil, lbs.Map()))
+ }
+ }
+
+ // restart ingester which should flush the chunks and index
+ require.NoError(t, tIngester.Restart())
+
+ // Start compactor and wait for compaction to finish.
+ tCompactor := clu.AddComponent(
+ "compactor",
+ "-target=compactor",
+ "-compactor.compaction-interval=10s",
+ )
+ require.NoError(t, clu.Run())
+
+ // Wait for compaction to finish.
+ cliCompactor := client.New(tenantID, "", tCompactor.HTTPURL())
+ checkCompactionFinished(t, cliCompactor)
+
+ // Now create the bloom planner and builders
+ tBloomPlanner := clu.AddComponent(
+ "bloom-planner",
+ "-target=bloom-planner",
+ "-bloom-build.enabled=true",
+ "-bloom-build.enable=true",
+ "-bloom-build.planner.interval=15s",
+ "-bloom-build.planner.min-table-offset=0", // Disable table offset so we process today's data.
+ "-bloom.cache-list-ops=0", // Disable cache list operations to avoid caching issues.
+ )
+ require.NoError(t, clu.Run())
+
+ // Add several builders
+ for i := 0; i < nBuilders; i++ {
+ clu.AddComponent(
+ "bloom-builder",
+ "-target=bloom-builder",
+ "-bloom-build.enabled=true",
+ "-bloom-build.enable=true",
+ "-bloom-build.builder.planner-address="+tBloomPlanner.GRPCURL(),
+ )
+ }
+ require.NoError(t, clu.Run())
+
+ // Wait for bloom build to finish
+ cliPlanner := client.New(tenantID, "", tBloomPlanner.HTTPURL())
+ checkBloomBuildFinished(t, cliPlanner)
+
+ // Create bloom client to fetch metas and blocks.
+ bloomStore := createBloomStore(t, tBloomPlanner.ClusterSharedPath())
+
+ // Check that all series pushed are present in the metas and blocks.
+ checkSeriesInBlooms(t, now, tenantID, bloomStore, series)
+
+ // Push some more logs so TSDBs need to be updated.
+ for i := 0; i < nSeries; i++ {
+ lbs := labels.FromStrings("job", fmt.Sprintf("job-new-%d", i))
+ series = append(series, lbs)
+
+ for j := 0; j < nLogsPerSeries; j++ {
+ require.NoError(t, cliDistributor.PushLogLine(fmt.Sprintf("log line %d", j), now, nil, lbs.Map()))
+ }
+ }
+
+ // restart ingester which should flush the chunks and index
+ require.NoError(t, tIngester.Restart())
+
+ // Wait for compaction to finish so TSDBs are updated.
+ checkCompactionFinished(t, cliCompactor)
+
+ // Wait for bloom build to finish
+ checkBloomBuildFinished(t, cliPlanner)
+
+ // Check that all series (both previous and new ones) pushed are present in the metas and blocks.
+ // This check ensures up to 1 meta per series, which tests deletion of old metas.
+ checkSeriesInBlooms(t, now, tenantID, bloomStore, series)
+}
+
+func checkCompactionFinished(t *testing.T, cliCompactor *client.Client) {
+ checkForTimestampMetric(t, cliCompactor, "loki_boltdb_shipper_compact_tables_operation_last_successful_run_timestamp_seconds")
+}
+
+func checkBloomBuildFinished(t *testing.T, cliPlanner *client.Client) {
+ checkForTimestampMetric(t, cliPlanner, "loki_bloomplanner_build_last_successful_run_timestamp_seconds")
+}
+
+func checkForTimestampMetric(t *testing.T, cliPlanner *client.Client, metricName string) {
+ start := time.Now()
+ time.Sleep(1 * time.Second) // Gauge seconds has second precision, so we need to wait a bit.
+
+ require.Eventually(t, func() bool {
+ metrics, err := cliPlanner.Metrics()
+ require.NoError(t, err)
+
+ val, _, err := extractMetric(metricName, metrics)
+ require.NoError(t, err)
+
+ lastRun := time.Unix(int64(val), 0)
+ return lastRun.After(start)
+ }, 30*time.Second, 1*time.Second)
+}
+
+func createBloomStore(t *testing.T, sharedPath string) *bloomshipper.BloomStore {
+ logger := log.NewNopLogger()
+ //logger := log.NewLogfmtLogger(os.Stdout)
+
+ schemaCfg := config.SchemaConfig{
+ Configs: []config.PeriodConfig{
+ {
+ From: parseDayTime("2023-09-01"),
+ IndexTables: config.IndexPeriodicTableConfig{
+ PeriodicTableConfig: config.PeriodicTableConfig{
+ Prefix: "index_tsdb_",
+ Period: 24 * time.Hour,
+ },
+ },
+ IndexType: types.TSDBType,
+ ObjectType: types.StorageTypeFileSystem,
+ Schema: "v13",
+ RowShards: 16,
+ },
+ },
+ }
+ storageCfg := storage.Config{
+ BloomShipperConfig: bloomshipperconfig.Config{
+ WorkingDirectory: []string{sharedPath + "/bloom-store-test"},
+ DownloadParallelism: 1,
+ BlocksCache: bloomshipperconfig.BlocksCacheConfig{
+ SoftLimit: flagext.Bytes(10 << 20),
+ HardLimit: flagext.Bytes(20 << 20),
+ TTL: time.Hour,
+ },
+ },
+ FSConfig: local.FSConfig{
+ Directory: sharedPath + "/fs-store-1",
+ },
+ }
+
+ reg := prometheus.NewPedanticRegistry()
+ metasCache := cache.NewNoopCache()
+ blocksCache := bloomshipper.NewFsBlocksCache(storageCfg.BloomShipperConfig.BlocksCache, reg, logger)
+
+ store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, storage.ClientMetrics{}, metasCache, blocksCache, &mempool.SimpleHeapAllocator{}, reg, logger)
+ require.NoError(t, err)
+
+ return store
+}
+
+func checkSeriesInBlooms(
+ t *testing.T,
+ now time.Time,
+ tenantID string,
+ bloomStore *bloomshipper.BloomStore,
+ series []labels.Labels,
+) {
+ for _, lbs := range series {
+ seriesFP := model.Fingerprint(lbs.Hash())
+
+ metas, err := bloomStore.FetchMetas(context.Background(), bloomshipper.MetaSearchParams{
+ TenantID: tenantID,
+ Interval: bloomshipper.NewInterval(model.TimeFromUnix(now.Add(-24*time.Hour).Unix()), model.TimeFromUnix(now.Unix())),
+ Keyspace: v1.NewBounds(seriesFP, seriesFP),
+ })
+ require.NoError(t, err)
+
+ // Only one meta should be present.
+ require.Len(t, metas, 1)
+
+ var relevantBlocks []bloomshipper.BlockRef
+ for _, block := range metas[0].Blocks {
+ if block.Cmp(uint64(seriesFP)) != v1.Overlap {
+ continue
+ }
+ relevantBlocks = append(relevantBlocks, block)
+ }
+
+ // Only one block should be relevant.
+ require.Len(t, relevantBlocks, 1)
+
+ queriers, err := bloomStore.FetchBlocks(context.Background(), relevantBlocks)
+ require.NoError(t, err)
+ require.Len(t, queriers, 1)
+ querier := queriers[0]
+
+ require.NoError(t, querier.Seek(seriesFP))
+ require.Equal(t, seriesFP, querier.At().Series.Fingerprint)
+ }
+}
+
+func parseDayTime(s string) config.DayTime {
+ t, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ panic(err)
+ }
+ return config.DayTime{
+ Time: model.TimeFromUnix(t.Unix()),
+ }
+}
diff --git a/operator/apis/loki/v1/rulerconfig_types.go b/operator/apis/loki/v1/rulerconfig_types.go
index d8ca91523da8d..8321d2bc67012 100644
--- a/operator/apis/loki/v1/rulerconfig_types.go
+++ b/operator/apis/loki/v1/rulerconfig_types.go
@@ -221,6 +221,13 @@ type AlertManagerClientTLSConfig struct {
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Key Path"
KeyPath *string `json:"keyPath,omitempty"`
+
+ // Skip validating server certificate.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Skip validating server certificate"
+ InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"`
}
// RemoteWriteAuthType defines the type of authorization to use to access the remote write endpoint.
diff --git a/operator/apis/loki/v1/zz_generated.deepcopy.go b/operator/apis/loki/v1/zz_generated.deepcopy.go
index 03b8e5ad0b9ed..c7206c5ab6602 100644
--- a/operator/apis/loki/v1/zz_generated.deepcopy.go
+++ b/operator/apis/loki/v1/zz_generated.deepcopy.go
@@ -118,6 +118,11 @@ func (in *AlertManagerClientTLSConfig) DeepCopyInto(out *AlertManagerClientTLSCo
*out = new(string)
**out = **in
}
+ if in.InsecureSkipVerify != nil {
+ in, out := &in.InsecureSkipVerify, &out.InsecureSkipVerify
+ *out = new(bool)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertManagerClientTLSConfig.
diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
index b77a51c3574a6..bd28187a5f013 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.6.1
- createdAt: "2024-06-04T16:17:47Z"
+ createdAt: "2024-06-12T17:07:27Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
features.operators.openshift.io/disconnected: "true"
@@ -995,6 +995,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: alertmanager.client.tls.keyPath
@@ -1144,6 +1147,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: overrides.alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: overrides.alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: overrides.alertmanager.client.tls.keyPath
diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml
index 1969480f84378..d7af1c24bad25 100644
--- a/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml
+++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_rulerconfigs.yaml
@@ -107,6 +107,9 @@ spec:
description: The client-side certificate file path for
the TLS configuration.
type: string
+ insecureSkipVerify:
+ description: Skip validating server certificate.
+ type: boolean
keyPath:
description: The client-side key file path for the TLS
configuration.
@@ -310,6 +313,9 @@ spec:
description: The client-side certificate file path
for the TLS configuration.
type: string
+ insecureSkipVerify:
+ description: Skip validating server certificate.
+ type: boolean
keyPath:
description: The client-side key file path for the
TLS configuration.
diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
index 153de23258649..42d3b88c6d79e 100644
--- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:0.6.1
- createdAt: "2024-06-04T16:17:41Z"
+ createdAt: "2024-06-12T17:07:25Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
@@ -988,6 +988,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: alertmanager.client.tls.keyPath
@@ -1137,6 +1140,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: overrides.alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: overrides.alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: overrides.alertmanager.client.tls.keyPath
diff --git a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml
index 1057ece6a2edf..71b690e14a632 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml
@@ -107,6 +107,9 @@ spec:
description: The client-side certificate file path for
the TLS configuration.
type: string
+ insecureSkipVerify:
+ description: Skip validating server certificate.
+ type: boolean
keyPath:
description: The client-side key file path for the TLS
configuration.
@@ -310,6 +313,9 @@ spec:
description: The client-side certificate file path
for the TLS configuration.
type: string
+ insecureSkipVerify:
+ description: Skip validating server certificate.
+ type: boolean
keyPath:
description: The client-side key file path for the
TLS configuration.
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
index c2f743258fa40..b270d82272608 100644
--- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:0.1.0
- createdAt: "2024-06-04T16:17:54Z"
+ createdAt: "2024-06-12T17:07:29Z"
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
@@ -1008,6 +1008,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: alertmanager.client.tls.keyPath
@@ -1157,6 +1160,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: overrides.alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: overrides.alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: overrides.alertmanager.client.tls.keyPath
diff --git a/operator/bundle/openshift/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_rulerconfigs.yaml
index c4d5a37fa0353..219b8cb60697d 100644
--- a/operator/bundle/openshift/manifests/loki.grafana.com_rulerconfigs.yaml
+++ b/operator/bundle/openshift/manifests/loki.grafana.com_rulerconfigs.yaml
@@ -107,6 +107,9 @@ spec:
description: The client-side certificate file path for
the TLS configuration.
type: string
+ insecureSkipVerify:
+ description: Skip validating server certificate.
+ type: boolean
keyPath:
description: The client-side key file path for the TLS
configuration.
@@ -310,6 +313,9 @@ spec:
description: The client-side certificate file path
for the TLS configuration.
type: string
+ insecureSkipVerify:
+ description: Skip validating server certificate.
+ type: boolean
keyPath:
description: The client-side key file path for the
TLS configuration.
diff --git a/operator/config/crd/bases/loki.grafana.com_rulerconfigs.yaml b/operator/config/crd/bases/loki.grafana.com_rulerconfigs.yaml
index 88d65fb826c73..df922bfdefd1d 100644
--- a/operator/config/crd/bases/loki.grafana.com_rulerconfigs.yaml
+++ b/operator/config/crd/bases/loki.grafana.com_rulerconfigs.yaml
@@ -89,6 +89,9 @@ spec:
description: The client-side certificate file path for
the TLS configuration.
type: string
+ insecureSkipVerify:
+ description: Skip validating server certificate.
+ type: boolean
keyPath:
description: The client-side key file path for the TLS
configuration.
@@ -292,6 +295,9 @@ spec:
description: The client-side certificate file path
for the TLS configuration.
type: string
+ insecureSkipVerify:
+ description: Skip validating server certificate.
+ type: boolean
keyPath:
description: The client-side key file path for the
TLS configuration.
diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
index 649b8d0739ec5..3627c03d58577 100644
--- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
@@ -1376,6 +1376,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: alertmanager.client.tls.keyPath
@@ -1525,6 +1528,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: overrides.alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: overrides.alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: overrides.alertmanager.client.tls.keyPath
diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
index 24f52dc2acd05..0aefa95fc2807 100644
--- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
@@ -1369,6 +1369,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: alertmanager.client.tls.keyPath
@@ -1518,6 +1521,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: overrides.alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: overrides.alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: overrides.alertmanager.client.tls.keyPath
diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
index aab8dcd070778..77bb3bff6fcd1 100644
--- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
@@ -1388,6 +1388,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: alertmanager.client.tls.keyPath
@@ -1537,6 +1540,9 @@ spec:
- description: The client-side certificate file path for the TLS configuration.
displayName: Cert Path
path: overrides.alertmanager.client.tls.certPath
+ - description: Skip validating server certificate.
+ displayName: Skip validating server certificate
+ path: overrides.alertmanager.client.tls.insecureSkipVerify
- description: The client-side key file path for the TLS configuration.
displayName: Key Path
path: overrides.alertmanager.client.tls.keyPath
diff --git a/operator/docs/operator/api.md b/operator/docs/operator/api.md
index e6e5a65765d1f..ca71ecee6ce3a 100644
--- a/operator/docs/operator/api.md
+++ b/operator/docs/operator/api.md
@@ -237,6 +237,18 @@ string
The client-side key file path for the TLS configuration.
+<tr>
+<td>
+<code>insecureSkipVerify</code><br/>
+<em>
+bool
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>Skip validating server certificate.</p>
+</td>
+</tr>
diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go
index 4ec0b728d916d..e212e6d6a30fe 100644
--- a/operator/internal/manifests/config.go
+++ b/operator/internal/manifests/config.go
@@ -248,10 +248,11 @@ func alertManagerConfig(spec *lokiv1.AlertManagerSpec) *config.AlertManagerConfi
conf.Notifier = &config.NotifierConfig{}
if tls := clt.TLS; tls != nil {
conf.Notifier.TLS = config.TLSConfig{
- CAPath: tls.CAPath,
- ServerName: tls.ServerName,
- CertPath: tls.CertPath,
- KeyPath: tls.KeyPath,
+ CAPath: tls.CAPath,
+ ServerName: tls.ServerName,
+ InsecureSkipVerify: tls.InsecureSkipVerify,
+ CertPath: tls.CertPath,
+ KeyPath: tls.KeyPath,
}
}
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 1d9a0287b398b..9486771f2611b 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -5632,3 +5632,391 @@ analytics:
require.NoError(t, err)
require.YAMLEq(t, expCfg, string(cfg))
}
+
+func TestBuild_ConfigAndRuntimeConfig_RulerConfigGenerated_WithAlertmanagerClient(t *testing.T) {
+ expCfg := `
+---
+auth_enabled: true
+chunk_store_config:
+ chunk_cache_config:
+ embedded_cache:
+ enabled: true
+ max_size_mb: 500
+common:
+ storage:
+ s3:
+ endpoint: http://test.default.svc.cluster.local.:9000
+ bucketnames: loki
+ region: us-east
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_ACCESS_KEY_SECRET}
+ s3forcepathstyle: true
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
+ ring:
+ kvstore:
+ store: memberlist
+ heartbeat_period: 5s
+ heartbeat_timeout: 1m
+ instance_port: 9095
+compactor:
+ compaction_interval: 2h
+ working_directory: /tmp/loki/compactor
+frontend:
+ tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
+ compress_responses: true
+ max_outstanding_per_tenant: 4096
+ log_queries_longer_than: 5s
+frontend_worker:
+ frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
+ grpc_client_config:
+ max_send_msg_size: 104857600
+ match_max_concurrent: true
+ingester:
+ chunk_block_size: 262144
+ chunk_encoding: snappy
+ chunk_idle_period: 1h
+ chunk_retain_period: 5m
+ chunk_target_size: 2097152
+ flush_op_timeout: 10m
+ lifecycler:
+ final_sleep: 0s
+ join_after: 30s
+ num_tokens: 512
+ ring:
+ replication_factor: 1
+ max_chunk_age: 2h
+ max_transfer_retries: 0
+ wal:
+ enabled: true
+ dir: /tmp/wal
+ replay_memory_ceiling: 2147483648
+ingester_client:
+ grpc_client_config:
+ max_recv_msg_size: 67108864
+ remote_timeout: 1s
+# NOTE: Keep the order of keys as in Loki docs
+# to enable easy diffs when vendoring newer
+# Loki releases.
+# (See https://grafana.com/docs/loki/latest/configuration/#limits_config)
+#
+# Values for not exposed fields are taken from the grafana/loki production
+# configuration manifests.
+# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet)
+limits_config:
+ ingestion_rate_strategy: global
+ ingestion_rate_mb: 4
+ ingestion_burst_size_mb: 6
+ max_label_name_length: 1024
+ max_label_value_length: 2048
+ max_label_names_per_series: 30
+ reject_old_samples: true
+ reject_old_samples_max_age: 168h
+ creation_grace_period: 10m
+ enforce_metric_name: false
+ # Keep max_streams_per_user always to 0 to default
+ # using max_global_streams_per_user always.
+ # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73)
+ max_streams_per_user: 0
+ max_line_size: 256000
+ max_entries_limit_per_query: 5000
+ max_global_streams_per_user: 0
+ max_chunks_per_query: 2000000
+ max_query_length: 721h
+ max_query_parallelism: 32
+ tsdb_max_query_parallelism: 512
+ max_query_series: 500
+ cardinality_limit: 100000
+ max_streams_matchers_per_query: 1000
+ max_cache_freshness_per_query: 10m
+ split_queries_by_interval: 30m
+ query_timeout: 1m
+ per_stream_rate_limit: 5MB
+ per_stream_rate_limit_burst: 15MB
+ shard_streams:
+ enabled: true
+ desired_rate: 3MB
+ allow_structured_metadata: true
+memberlist:
+ abort_if_cluster_join_fails: true
+ advertise_port: 7946
+ bind_port: 7946
+ join_members:
+ - loki-gossip-ring-lokistack-dev.default.svc.cluster.local:7946
+ max_join_backoff: 1m
+ max_join_retries: 10
+ min_join_backoff: 1s
+querier:
+ engine:
+ max_look_back_period: 30s
+ extra_query_delay: 0s
+ max_concurrent: 2
+ query_ingesters_within: 3h
+ tail_max_duration: 1h
+query_range:
+ align_queries_with_step: true
+ cache_results: true
+ max_retries: 5
+ results_cache:
+ cache:
+ embedded_cache:
+ enabled: true
+ max_size_mb: 500
+ parallelise_shardable_queries: true
+schema_config:
+ configs:
+ - from: "2020-10-01"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v11
+ store: boltdb-shipper
+ruler:
+ enable_api: true
+ enable_sharding: true
+ evaluation_interval: 1m
+ poll_interval: 1m
+ external_url: http://alert.me/now
+ external_labels:
+ key1: val1
+ key2: val2
+ alertmanager_url: http://alerthost1,http://alerthost2
+ enable_alertmanager_v2: true
+ enable_alertmanager_discovery: true
+ alertmanager_refresh_interval: 1m
+ notification_queue_capacity: 1000
+ notification_timeout: 1m
+ alertmanager_client:
+ tls_cert_path: "custom/path"
+ tls_key_path: "custom/key"
+ tls_ca_path: "custom/CA"
+ tls_server_name: "custom-servername"
+ tls_insecure_skip_verify: false
+ basic_auth_password: "pass"
+ basic_auth_username: "user"
+ credentials: "creds"
+ credentials_file: "cred/file"
+ type: "auth"
+ for_outage_tolerance: 10m
+ for_grace_period: 5m
+ resend_delay: 2m
+ remote_write:
+ enabled: true
+ config_refresh_period: 1m
+ client:
+ name: remote-write-me
+ url: http://remote.write.me
+ remote_timeout: 10s
+ proxy_url: http://proxy.through.me
+ follow_redirects: true
+ headers:
+ more: foryou
+ less: forme
+ authorization:
+ type: bearer
+ credentials: supersecret
+ queue_config:
+ capacity: 1000
+ max_shards: 100
+ min_shards: 50
+ max_samples_per_send: 1000
+ batch_send_deadline: 10s
+ min_backoff: 30ms
+ max_backoff: 100ms
+ wal:
+ dir: /tmp/wal
+ truncate_frequency: 60m
+ min_age: 5m
+ max_age: 4h
+ rule_path: /tmp/loki
+ storage:
+ type: local
+ local:
+ directory: /tmp/rules
+ ring:
+ kvstore:
+ store: memberlist
+server:
+ graceful_shutdown_timeout: 5s
+ grpc_server_min_time_between_pings: '10s'
+ grpc_server_ping_without_stream_allowed: true
+ grpc_server_max_concurrent_streams: 1000
+ grpc_server_max_recv_msg_size: 104857600
+ grpc_server_max_send_msg_size: 104857600
+ http_listen_port: 3100
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
+ log_level: info
+storage_config:
+ boltdb_shipper:
+ active_index_directory: /tmp/loki/index
+ cache_location: /tmp/loki/index_cache
+ cache_ttl: 24h
+ resync_interval: 5m
+ shared_store: s3
+ index_gateway_client:
+ server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095
+tracing:
+ enabled: false
+analytics:
+ reporting_enabled: true
+`
+ expRCfg := `
+---
+overrides:
+`
+ opts := Options{
+ Stack: lokiv1.LokiStackSpec{
+ Replication: &lokiv1.ReplicationSpec{
+ Factor: 1,
+ },
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
+ IngestionRate: 4,
+ IngestionBurstSize: 6,
+ MaxLabelNameLength: 1024,
+ MaxLabelValueLength: 2048,
+ MaxLabelNamesPerSeries: 30,
+ MaxGlobalStreamsPerTenant: 0,
+ MaxLineSize: 256000,
+ PerStreamRateLimit: 5,
+ PerStreamRateLimitBurst: 15,
+ PerStreamDesiredRate: 3,
+ },
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 5000,
+ MaxChunksPerQuery: 2000000,
+ MaxQuerySeries: 500,
+ QueryTimeout: "1m",
+ CardinalityLimit: 100000,
+ },
+ },
+ },
+ },
+ Namespace: "test-ns",
+ Name: "test",
+ Compactor: Address{
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ FrontendWorker: Address{
+ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ GossipRing: GossipRing{
+ InstancePort: 9095,
+ BindPort: 7946,
+ MembersDiscoveryAddr: "loki-gossip-ring-lokistack-dev.default.svc.cluster.local",
+ },
+ Querier: Address{
+ Protocol: "http",
+ FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local",
+ Port: 3100,
+ },
+ IndexGateway: Address{
+ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ Ruler: Ruler{
+ Enabled: true,
+ RulesStorageDirectory: "/tmp/rules",
+ EvaluationInterval: "1m",
+ PollInterval: "1m",
+ AlertManager: &AlertManagerConfig{
+ Notifier: &NotifierConfig{
+ TLS: TLSConfig{
+ ServerName: ptr.To("custom-servername"),
+ CertPath: ptr.To("custom/path"),
+ KeyPath: ptr.To("custom/key"),
+ CAPath: ptr.To("custom/CA"),
+ InsecureSkipVerify: ptr.To(false),
+ },
+ BasicAuth: BasicAuth{
+ Username: ptr.To("user"),
+ Password: ptr.To("pass"),
+ },
+ HeaderAuth: HeaderAuth{
+ CredentialsFile: ptr.To("cred/file"),
+ Type: ptr.To("auth"),
+ Credentials: ptr.To("creds"),
+ },
+ },
+ ExternalURL: "http://alert.me/now",
+ ExternalLabels: map[string]string{
+ "key1": "val1",
+ "key2": "val2",
+ },
+ Hosts: "http://alerthost1,http://alerthost2",
+ EnableV2: true,
+ EnableDiscovery: true,
+ RefreshInterval: "1m",
+ QueueCapacity: 1000,
+ Timeout: "1m",
+ ForOutageTolerance: "10m",
+ ForGracePeriod: "5m",
+ ResendDelay: "2m",
+ },
+ RemoteWrite: &RemoteWriteConfig{
+ Enabled: true,
+ RefreshPeriod: "1m",
+ Client: &RemoteWriteClientConfig{
+ Name: "remote-write-me",
+ URL: "http://remote.write.me",
+ RemoteTimeout: "10s",
+ Headers: map[string]string{
+ "more": "foryou",
+ "less": "forme",
+ },
+ ProxyURL: "http://proxy.through.me",
+ FollowRedirects: true,
+ BearerToken: "supersecret",
+ },
+ Queue: &RemoteWriteQueueConfig{
+ Capacity: 1000,
+ MaxShards: 100,
+ MinShards: 50,
+ MaxSamplesPerSend: 1000,
+ BatchSendDeadline: "10s",
+ MinBackOffPeriod: "30ms",
+ MaxBackOffPeriod: "100ms",
+ },
+ },
+ },
+ StorageDirectory: "/tmp/loki",
+ MaxConcurrent: MaxConcurrent{
+ AvailableQuerierCPUCores: 2,
+ },
+ WriteAheadLog: WriteAheadLog{
+ Directory: "/tmp/wal",
+ IngesterMemoryRequest: 4 * 1024 * 1024 * 1024,
+ },
+ ObjectStorage: storage.Options{
+ SharedStore: lokiv1.ObjectStorageSecretS3,
+ S3: &storage.S3StorageConfig{
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ ForcePathStyle: true,
+ },
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-01",
+ },
+ },
+ },
+ Shippers: []string{"boltdb"},
+ EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
+ }
+ cfg, rCfg, err := Build(opts)
+ require.NoError(t, err)
+ require.YAMLEq(t, expCfg, string(cfg))
+ require.YAMLEq(t, expRCfg, string(rCfg))
+}
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index 3df0ac7463881..38326157f2937 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -313,6 +313,47 @@ ruler:
{{- if .Timeout }}
notification_timeout: {{ .Timeout }}
{{- end }}
+ {{- if .Notifier }}
+ {{- with .Notifier }}
+ alertmanager_client:
+ {{- if .TLS.CertPath }}
+ tls_cert_path: {{ .TLS.CertPath }}
+ {{- end }}
+ {{- if .TLS.KeyPath }}
+ tls_key_path: {{ .TLS.KeyPath }}
+ {{- end }}
+ {{- if .TLS.CAPath }}
+ tls_ca_path: {{ .TLS.CAPath }}
+ {{- end }}
+ {{- if .TLS.ServerName }}
+ tls_server_name: {{ .TLS.ServerName }}
+ {{- end }}
+ {{- if .TLS.InsecureSkipVerify }}
+ tls_insecure_skip_verify: {{ .TLS.InsecureSkipVerify }}
+ {{- end }}
+ {{- if .TLS.CipherSuites }}
+ tls_cipher_suites: {{ .TLS.CipherSuites }}
+ {{- end }}
+ {{- if .TLS.MinVersion }}
+ tls_min_version: {{ .TLS.MinVersion }}
+ {{- end }}
+ {{- if .BasicAuth.Username }}
+ basic_auth_username: {{ .BasicAuth.Username }}
+ {{- end }}
+ {{- if .BasicAuth.Password }}
+ basic_auth_password: {{ .BasicAuth.Password }}
+ {{- end }}
+ {{- if .HeaderAuth.Type }}
+ type: {{ .HeaderAuth.Type }}
+ {{- end }}
+ {{- if .HeaderAuth.Credentials }}
+ credentials: {{ .HeaderAuth.Credentials }}
+ {{- end }}
+ {{- if .HeaderAuth.CredentialsFile }}
+ credentials_file: {{ .HeaderAuth.CredentialsFile }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
{{- with .RelabelConfigs }}
alert_relabel_configs:
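
For orientation, a minimal RulerConfig sketch that would exercise the new `alertmanager_client` template block above. Only the `client.tls.*` paths (including the new `insecureSkipVerify`) come from this change; the metadata, `endpoints` value, and file paths are illustrative assumptions, not part of the diff:

```yaml
apiVersion: loki.grafana.com/v1
kind: RulerConfig
metadata:
  name: rulerconfig-sample        # illustrative
  namespace: loki-tenant          # illustrative
spec:
  alertmanager:
    endpoints:
      - https://alertmanager.example.svc:9093   # illustrative endpoint
    client:
      tls:
        caPath: /var/run/secrets/ca.crt         # illustrative path
        serverName: alertmanager.example.svc
        insecureSkipVerify: false               # new field introduced by this change
```
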
diff --git a/pkg/analytics/reporter.go b/pkg/analytics/reporter.go
index 7daa352259f28..47312e6374cd2 100644
--- a/pkg/analytics/reporter.go
+++ b/pkg/analytics/reporter.go
@@ -95,28 +95,31 @@ func (rep *Reporter) initLeader(ctx context.Context) *ClusterSeed {
MaxRetries: 0,
})
for backoff.Ongoing() {
- // create a new cluster seed
- seed := ClusterSeed{
- UID: uuid.NewString(),
- PrometheusVersion: build.GetVersion(),
- CreatedAt: time.Now(),
- }
- if err := kvClient.CAS(ctx, seedKey, func(in interface{}) (out interface{}, retry bool, err error) {
- // The key is already set, so we don't need to do anything
- if in != nil {
- if kvSeed, ok := in.(*ClusterSeed); ok && kvSeed != nil && kvSeed.UID != seed.UID {
- seed = *kvSeed
- return nil, false, nil
+ {
+ // create a new cluster seed
+ seed := ClusterSeed{
+ UID: uuid.NewString(),
+ PrometheusVersion: build.GetVersion(),
+ CreatedAt: time.Now(),
+ }
+ if err := kvClient.CAS(ctx, seedKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ // The key is already set, so we don't need to do anything
+ if in != nil {
+ if kvSeed, ok := in.(*ClusterSeed); ok && kvSeed != nil && kvSeed.UID != seed.UID {
+ seed = *kvSeed
+ return nil, false, nil
+ }
}
+ return &seed, true, nil
+ }); err != nil {
+ level.Info(rep.logger).Log("msg", "failed to CAS cluster seed key", "err", err)
+ continue
}
- return &seed, true, nil
- }); err != nil {
- level.Info(rep.logger).Log("msg", "failed to CAS cluster seed key", "err", err)
- continue
}
// ensure stability of the cluster seed
stableSeed := ensureStableKey(ctx, kvClient, rep.logger)
- seed = *stableSeed
+	// Declare a fresh local seed here so it does not alias (and race with) the seed used inside the CAS loop above.
+ seed := *stableSeed
// Fetch the remote cluster seed.
remoteSeed, err := rep.fetchSeed(ctx,
func(err error) bool {
@@ -262,7 +265,7 @@ func (rep *Reporter) running(ctx context.Context) error {
}
return nil
}
- rep.startCPUPercentCollection(ctx)
+ rep.startCPUPercentCollection(ctx, time.Minute)
// check every minute if we should report.
ticker := time.NewTicker(reportCheckInterval)
defer ticker.Stop()
@@ -317,13 +320,13 @@ func (rep *Reporter) reportUsage(ctx context.Context, interval time.Time) error
return errs.Err()
}
+const cpuUsageKey = "cpu_usage"
+
var (
- cpuUsageKey = "cpu_usage"
- cpuUsage = NewFloat(cpuUsageKey)
- cpuCollectionInterval = time.Minute
+ cpuUsage = NewFloat(cpuUsageKey)
)
-func (rep *Reporter) startCPUPercentCollection(ctx context.Context) {
+func (rep *Reporter) startCPUPercentCollection(ctx context.Context, cpuCollectionInterval time.Duration) {
proc, err := process.NewProcess(int32(os.Getpid()))
if err != nil {
level.Debug(rep.logger).Log("msg", "failed to get process", "err", err)
diff --git a/pkg/analytics/reporter_test.go b/pkg/analytics/reporter_test.go
index 140953e70700e..889ec8d31e19a 100644
--- a/pkg/analytics/reporter_test.go
+++ b/pkg/analytics/reporter_test.go
@@ -159,14 +159,13 @@ func TestWrongKV(t *testing.T) {
}
func TestStartCPUCollection(t *testing.T) {
- cpuCollectionInterval = 1 * time.Second
r, err := NewReporter(Config{Leader: true, Enabled: true}, kv.Config{
Store: "inmemory",
}, nil, log.NewLogfmtLogger(os.Stdout), prometheus.NewPedanticRegistry())
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- r.startCPUPercentCollection(ctx)
+ r.startCPUPercentCollection(ctx, 1*time.Second)
require.Eventually(t, func() bool {
return cpuUsage.Value() > 0
}, 5*time.Second, 1*time.Second)
diff --git a/pkg/bloombuild/builder/batch.go b/pkg/bloombuild/builder/batch.go
index 3ff52327b4c30..4b5fcdb00ad2e 100644
--- a/pkg/bloombuild/builder/batch.go
+++ b/pkg/bloombuild/builder/batch.go
@@ -168,9 +168,9 @@ func newBatchedBlockLoader(
}
// compiler checks
-var _ v1.Iterator[*v1.SeriesWithBloom] = &blockLoadingIter{}
-var _ v1.CloseableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{}
-var _ v1.ResettableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{}
+var _ v1.Iterator[*v1.SeriesWithBlooms] = &blockLoadingIter{}
+var _ v1.CloseableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{}
+var _ v1.ResettableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{}
// TODO(chaudum): testware
func newBlockLoadingIter(ctx context.Context, blocks []bloomshipper.BlockRef, fetcher FetchFunc[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier], batchSize int) *blockLoadingIter {
@@ -196,13 +196,13 @@ type blockLoadingIter struct {
// internals
initialized bool
err error
- iter v1.Iterator[*v1.SeriesWithBloom]
+ iter v1.Iterator[*v1.SeriesWithBlooms]
loader *batchedLoader[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier, *bloomshipper.CloseableBlockQuerier]
loaded map[io.Closer]struct{}
}
// At implements v1.Iterator.
-func (i *blockLoadingIter) At() *v1.SeriesWithBloom {
+func (i *blockLoadingIter) At() *v1.SeriesWithBlooms {
if !i.initialized {
panic("iterator not initialized")
}
@@ -229,7 +229,7 @@ func (i *blockLoadingIter) init() {
i.overlapping = overlappingBlocksIter(i.inputs)
// set initial iter
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]()
// set "match all" filter function if not present
if i.filter == nil {
@@ -249,14 +249,14 @@ func (i *blockLoadingIter) loadNext() bool {
loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize)
filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter)
- iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs))
+ iters := make([]v1.PeekingIterator[*v1.SeriesWithBlooms], 0, len(blockRefs))
for filtered.Next() {
bq := filtered.At()
i.loaded[bq] = struct{}{}
iter, err := bq.SeriesIter()
if err != nil {
i.err = err
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]()
return false
}
iters = append(iters, iter)
@@ -264,7 +264,7 @@ func (i *blockLoadingIter) loadNext() bool {
if err := filtered.Err(); err != nil {
i.err = err
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]()
return false
}
@@ -278,12 +278,12 @@ func (i *blockLoadingIter) loadNext() bool {
// two overlapping blocks can conceivably have the same series, so we need to dedupe,
// preferring the one with the most chunks already indexed since we'll have
// to add fewer chunks to the bloom
- i.iter = v1.NewDedupingIter[*v1.SeriesWithBloom, *v1.SeriesWithBloom](
- func(a, b *v1.SeriesWithBloom) bool {
+ i.iter = v1.NewDedupingIter[*v1.SeriesWithBlooms, *v1.SeriesWithBlooms](
+ func(a, b *v1.SeriesWithBlooms) bool {
return a.Series.Fingerprint == b.Series.Fingerprint
},
- v1.Identity[*v1.SeriesWithBloom],
- func(a, b *v1.SeriesWithBloom) *v1.SeriesWithBloom {
+ v1.Identity[*v1.SeriesWithBlooms],
+ func(a, b *v1.SeriesWithBlooms) *v1.SeriesWithBlooms {
if len(a.Series.Chunks) > len(b.Series.Chunks) {
return a
}
@@ -294,7 +294,7 @@ func (i *blockLoadingIter) loadNext() bool {
return i.iter.Next()
}
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]()
i.err = i.overlapping.Err()
return false
}
diff --git a/pkg/bloombuild/builder/batch_test.go b/pkg/bloombuild/builder/batch_test.go
index b2616a37dc1ec..19de5354fb14b 100644
--- a/pkg/bloombuild/builder/batch_test.go
+++ b/pkg/bloombuild/builder/batch_test.go
@@ -5,6 +5,7 @@ import (
"errors"
"testing"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
@@ -208,3 +209,12 @@ func TestOverlappingBlocksIter(t *testing.T) {
})
}
}
+
+func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef {
+ bounds := v1.NewBounds(min, max)
+ return bloomshipper.BlockRef{
+ Ref: bloomshipper.Ref{
+ Bounds: bounds,
+ },
+ }
+}
diff --git a/pkg/bloombuild/builder/builder.go b/pkg/bloombuild/builder/builder.go
index 3a6d6ce4e1532..0ee9afbc71fd5 100644
--- a/pkg/bloombuild/builder/builder.go
+++ b/pkg/bloombuild/builder/builder.go
@@ -10,10 +10,14 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/google/uuid"
+ "github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/services"
+ "github.com/grafana/dskit/user"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
"github.com/grafana/loki/v3/pkg/bloombuild/common"
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
@@ -57,14 +61,17 @@ func New(
) (*Builder, error) {
utillog.WarnExperimentalUse("Bloom Builder", logger)
+ builderID := uuid.NewString()
+ logger = log.With(logger, "builder_id", builderID)
+
tsdbStore, err := common.NewTSDBStores(schemaCfg, storeCfg, storageMetrics, logger)
if err != nil {
return nil, fmt.Errorf("error creating TSDB store: %w", err)
}
- metrics := NewMetrics(r, v1.NewMetrics(r))
+ metrics := NewMetrics(r)
b := &Builder{
- ID: uuid.NewString(),
+ ID: builderID,
cfg: cfg,
limits: limits,
metrics: metrics,
@@ -84,26 +91,59 @@ func (b *Builder) starting(_ context.Context) error {
}
func (b *Builder) stopping(_ error) error {
+ defer b.metrics.running.Set(0)
+
if b.client != nil {
+ // The gRPC server we use from dskit expects the orgID to be injected into the context when auth is enabled
+ // We won't actually use the orgID anywhere in this service, but we need to inject it to satisfy the server.
+ ctx, err := user.InjectIntoGRPCRequest(user.InjectOrgID(context.Background(), "fake"))
+ if err != nil {
+ level.Error(b.logger).Log("msg", "failed to inject orgID into context", "err", err)
+ return nil
+ }
+
req := &protos.NotifyBuilderShutdownRequest{
BuilderID: b.ID,
}
- if _, err := b.client.NotifyBuilderShutdown(context.Background(), req); err != nil {
+ if _, err := b.client.NotifyBuilderShutdown(ctx, req); err != nil {
level.Error(b.logger).Log("msg", "failed to notify planner about builder shutdown", "err", err)
}
}
- b.metrics.running.Set(0)
return nil
}
func (b *Builder) running(ctx context.Context) error {
+ // Retry if the connection to the planner is lost.
+ retries := backoff.New(ctx, b.cfg.BackoffConfig)
+ for retries.Ongoing() {
+ err := b.connectAndBuild(ctx)
+ if err == nil || errors.Is(err, context.Canceled) {
+ break
+ }
+
+ level.Error(b.logger).Log("msg", "failed to connect and build. Retrying", "err", err)
+ retries.Wait()
+ }
+
+ if err := retries.Err(); err != nil {
+ if errors.Is(err, context.Canceled) {
+ return nil
+ }
+ return fmt.Errorf("failed to connect and build: %w", err)
+ }
+
+ return nil
+}
+
+func (b *Builder) connectAndBuild(
+ ctx context.Context,
+) error {
opts, err := b.cfg.GrpcConfig.DialOption(nil, nil)
if err != nil {
return fmt.Errorf("failed to create grpc dial options: %w", err)
}
- // TODO: Wrap hereafter in retry logic
conn, err := grpc.DialContext(ctx, b.cfg.PlannerAddress, opts...)
if err != nil {
return fmt.Errorf("failed to dial bloom planner: %w", err)
@@ -111,6 +151,13 @@ func (b *Builder) running(ctx context.Context) error {
b.client = protos.NewPlannerForBuilderClient(conn)
+ // The gRPC server we use from dskit expects the orgID to be injected into the context when auth is enabled
+ // We won't actually use the orgID anywhere in this service, but we need to inject it to satisfy the server.
+ ctx, err = user.InjectIntoGRPCRequest(user.InjectOrgID(ctx, "fake"))
+ if err != nil {
+ return fmt.Errorf("failed to inject orgID into context: %w", err)
+ }
+
c, err := b.client.BuilderLoop(ctx)
if err != nil {
return fmt.Errorf("failed to start builder loop: %w", err)
@@ -131,11 +178,11 @@ func (b *Builder) builderLoop(c protos.PlannerForBuilder_BuilderLoopClient) erro
}
for b.State() == services.Running {
- // When the planner connection closes or the builder stops, the context
- // will be canceled and the loop will exit.
+ // When the planner connection closes, an EOF or "planner shutting down" error is returned.
+ // When the builder is shutting down, a gRPC context canceled error is returned.
protoTask, err := c.Recv()
if err != nil {
- if errors.Is(c.Context().Err(), context.Canceled) {
+ if status.Code(err) == codes.Canceled {
level.Debug(b.logger).Log("msg", "builder loop context canceled")
return nil
}
@@ -143,6 +190,8 @@ func (b *Builder) builderLoop(c protos.PlannerForBuilder_BuilderLoopClient) erro
return fmt.Errorf("failed to receive task from planner: %w", err)
}
+ logger := log.With(b.logger, "task", protoTask.Task.Id)
+
b.metrics.taskStarted.Inc()
start := time.Now()
status := statusSuccess
@@ -150,7 +199,7 @@ func (b *Builder) builderLoop(c protos.PlannerForBuilder_BuilderLoopClient) erro
newMetas, err := b.processTask(c.Context(), protoTask.Task)
if err != nil {
status = statusFailure
- level.Error(b.logger).Log("msg", "failed to process task", "err", err)
+ level.Error(logger).Log("msg", "failed to process task", "err", err)
}
b.metrics.taskCompleted.WithLabelValues(status).Inc()
@@ -178,13 +227,25 @@ func (b *Builder) notifyTaskCompletedToPlanner(
CreatedMetas: metas,
}
- // TODO: Implement retry
- if err := c.Send(&protos.BuilderToPlanner{
- BuilderID: b.ID,
- Result: *result.ToProtoTaskResult(),
- }); err != nil {
+	// We have a retry mechanism further up the stack, but we add another one here
+ // to try our best to avoid losing the task result.
+ retries := backoff.New(c.Context(), b.cfg.BackoffConfig)
+ for retries.Ongoing() {
+ if err := c.Send(&protos.BuilderToPlanner{
+ BuilderID: b.ID,
+ Result: *result.ToProtoTaskResult(),
+ }); err == nil {
+ break
+ }
+
+ level.Error(b.logger).Log("msg", "failed to acknowledge task completion to planner. Retrying", "err", err)
+ retries.Wait()
+ }
+
+ if err := retries.Err(); err != nil {
return fmt.Errorf("failed to acknowledge task completion to planner: %w", err)
}
+
return nil
}
@@ -283,7 +344,7 @@ func (b *Builder) processTask(
blocksIter,
b.rwFn,
nil, // TODO(salvacorts): Pass reporter or remove when we address tracking
- b.metrics,
+ b.bloomStore.BloomMetrics(),
logger,
)
@@ -368,7 +429,7 @@ func (b *Builder) loadWorkForGap(
tenant string,
id tsdb.Identifier,
gap protos.GapWithBlocks,
-) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBloom], error) {
+) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBlooms], error) {
// load a series iterator for the gap
seriesItr, err := b.tsdbStore.LoadTSDB(ctx, table, tenant, id, gap.Bounds)
if err != nil {
diff --git a/pkg/bloombuild/builder/builder_test.go b/pkg/bloombuild/builder/builder_test.go
index 1c5a4bc1c45b2..b04a34fb6eeb2 100644
--- a/pkg/bloombuild/builder/builder_test.go
+++ b/pkg/bloombuild/builder/builder_test.go
@@ -4,27 +4,33 @@ import (
"context"
"fmt"
"net"
+ "sync"
"testing"
"time"
"github.com/go-kit/log"
+ "github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/services"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+ "go.uber.org/atomic"
"google.golang.org/grpc"
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
"github.com/grafana/loki/v3/pkg/storage"
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/local"
"github.com/grafana/loki/v3/pkg/storage/config"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
bloomshipperconfig "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config"
"github.com/grafana/loki/v3/pkg/storage/types"
)
func Test_BuilderLoop(t *testing.T) {
logger := log.NewNopLogger()
+ //logger := log.NewLogfmtLogger(os.Stdout)
schemaCfg := config.SchemaConfig{
Configs: []config.PeriodConfig{
@@ -68,13 +74,21 @@ func Test_BuilderLoop(t *testing.T) {
server, err := newFakePlannerServer(tasks)
require.NoError(t, err)
+ // Start the server so the builder can connect and receive tasks.
+ server.Start()
+
limits := fakeLimits{}
cfg := Config{
PlannerAddress: server.Addr(),
+ BackoffConfig: backoff.Config{
+ MinBackoff: 1 * time.Second,
+ MaxBackoff: 10 * time.Second,
+ MaxRetries: 5,
+ },
}
flagext.DefaultValues(&cfg.GrpcConfig)
- builder, err := New(cfg, limits, schemaCfg, storageCfg, storage.NewClientMetrics(), nil, nil, logger, prometheus.DefaultRegisterer)
+ builder, err := New(cfg, limits, schemaCfg, storageCfg, storage.NewClientMetrics(), nil, fakeBloomStore{}, logger, prometheus.DefaultRegisterer)
require.NoError(t, err)
t.Cleanup(func() {
err = services.StopAndAwaitTerminated(context.Background(), builder)
@@ -86,10 +100,28 @@ func Test_BuilderLoop(t *testing.T) {
err = services.StartAndAwaitRunning(context.Background(), builder)
require.NoError(t, err)
+ // Wait for at least one task to be processed.
require.Eventually(t, func() bool {
- return server.completedTasks == len(tasks)
+ return server.CompletedTasks() > 0
}, 5*time.Second, 100*time.Millisecond)
+	// Right after, stop the server so the connection is broken and the builder will retry.
+ server.Stop()
+
+ // While the server is stopped, the builder should keep retrying to connect but no tasks should be processed.
+ // Note this is just a way to sleep while making sure no tasks are processed.
+ tasksProcessedSoFar := server.CompletedTasks()
+ require.Never(t, func() bool {
+ return server.CompletedTasks() > tasksProcessedSoFar
+ }, 5*time.Second, 500*time.Millisecond)
+
+ // Now we start the server so the builder can connect and receive tasks.
+ server.Start()
+
+ require.Eventually(t, func() bool {
+ return server.CompletedTasks() >= len(tasks)
+ }, 30*time.Second, 500*time.Millisecond)
+
err = services.StopAndAwaitTerminated(context.Background(), builder)
require.NoError(t, err)
@@ -98,44 +130,65 @@ func Test_BuilderLoop(t *testing.T) {
type fakePlannerServer struct {
tasks []*protos.ProtoTask
- completedTasks int
+ completedTasks atomic.Int64
shutdownCalled bool
- addr string
+ listenAddr string
grpcServer *grpc.Server
+ wg sync.WaitGroup
}
func newFakePlannerServer(tasks []*protos.ProtoTask) (*fakePlannerServer, error) {
- lis, err := net.Listen("tcp", "localhost:0")
- if err != nil {
- return nil, err
- }
-
server := &fakePlannerServer{
- tasks: tasks,
- addr: lis.Addr().String(),
- grpcServer: grpc.NewServer(),
+ tasks: tasks,
}
- protos.RegisterPlannerForBuilderServer(server.grpcServer, server)
- go func() {
- if err := server.grpcServer.Serve(lis); err != nil {
- panic(err)
- }
- }()
-
return server, nil
}
func (f *fakePlannerServer) Addr() string {
- return f.addr
+ if f.listenAddr == "" {
+ panic("server not started")
+ }
+ return f.listenAddr
}
func (f *fakePlannerServer) Stop() {
- f.grpcServer.Stop()
+ if f.grpcServer != nil {
+ f.grpcServer.Stop()
+ }
+
+ f.wg.Wait()
+}
+
+func (f *fakePlannerServer) Start() {
+ f.Stop()
+
+ lisAddr := "localhost:0"
+ if f.listenAddr != "" {
+ // Reuse the same address if the server was stopped and started again.
+ lisAddr = f.listenAddr
+ }
+
+ lis, err := net.Listen("tcp", lisAddr)
+ if err != nil {
+ panic(err)
+ }
+ f.listenAddr = lis.Addr().String()
+
+ f.grpcServer = grpc.NewServer()
+ protos.RegisterPlannerForBuilderServer(f.grpcServer, f)
+ go func() {
+ if err := f.grpcServer.Serve(lis); err != nil {
+ panic(err)
+ }
+ }()
}
func (f *fakePlannerServer) BuilderLoop(srv protos.PlannerForBuilder_BuilderLoopServer) error {
+ f.wg.Add(1)
+ defer f.wg.Done()
+
// Receive Ready
if _, err := srv.Recv(); err != nil {
return fmt.Errorf("failed to receive ready: %w", err)
@@ -148,7 +201,8 @@ func (f *fakePlannerServer) BuilderLoop(srv protos.PlannerForBuilder_BuilderLoop
if _, err := srv.Recv(); err != nil {
return fmt.Errorf("failed to receive task response: %w", err)
}
- f.completedTasks++
+ time.Sleep(10 * time.Millisecond) // Simulate task processing time to add some latency.
+ f.completedTasks.Inc()
}
// No more tasks. Wait until shutdown.
@@ -156,6 +210,10 @@ func (f *fakePlannerServer) BuilderLoop(srv protos.PlannerForBuilder_BuilderLoop
return nil
}
+func (f *fakePlannerServer) CompletedTasks() int {
+ return int(f.completedTasks.Load())
+}
+
func (f *fakePlannerServer) NotifyBuilderShutdown(_ context.Context, _ *protos.NotifyBuilderShutdownRequest) (*protos.NotifyBuilderShutdownResponse, error) {
f.shutdownCalled = true
return &protos.NotifyBuilderShutdownResponse{}, nil
@@ -184,6 +242,14 @@ func (f fakeLimits) BloomCompactorMaxBloomSize(_ string) int {
panic("implement me")
}
+type fakeBloomStore struct {
+ bloomshipper.Store
+}
+
+func (f fakeBloomStore) BloomMetrics() *v1.Metrics {
+ return nil
+}
+
func parseDayTime(s string) config.DayTime {
t, err := time.Parse("2006-01-02", s)
if err != nil {
diff --git a/pkg/bloombuild/builder/config.go b/pkg/bloombuild/builder/config.go
index 25cefa4215224..d0c553104b09e 100644
--- a/pkg/bloombuild/builder/config.go
+++ b/pkg/bloombuild/builder/config.go
@@ -4,6 +4,7 @@ import (
"flag"
"fmt"
+ "github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/grpcclient"
)
@@ -11,12 +12,14 @@ import (
type Config struct {
GrpcConfig grpcclient.Config `yaml:"grpc_config"`
PlannerAddress string `yaml:"planner_address"`
+ BackoffConfig backoff.Config `yaml:"backoff_config"`
}
// RegisterFlagsWithPrefix registers flags for the bloom-planner configuration.
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.StringVar(&cfg.PlannerAddress, prefix+".planner-address", "", "Hostname (and port) of the bloom planner")
cfg.GrpcConfig.RegisterFlagsWithPrefix(prefix+".grpc", f)
+ cfg.BackoffConfig.RegisterFlagsWithPrefix(prefix+".backoff", f)
}
func (cfg *Config) Validate() error {
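
A hedged sketch of the builder YAML with the new retry knobs, using the same values as the test above (1s/10s/5). The `bloom_build.builder` nesting and the dskit `backoff.Config` field names (`min_period`, `max_period`, `max_retries`) are assumptions, not taken from this diff:

```yaml
bloom_build:                  # assumed top-level section for the experimental bloom build components
  builder:
    planner_address: bloom-planner.loki.svc:9095   # illustrative address
    backoff_config:           # new in this change
      min_period: 1s          # MinBackoff
      max_period: 10s         # MaxBackoff
      max_retries: 5          # 0 retries forever
```
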
diff --git a/pkg/bloombuild/builder/metrics.go b/pkg/bloombuild/builder/metrics.go
index 658d5a2c43acb..f94d92353ee1f 100644
--- a/pkg/bloombuild/builder/metrics.go
+++ b/pkg/bloombuild/builder/metrics.go
@@ -3,8 +3,6 @@ package builder
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
-
- v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
)
const (
@@ -16,8 +14,7 @@ const (
)
type Metrics struct {
- bloomMetrics *v1.Metrics
- running prometheus.Gauge
+ running prometheus.Gauge
taskStarted prometheus.Counter
taskCompleted *prometheus.CounterVec
@@ -33,9 +30,8 @@ type Metrics struct {
chunkSize prometheus.Histogram
}
-func NewMetrics(r prometheus.Registerer, bloomMetrics *v1.Metrics) *Metrics {
+func NewMetrics(r prometheus.Registerer) *Metrics {
return &Metrics{
- bloomMetrics: bloomMetrics,
running: promauto.With(r).NewGauge(prometheus.GaugeOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
diff --git a/pkg/bloombuild/builder/spec.go b/pkg/bloombuild/builder/spec.go
index a56918b0344de..a031a69c9812b 100644
--- a/pkg/bloombuild/builder/spec.go
+++ b/pkg/bloombuild/builder/spec.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
- "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -45,12 +44,12 @@ type SimpleBloomGenerator struct {
userID string
store v1.Iterator[*v1.Series]
chunkLoader ChunkLoader
- blocksIter v1.ResettableIterator[*v1.SeriesWithBloom]
+ blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms]
// options to build blocks with
opts v1.BlockOptions
- metrics *Metrics
+ metrics *v1.Metrics
logger log.Logger
readWriterFn func() (v1.BlockWriter, v1.BlockReader)
@@ -68,10 +67,10 @@ func NewSimpleBloomGenerator(
opts v1.BlockOptions,
store v1.Iterator[*v1.Series],
chunkLoader ChunkLoader,
- blocksIter v1.ResettableIterator[*v1.SeriesWithBloom],
+ blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms],
readWriterFn func() (v1.BlockWriter, v1.BlockReader),
reporter func(model.Fingerprint),
- metrics *Metrics,
+ metrics *v1.Metrics,
logger log.Logger,
) *SimpleBloomGenerator {
return &SimpleBloomGenerator{
@@ -93,49 +92,35 @@ func NewSimpleBloomGenerator(
opts.Schema.NGramLen(),
opts.Schema.NGramSkip(),
int(opts.UnencodedBlockOptions.MaxBloomSizeBytes),
- metrics.bloomMetrics,
+ metrics,
),
}
}
-func (s *SimpleBloomGenerator) populator(ctx context.Context) func(series *v1.Series, bloom *v1.Bloom) (int, bool, error) {
- return func(series *v1.Series, bloom *v1.Bloom) (int, bool, error) {
- start := time.Now()
+func (s *SimpleBloomGenerator) populator(ctx context.Context) v1.BloomPopulatorFunc {
+ return func(
+ series *v1.Series,
+ srcBlooms v1.SizedIterator[*v1.Bloom],
+ toAdd v1.ChunkRefs,
+ ch chan *v1.BloomCreation,
+ ) {
level.Debug(s.logger).Log(
"msg", "populating bloom filter",
"stage", "before",
"fp", series.Fingerprint,
"chunks", len(series.Chunks),
)
- chunkItersWithFP, err := s.chunkLoader.Load(ctx, s.userID, series)
- if err != nil {
- return 0, false, errors.Wrapf(err, "failed to load chunks for series: %+v", series)
- }
-
- bytesAdded, skip, err := s.tokenizer.Populate(
- &v1.SeriesWithBloom{
- Series: series,
- Bloom: bloom,
- },
- chunkItersWithFP.itr,
- )
+ chunkItersWithFP := s.chunkLoader.Load(ctx, s.userID, &v1.Series{
+ Fingerprint: series.Fingerprint,
+ Chunks: toAdd,
+ })
- level.Debug(s.logger).Log(
- "msg", "populating bloom filter",
- "stage", "after",
- "fp", series.Fingerprint,
- "chunks", len(series.Chunks),
- "series_bytes", bytesAdded,
- "duration", time.Since(start),
- "err", err,
- )
+ s.tokenizer.Populate(srcBlooms, chunkItersWithFP.itr, ch)
if s.reporter != nil {
s.reporter(series.Fingerprint)
}
- return bytesAdded, skip, err
}
-
}
func (s *SimpleBloomGenerator) Generate(ctx context.Context) *LazyBlockBuilderIterator {
@@ -178,11 +163,11 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) *LazyBlockBuilderIt
type LazyBlockBuilderIterator struct {
ctx context.Context
opts v1.BlockOptions
- metrics *Metrics
- populate func(*v1.Series, *v1.Bloom) (int, bool, error)
+ metrics *v1.Metrics
+ populate v1.BloomPopulatorFunc
readWriterFn func() (v1.BlockWriter, v1.BlockReader)
series v1.PeekingIterator[*v1.Series]
- blocks v1.ResettableIterator[*v1.SeriesWithBloom]
+ blocks v1.ResettableIterator[*v1.SeriesWithBlooms]
bytesAdded int
curr *v1.Block
@@ -192,11 +177,11 @@ type LazyBlockBuilderIterator struct {
func NewLazyBlockBuilderIterator(
ctx context.Context,
opts v1.BlockOptions,
- metrics *Metrics,
- populate func(*v1.Series, *v1.Bloom) (int, bool, error),
+ metrics *v1.Metrics,
+ populate v1.BloomPopulatorFunc,
readWriterFn func() (v1.BlockWriter, v1.BlockReader),
series v1.PeekingIterator[*v1.Series],
- blocks v1.ResettableIterator[*v1.SeriesWithBloom],
+ blocks v1.ResettableIterator[*v1.SeriesWithBlooms],
) *LazyBlockBuilderIterator {
return &LazyBlockBuilderIterator{
ctx: ctx,
@@ -229,7 +214,7 @@ func (b *LazyBlockBuilderIterator) Next() bool {
return false
}
- mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate, b.metrics.bloomMetrics)
+ mergeBuilder := v1.NewMergeBuilder(b.blocks, b.series, b.populate, b.metrics)
writer, reader := b.readWriterFn()
blockBuilder, err := v1.NewBlockBuilder(b.opts, writer)
if err != nil {
@@ -244,7 +229,7 @@ func (b *LazyBlockBuilderIterator) Next() bool {
return false
}
- b.curr = v1.NewBlock(reader, b.metrics.bloomMetrics)
+ b.curr = v1.NewBlock(reader, b.metrics)
return true
}
@@ -270,7 +255,7 @@ type ChunkItersByFingerprint struct {
// ChunkLoader loads chunks from a store
type ChunkLoader interface {
- Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error)
+ Load(ctx context.Context, userID string, series *v1.Series) *ChunkItersByFingerprint
}
// StoreChunkLoader loads chunks from a store
@@ -286,7 +271,7 @@ func NewStoreChunkLoader(fetcherProvider stores.ChunkFetcherProvider, metrics *M
}
}
-func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error) {
+func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.Series) *ChunkItersByFingerprint {
// NB(owen-d): This is probably unnecessary as we should only have one fetcher
// because we'll only be working on a single index period at a time, but this should protect
// us in the case of refactoring/changing this and likely isn't a perf bottleneck.
@@ -317,5 +302,5 @@ func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.S
return &ChunkItersByFingerprint{
fp: series.Fingerprint,
itr: newBatchedChunkLoader(ctx, fetchers, inputs, s.metrics, batchedLoaderDefaultBatchSize),
- }, nil
+ }
}
diff --git a/pkg/bloombuild/builder/spec_test.go b/pkg/bloombuild/builder/spec_test.go
index 40225dc45865b..0e3f98c907799 100644
--- a/pkg/bloombuild/builder/spec_test.go
+++ b/pkg/bloombuild/builder/spec_test.go
@@ -13,21 +13,22 @@ import (
"github.com/grafana/loki/v3/pkg/chunkenc"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/util/mempool"
)
-func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) {
+func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBlooms, refs []bloomshipper.BlockRef) {
return blocksFromSchemaWithRange(t, n, options, 0, 0xffff)
}
// splits 100 series across `n` non-overlapping blocks.
// uses options to build blocks with.
-func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) {
+func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBlooms, refs []bloomshipper.BlockRef) {
if 100%n != 0 {
panic("100 series must be evenly divisible by n")
}
numSeries := 100
- data, _ = v1.MkBasicSeriesWithBlooms(numSeries, 0, fromFP, throughFp, 0, 10000)
+ data, _ = v1.MkBasicSeriesWithBlooms(numSeries, fromFP, throughFp, 0, 10000)
seriesPerBlock := numSeries / n
@@ -46,7 +47,7 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro
minIdx, maxIdx := i*seriesPerBlock, (i+1)*seriesPerBlock
- itr := v1.NewSliceIter[v1.SeriesWithBloom](data[minIdx:maxIdx])
+ itr := v1.NewSliceIter[v1.SeriesWithBlooms](data[minIdx:maxIdx])
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
@@ -62,11 +63,11 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro
// doesn't actually load any chunks
type dummyChunkLoader struct{}
-func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) (*ChunkItersByFingerprint, error) {
+func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) *ChunkItersByFingerprint {
return &ChunkItersByFingerprint{
fp: series.Fingerprint,
itr: v1.NewEmptyIter[v1.ChunkRefWithIter](),
- }, nil
+ }
}
func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Series], blocks []*v1.Block, refs []bloomshipper.BlockRef) *SimpleBloomGenerator {
@@ -74,7 +75,7 @@ func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Ser
for i, b := range blocks {
bqs = append(bqs, &bloomshipper.CloseableBlockQuerier{
BlockRef: refs[i],
- BlockQuerier: v1.NewBlockQuerier(b, false, v1.DefaultMaxPageSize),
+ BlockQuerier: v1.NewBlockQuerier(b, &mempool.SimpleHeapAllocator{}, v1.DefaultMaxPageSize),
})
}
@@ -106,7 +107,7 @@ func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Ser
return v1.NewMemoryBlockWriter(indexBuf, bloomsBuf), v1.NewByteReader(indexBuf, bloomsBuf)
},
nil,
- NewMetrics(nil, v1.NewMetrics(nil)),
+ v1.NewMetrics(nil),
log.NewNopLogger(),
)
}
@@ -132,9 +133,9 @@ func TestSimpleBloomGenerator(t *testing.T) {
} {
t.Run(fmt.Sprintf("%s/%s", tc.desc, enc), func(t *testing.T) {
sourceBlocks, data, refs := blocksFromSchemaWithRange(t, 2, tc.fromSchema, 0x00000, 0x6ffff)
- storeItr := v1.NewMapIter[v1.SeriesWithBloom, *v1.Series](
- v1.NewSliceIter[v1.SeriesWithBloom](data),
- func(swb v1.SeriesWithBloom) *v1.Series {
+ storeItr := v1.NewMapIter[v1.SeriesWithBlooms, *v1.Series](
+ v1.NewSliceIter[v1.SeriesWithBlooms](data),
+ func(swb v1.SeriesWithBlooms) *v1.Series {
return swb.Series
},
)
@@ -150,9 +151,9 @@ func TestSimpleBloomGenerator(t *testing.T) {
// Check all the input series are present in the output blocks.
expectedRefs := v1.PointerSlice(data)
- outputRefs := make([]*v1.SeriesWithBloom, 0, len(data))
+ outputRefs := make([]*v1.SeriesWithBlooms, 0, len(data))
for _, block := range outputBlocks {
- bq := v1.NewBlockQuerier(block, false, v1.DefaultMaxPageSize)
+ bq := v1.NewBlockQuerier(block, &mempool.SimpleHeapAllocator{}, v1.DefaultMaxPageSize).Iter()
for bq.Next() {
outputRefs = append(outputRefs, bq.At())
}
@@ -164,13 +165,5 @@ func TestSimpleBloomGenerator(t *testing.T) {
})
}
}
-}
-func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef {
- bounds := v1.NewBounds(min, max)
- return bloomshipper.BlockRef{
- Ref: bloomshipper.Ref{
- Bounds: bounds,
- },
- }
}
diff --git a/pkg/bloombuild/planner/metrics.go b/pkg/bloombuild/planner/metrics.go
index 3f68ab5206303..3f0fe684ab24b 100644
--- a/pkg/bloombuild/planner/metrics.go
+++ b/pkg/bloombuild/planner/metrics.go
@@ -26,13 +26,19 @@ type Metrics struct {
inflightRequests prometheus.Summary
tasksRequeued prometheus.Counter
taskLost prometheus.Counter
- tasksFailed prometheus.Counter
- buildStarted prometheus.Counter
- buildCompleted *prometheus.CounterVec
- buildTime *prometheus.HistogramVec
+ planningTime prometheus.Histogram
+ buildStarted prometheus.Counter
+ buildCompleted *prometheus.CounterVec
+ buildTime *prometheus.HistogramVec
+ buildLastSuccess prometheus.Gauge
- tenantsDiscovered prometheus.Counter
+ blocksDeleted prometheus.Counter
+ metasDeleted prometheus.Counter
+
+ tenantsDiscovered prometheus.Counter
+ tenantTasksPlanned *prometheus.GaugeVec
+ tenantTasksCompleted *prometheus.GaugeVec
}
func NewMetrics(
@@ -80,13 +86,15 @@ func NewMetrics(
Name: "tasks_lost_total",
Help: "Total number of tasks lost due to not being picked up by a builder and failed to be requeued.",
}),
- tasksFailed: promauto.With(r).NewCounter(prometheus.CounterOpts{
+
+ planningTime: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
- Name: "tasks_failed_total",
- Help: "Total number of tasks that failed to be processed by builders (after the configured retries).",
+ Name: "planning_time_seconds",
+ Help: "Time spent planning a build cycle.",
+ // 1s --> 1h (steps of 1 minute)
+ Buckets: prometheus.LinearBuckets(1, 60, 60),
}),
-
buildStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
@@ -104,8 +112,33 @@ func NewMetrics(
Subsystem: metricsSubsystem,
Name: "build_time_seconds",
Help: "Time spent during a builds cycle.",
- Buckets: prometheus.DefBuckets,
+ // Buckets in seconds:
+ Buckets: append(
+ // 1s --> 1h (steps of 10 minutes)
+ prometheus.LinearBuckets(1, 600, 6),
+ // 1h --> 24h (steps of 1 hour)
+ prometheus.LinearBuckets(3600, 3600, 24)...,
+ ),
}, []string{"status"}),
+ buildLastSuccess: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "build_last_successful_run_timestamp_seconds",
+ Help: "Unix timestamp of the last successful build cycle.",
+ }),
+
+ blocksDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "blocks_deleted_total",
+ Help: "Number of blocks deleted",
+ }),
+ metasDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "metas_deleted_total",
+ Help: "Number of metas deleted",
+ }),
tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
@@ -113,6 +146,18 @@ func NewMetrics(
Name: "tenants_discovered_total",
Help: "Number of tenants discovered during the current build iteration",
}),
+ tenantTasksPlanned: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "tenant_tasks_planned",
+ Help: "Number of tasks planned for a tenant during the current build iteration.",
+ }, []string{"tenant"}),
+ tenantTasksCompleted: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "tenant_tasks_completed",
+ Help: "Number of tasks completed for a tenant during the current build iteration.",
+ }, []string{"tenant", "status"}),
}
}
diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go
index 287a859745f5a..ccbd462aaabe0 100644
--- a/pkg/bloombuild/planner/planner.go
+++ b/pkg/bloombuild/planner/planner.go
@@ -3,6 +3,7 @@ package planner
import (
"context"
"fmt"
+ "math"
"sort"
"sync"
"time"
@@ -13,6 +14,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
+ "go.uber.org/atomic"
"github.com/grafana/loki/v3/pkg/bloombuild/common"
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
@@ -39,7 +41,7 @@ type Planner struct {
schemaCfg config.SchemaConfig
tsdbStore common.TSDBStore
- bloomStore bloomshipper.Store
+ bloomStore bloomshipper.StoreBase
tasksQueue *queue.RequestQueue
activeUsers *util.ActiveUsersCleanupService
@@ -56,7 +58,7 @@ func New(
schemaCfg config.SchemaConfig,
storeCfg storage.Config,
storageMetrics storage.ClientMetrics,
- bloomStore bloomshipper.Store,
+ bloomStore bloomshipper.StoreBase,
logger log.Logger,
r prometheus.Registerer,
) (*Planner, error) {
@@ -121,6 +123,8 @@ func (p *Planner) stopping(_ error) error {
}
func (p *Planner) running(ctx context.Context) error {
+ go p.trackInflightRequests(ctx)
+
// run once at beginning
if err := p.runOne(ctx); err != nil {
level.Error(p.logger).Log("msg", "bloom build iteration failed for the first time", "err", err)
@@ -129,9 +133,6 @@ func (p *Planner) running(ctx context.Context) error {
planningTicker := time.NewTicker(p.cfg.PlanningInterval)
defer planningTicker.Stop()
- inflightTasksTicker := time.NewTicker(250 * time.Millisecond)
- defer inflightTasksTicker.Stop()
-
for {
select {
case <-ctx.Done():
@@ -148,6 +149,19 @@ func (p *Planner) running(ctx context.Context) error {
if err := p.runOne(ctx); err != nil {
level.Error(p.logger).Log("msg", "bloom build iteration failed", "err", err)
}
+ }
+ }
+}
+
+func (p *Planner) trackInflightRequests(ctx context.Context) {
+ inflightTasksTicker := time.NewTicker(250 * time.Millisecond)
+ defer inflightTasksTicker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ // We just return. Error handling and logging is done in the main loop (running method).
+ return
case <-inflightTasksTicker.C:
inflight := p.totalPendingTasks()
@@ -156,6 +170,17 @@ func (p *Planner) running(ctx context.Context) error {
}
}
+type tenantTableTaskResults struct {
+ tasksToWait int
+ originalMetas []bloomshipper.Meta
+ resultsCh chan *protos.TaskResult
+}
+
+type tenantTable struct {
+ table config.DayTable
+ tenant string
+}
+
func (p *Planner) runOne(ctx context.Context) error {
var (
start = time.Now()
@@ -164,6 +189,10 @@ func (p *Planner) runOne(ctx context.Context) error {
defer func() {
p.metrics.buildCompleted.WithLabelValues(status).Inc()
p.metrics.buildTime.WithLabelValues(status).Observe(time.Since(start).Seconds())
+
+ if status == statusSuccess {
+ p.metrics.buildLastSuccess.SetToCurrentTime()
+ }
}()
p.metrics.buildStarted.Inc()
@@ -171,47 +200,303 @@ func (p *Planner) runOne(ctx context.Context) error {
tables := p.tables(time.Now())
level.Debug(p.logger).Log("msg", "loaded tables", "tables", tables.TotalDays())
- work, err := p.loadWork(ctx, tables)
+ work, err := p.loadTenantWork(ctx, tables)
if err != nil {
return fmt.Errorf("error loading work: %w", err)
}
+ // For deletion, we aggregate the results for each table and tenant tuple.
+ // We cannot delete the tombstoned metas returned by a task as soon as it finishes, since
+ // other tasks may still be reading those metas.
+ tasksResultForTenantTable := make(map[tenantTable]tenantTableTaskResults)
var totalTasks int
- for _, w := range work {
- logger := log.With(p.logger, "tenant", w.tenant, "table", w.table.Addr(), "ownership", w.ownershipRange.String())
- gaps, err := p.findGapsForBounds(ctx, w.tenant, w.table, w.ownershipRange)
- if err != nil {
- level.Error(logger).Log("msg", "error finding gaps", "err", err)
+ for table, tenants := range work {
+ for tenant, ownershipRanges := range tenants {
+ logger := log.With(p.logger, "tenant", tenant, "table", table.Addr())
+ tt := tenantTable{
+ tenant: tenant,
+ table: table,
+ }
+
+ tasks, existingMetas, err := p.computeTasks(ctx, table, tenant, ownershipRanges)
+ if err != nil {
+ level.Error(logger).Log("msg", "error computing tasks", "err", err)
+ continue
+ }
+ level.Debug(logger).Log("msg", "computed tasks", "tasks", len(tasks), "existingMetas", len(existingMetas))
+
+ var tenantTableEnqueuedTasks int
+ resultsCh := make(chan *protos.TaskResult, len(tasks))
+
+ now := time.Now()
+ for _, task := range tasks {
+ queueTask := NewQueueTask(ctx, now, task, resultsCh)
+ if err := p.enqueueTask(queueTask); err != nil {
+ level.Error(logger).Log("msg", "error enqueuing task", "err", err)
+ continue
+ }
+
+ totalTasks++
+ tenantTableEnqueuedTasks++
+ }
+
+ p.metrics.tenantTasksPlanned.WithLabelValues(tt.tenant).Add(float64(tenantTableEnqueuedTasks))
+ tasksResultForTenantTable[tt] = tenantTableTaskResults{
+ tasksToWait: tenantTableEnqueuedTasks,
+ originalMetas: existingMetas,
+ resultsCh: resultsCh,
+ }
+
+ level.Debug(logger).Log("msg", "enqueued tasks", "tasks", tenantTableEnqueuedTasks)
+ }
+ }
+
+ p.metrics.planningTime.Observe(time.Since(start).Seconds())
+ level.Debug(p.logger).Log(
+ "msg", "planning completed",
+ "tenantTables", len(tasksResultForTenantTable),
+ "tasks", totalTasks,
+ "time", time.Since(start).Seconds(),
+ )
+
+ // Create a goroutine to process the results for each table-tenant tuple
+ // TODO(salvacorts): This may end up creating too many goroutines.
+ // Create a pool of workers to process table-tenant tuples.
+ var tasksSucceed atomic.Int64
+ var wg sync.WaitGroup
+ for tt, results := range tasksResultForTenantTable {
+ if results.tasksToWait == 0 {
+ // No tasks enqueued for this tenant-table tuple, skip processing
continue
}
- now := time.Now()
- for _, gap := range gaps {
- totalTasks++
+ wg.Add(1)
+ go func(table config.DayTable, tenant string, results tenantTableTaskResults) {
+ defer wg.Done()
- task := NewTask(
- ctx, now,
- protos.NewTask(w.table, w.tenant, w.ownershipRange, gap.tsdb, gap.gaps),
+ logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant)
+
+ nSucceed, err := p.processTenantTaskResults(
+ ctx, table, tenant,
+ results.originalMetas, results.tasksToWait, results.resultsCh,
)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to process tenant task results", "err", err)
+ }
- if err := p.enqueueTask(task); err != nil {
- level.Error(logger).Log("msg", "error enqueuing task", "err", err)
- continue
+ if nSucceed != results.tasksToWait {
+ level.Error(logger).Log(
+ "msg", "not all tasks succeeded for tenant table",
+ "tasks", results.tasksToWait,
+ "tasksSucceed", nSucceed,
+ "tasksFailed", results.tasksToWait-nSucceed,
+ )
}
- }
+ tasksSucceed.Add(int64(nSucceed))
+ }(tt.table, tt.tenant, results)
}
- level.Debug(p.logger).Log("msg", "planning completed", "tasks", totalTasks)
+ level.Debug(p.logger).Log(
+ "msg", "waiting for all tasks to be completed",
+ "tenantTables", len(tasksResultForTenantTable),
+ "tasks", totalTasks,
+ )
+ wg.Wait()
status = statusSuccess
level.Info(p.logger).Log(
"msg", "bloom build iteration completed",
+ "tasks", totalTasks,
+ "tasksSucceed", tasksSucceed.Load(),
"duration", time.Since(start).Seconds(),
)
return nil
}
+// computeTasks computes the tasks for a given table, tenant, and set of ownership ranges.
+// It returns the tasks to be executed and the existing metas relevant to those ranges.
+func (p *Planner) computeTasks(
+ ctx context.Context,
+ table config.DayTable,
+ tenant string,
+ ownershipRanges []v1.FingerprintBounds,
+) ([]*protos.Task, []bloomshipper.Meta, error) {
+ var tasks []*protos.Task
+ logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant)
+
+ // Fetch source metas to be used in both build and cleanup of out-of-date metas+blooms
+ metas, err := p.bloomStore.FetchMetas(
+ ctx,
+ bloomshipper.MetaSearchParams{
+ TenantID: tenant,
+ Interval: bloomshipper.NewInterval(table.Bounds()),
+ Keyspace: v1.NewBounds(0, math.MaxUint64),
+ },
+ )
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to get metas: %w", err)
+ }
+
+ for _, ownershipRange := range ownershipRanges {
+ logger := log.With(logger, "ownership", ownershipRange.String())
+
+ // Filter only the metas that overlap in the ownership range
+ metasInBounds := bloomshipper.FilterMetasOverlappingBounds(metas, ownershipRange)
+
+ // Find gaps in the TSDBs for this tenant/table
+ gaps, err := p.findOutdatedGaps(ctx, tenant, table, ownershipRange, metasInBounds, logger)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to find outdated gaps", "err", err)
+ continue
+ }
+ if len(gaps) == 0 {
+ continue
+ }
+
+ for _, gap := range gaps {
+ tasks = append(tasks, protos.NewTask(table, tenant, ownershipRange, gap.tsdb, gap.gaps))
+ }
+ }
+
+ return tasks, metas, nil
+}
+
+func (p *Planner) processTenantTaskResults(
+ ctx context.Context,
+ table config.DayTable,
+ tenant string,
+ originalMetas []bloomshipper.Meta,
+ totalTasks int,
+ resultsCh <-chan *protos.TaskResult,
+) (int, error) {
+ logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant)
+ level.Debug(logger).Log("msg", "waiting for all tasks to be completed", "tasks", totalTasks)
+
+ var tasksSucceed int
+ newMetas := make([]bloomshipper.Meta, 0, totalTasks)
+ for i := 0; i < totalTasks; i++ {
+ select {
+ case <-ctx.Done():
+ if err := ctx.Err(); err != nil && !errors.Is(err, context.Canceled) {
+ level.Error(logger).Log("msg", "planner context done with error", "err", err)
+ return tasksSucceed, err
+ }
+
+ // No error or context canceled, just return
+ level.Debug(logger).Log("msg", "context done while waiting for task results")
+ return tasksSucceed, nil
+ case result := <-resultsCh:
+ if result == nil {
+ p.metrics.tenantTasksCompleted.WithLabelValues(tenant, statusFailure).Inc()
+ level.Error(logger).Log("msg", "received nil task result")
+ continue
+ }
+ if result.Error != nil {
+ p.metrics.tenantTasksCompleted.WithLabelValues(tenant, statusFailure).Inc()
+ level.Error(logger).Log(
+ "msg", "task failed",
+ "err", result.Error,
+ "task", result.TaskID,
+ )
+ continue
+ }
+
+ p.metrics.tenantTasksCompleted.WithLabelValues(tenant, statusSuccess).Inc()
+ newMetas = append(newMetas, result.CreatedMetas...)
+ tasksSucceed++
+ }
+ }
+
+ level.Debug(logger).Log(
+ "msg", "all tasks completed for tenant table",
+ "tasks", totalTasks,
+ "tasksSucceed", tasksSucceed,
+ "originalMetas", len(originalMetas),
+ "newMetas", len(newMetas),
+ )
+
+ if len(newMetas) == 0 {
+ // No new metas were created, nothing to delete
+ // Note: this would only happen if all tasks failed
+ return tasksSucceed, nil
+ }
+
+ combined := append(originalMetas, newMetas...)
+ outdated := outdatedMetas(combined)
+ if len(outdated) == 0 {
+ level.Debug(logger).Log("msg", "no outdated metas found")
+ return tasksSucceed, nil
+ }
+
+ level.Debug(logger).Log("msg", "found outdated metas", "outdated", len(outdated))
+ if err := p.deleteOutdatedMetasAndBlocks(ctx, table, tenant, outdated); err != nil {
+ return 0, fmt.Errorf("failed to delete outdated metas: %w", err)
+ }
+
+ return tasksSucceed, nil
+}
+
+func (p *Planner) deleteOutdatedMetasAndBlocks(
+ ctx context.Context,
+ table config.DayTable,
+ tenant string,
+ metas []bloomshipper.Meta,
+) error {
+ logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant)
+
+ client, err := p.bloomStore.Client(table.ModelTime())
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to get client", "err", err)
+ return errors.Wrap(err, "failed to get client")
+ }
+
+ var (
+ deletedMetas int
+ deletedBlocks int
+ )
+ defer func() {
+ p.metrics.metasDeleted.Add(float64(deletedMetas))
+ p.metrics.blocksDeleted.Add(float64(deletedBlocks))
+ }()
+
+ for _, meta := range metas {
+ for _, block := range meta.Blocks {
+ if err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block}); err != nil {
+ if client.IsObjectNotFoundErr(err) {
+ level.Debug(logger).Log("msg", "block not found while attempting delete, continuing", "block", block.String())
+ } else {
+ level.Error(logger).Log("msg", "failed to delete block", "err", err, "block", block.String())
+ return errors.Wrap(err, "failed to delete block")
+ }
+ }
+
+ deletedBlocks++
+ level.Debug(logger).Log("msg", "removed outdated block", "block", block.String())
+ }
+
+ err = client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef})
+ if err != nil {
+ if client.IsObjectNotFoundErr(err) {
+ level.Debug(logger).Log("msg", "meta not found while attempting delete, continuing", "meta", meta.MetaRef.String())
+ } else {
+ level.Error(logger).Log("msg", "failed to delete meta", "err", err, "meta", meta.MetaRef.String())
+ return errors.Wrap(err, "failed to delete meta")
+ }
+ }
+ deletedMetas++
+ level.Debug(logger).Log("msg", "removed outdated meta", "meta", meta.MetaRef.String())
+ }
+
+ level.Debug(logger).Log(
+ "msg", "deleted outdated metas and blocks",
+ "metas", deletedMetas,
+ "blocks", deletedBlocks,
+ )
+
+ return nil
+}
+
func (p *Planner) tables(ts time.Time) *dayRangeIterator {
// adjust the minimum by one to make it inclusive, which is more intuitive
// for a configuration variable
@@ -228,21 +513,15 @@ func (p *Planner) tables(ts time.Time) *dayRangeIterator {
return newDayRangeIterator(fromDay, throughDay, p.schemaCfg)
}
-type tenantTableRange struct {
- tenant string
- table config.DayTable
- ownershipRange v1.FingerprintBounds
+type work map[config.DayTable]map[string][]v1.FingerprintBounds
- // TODO: Add tracking
- //finished bool
- //queueTime, startTime, endTime time.Time
-}
-
-func (p *Planner) loadWork(
+// loadTenantWork loads the work for each table and tenant tuple.
+// The returned work maps each table and tenant to the fingerprint ranges that need to be indexed in bloom filters.
+func (p *Planner) loadTenantWork(
ctx context.Context,
tables *dayRangeIterator,
-) ([]tenantTableRange, error) {
- var work []tenantTableRange
+) (work, error) {
+ tenantTableWork := make(map[config.DayTable]map[string][]v1.FingerprintBounds, tables.TotalDays())
for tables.Next() && tables.Err() == nil && ctx.Err() == nil {
table := tables.At()
@@ -252,26 +531,33 @@ func (p *Planner) loadWork(
if err != nil {
return nil, fmt.Errorf("error loading tenants: %w", err)
}
- level.Debug(p.logger).Log("msg", "loaded tenants", "table", table, "tenants", tenants.Len())
+ level.Debug(p.logger).Log("msg", "loaded tenants", "table", table, "tenants", tenants.Remaining())
+
+ // If this is the first time we see this table, initialize the map
+ if tenantTableWork[table] == nil {
+ tenantTableWork[table] = make(map[string][]v1.FingerprintBounds, tenants.Remaining())
+ }
for tenants.Next() && tenants.Err() == nil && ctx.Err() == nil {
p.metrics.tenantsDiscovered.Inc()
tenant := tenants.At()
if !p.limits.BloomCreationEnabled(tenant) {
+ level.Debug(p.logger).Log("msg", "bloom creation disabled for tenant", "tenant", tenant)
continue
}
splitFactor := p.limits.BloomSplitSeriesKeyspaceBy(tenant)
bounds := SplitFingerprintKeyspaceByFactor(splitFactor)
- for _, bounds := range bounds {
- work = append(work, tenantTableRange{
- tenant: tenant,
- table: table,
- ownershipRange: bounds,
- })
- }
+ tenantTableWork[table][tenant] = bounds
+
+ // Reset progress tracking metrics for this tenant
+ // NOTE(salvacorts): We will reset them multiple times for the same tenant, for each table, but it's not a big deal.
+ // Alternatively, we can use a Counter instead of a Gauge, but I think a Gauge is easier to reason about.
+ p.metrics.tenantTasksPlanned.WithLabelValues(tenant).Set(0)
+ p.metrics.tenantTasksCompleted.WithLabelValues(tenant, statusSuccess).Set(0)
+ p.metrics.tenantTasksCompleted.WithLabelValues(tenant, statusFailure).Set(0)
level.Debug(p.logger).Log("msg", "loading work for tenant", "table", table, "tenant", tenant, "splitFactor", splitFactor)
}
@@ -286,7 +572,7 @@ func (p *Planner) loadWork(
return nil, fmt.Errorf("error iterating tables: %w", err)
}
- return work, ctx.Err()
+ return tenantTableWork, ctx.Err()
}
func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*v1.SliceIter[string], error) {
@@ -298,47 +584,6 @@ func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*v1.Slice
return v1.NewSliceIter(tenants), nil
}
-/*
-Planning works as follows, split across many functions for clarity:
- 1. Fetch all meta.jsons for the given tenant and table which overlap the ownership range of this compactor.
- 2. Load current TSDBs for this tenant/table.
- 3. For each live TSDB (there should be only 1, but this works with multiple), find any gaps
- (fingerprint ranges) which are not up-to-date, determined by checking other meta.json files and comparing
- the TSDBs they were generated from as well as their ownership ranges.
-*/
-func (p *Planner) findGapsForBounds(
- ctx context.Context,
- tenant string,
- table config.DayTable,
- ownershipRange v1.FingerprintBounds,
-) ([]blockPlan, error) {
- logger := log.With(p.logger, "org_id", tenant, "table", table.Addr(), "ownership", ownershipRange.String())
-
- // Fetch source metas to be used in both build and cleanup of out-of-date metas+blooms
- metas, err := p.bloomStore.FetchMetas(
- ctx,
- bloomshipper.MetaSearchParams{
- TenantID: tenant,
- Interval: bloomshipper.NewInterval(table.Bounds()),
- Keyspace: ownershipRange,
- },
- )
- if err != nil {
- level.Error(logger).Log("msg", "failed to get metas", "err", err)
- return nil, fmt.Errorf("failed to get metas: %w", err)
- }
-
- level.Debug(logger).Log("msg", "found relevant metas", "metas", len(metas))
-
- // Find gaps in the TSDBs for this tenant/table
- gaps, err := p.findOutdatedGaps(ctx, tenant, table, ownershipRange, metas, logger)
- if err != nil {
- return nil, fmt.Errorf("failed to find outdated gaps: %w", err)
- }
-
- return gaps, nil
-}
-
// blockPlan is a plan for all the work needed to build a meta.json
// It includes:
// - the tsdb (source of truth) which contains all the series+chunks
@@ -507,11 +752,11 @@ func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan
return plans, nil
}
-func (p *Planner) addPendingTask(task *Task) {
+func (p *Planner) addPendingTask(task *QueueTask) {
p.pendingTasks.Store(task.ID, task)
}
-func (p *Planner) removePendingTask(task *Task) {
+func (p *Planner) removePendingTask(task *QueueTask) {
p.pendingTasks.Delete(task.ID)
}
@@ -523,10 +768,10 @@ func (p *Planner) totalPendingTasks() (total int) {
return total
}
-func (p *Planner) enqueueTask(task *Task) error {
+func (p *Planner) enqueueTask(task *QueueTask) error {
p.activeUsers.UpdateUserTimestamp(task.Tenant, time.Now())
return p.tasksQueue.Enqueue(task.Tenant, nil, task, func() {
- task.timesEnqueued++
+ task.timesEnqueued.Add(1)
p.addPendingTask(task)
})
}
@@ -536,7 +781,7 @@ func (p *Planner) NotifyBuilderShutdown(
req *protos.NotifyBuilderShutdownRequest,
) (*protos.NotifyBuilderShutdownResponse, error) {
level.Debug(p.logger).Log("msg", "builder shutdown", "builder", req.BuilderID)
- p.tasksQueue.UnregisterConsumerConnection(req.GetBuilderID())
+ p.tasksQueue.NotifyConsumerShutdown(req.GetBuilderID())
return &protos.NotifyBuilderShutdownResponse{}, nil
}
@@ -570,7 +815,8 @@ func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer
return fmt.Errorf("dequeue() call resulted in nil response. builder: %s", builderID)
}
- task := item.(*Task)
+ task := item.(*QueueTask)
+ logger := log.With(logger, "task", task.ID)
queueTime := time.Since(task.queueTime)
p.metrics.queueDuration.Observe(queueTime.Seconds())
@@ -582,17 +828,21 @@ func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer
continue
}
- if err := p.forwardTaskToBuilder(builder, builderID, task); err != nil {
+ result, err := p.forwardTaskToBuilder(builder, builderID, task)
+ if err != nil {
maxRetries := p.limits.BloomTaskMaxRetries(task.Tenant)
- if maxRetries > 0 && task.timesEnqueued >= maxRetries {
- p.metrics.tasksFailed.Inc()
+ if maxRetries > 0 && int(task.timesEnqueued.Load()) >= maxRetries {
p.removePendingTask(task)
level.Error(logger).Log(
"msg", "task failed after max retries",
- "retries", task.timesEnqueued,
+ "retries", task.timesEnqueued.Load(),
"maxRetries", maxRetries,
"err", err,
)
+ task.resultsChannel <- &protos.TaskResult{
+ TaskID: task.ID,
+ Error: fmt.Errorf("task failed after max retries (%d): %w", maxRetries, err),
+ }
continue
}
@@ -601,13 +851,31 @@ func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer
p.metrics.taskLost.Inc()
p.removePendingTask(task)
level.Error(logger).Log("msg", "error re-enqueuing task. this task will be lost", "err", err)
+ task.resultsChannel <- &protos.TaskResult{
+ TaskID: task.ID,
+ Error: fmt.Errorf("error re-enqueuing task: %w", err),
+ }
continue
}
p.metrics.tasksRequeued.Inc()
- level.Error(logger).Log("msg", "error forwarding task to builder, Task requeued", "err", err)
+ level.Error(logger).Log(
+ "msg", "error forwarding task to builder, Task requeued",
+ "retries", task.timesEnqueued.Load(),
+ "err", err,
+ )
+ continue
}
+ level.Debug(logger).Log(
+ "msg", "task completed",
+ "duration", time.Since(task.queueTime).Seconds(),
+ "retries", task.timesEnqueued.Load()-1, // -1 because the first enqueue is not a retry
+ )
+ p.removePendingTask(task)
+
+ // Send the result back to the task. The channel is buffered, so this should not block.
+ task.resultsChannel <- result
}
return errPlannerIsNotRunning
@@ -616,16 +884,14 @@ func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer
func (p *Planner) forwardTaskToBuilder(
builder protos.PlannerForBuilder_BuilderLoopServer,
builderID string,
- task *Task,
-) error {
- defer p.removePendingTask(task)
-
+ task *QueueTask,
+) (*protos.TaskResult, error) {
msg := &protos.PlannerToBuilder{
Task: task.ToProtoTask(),
}
if err := builder.Send(msg); err != nil {
- return fmt.Errorf("error sending task to builder (%s): %w", builderID, err)
+ return nil, fmt.Errorf("error sending task to builder (%s): %w", builderID, err)
}
// Launch a goroutine to wait for the response from the builder so we can
@@ -651,12 +917,14 @@ func (p *Planner) forwardTaskToBuilder(
select {
case result := <-resultsCh:
- // TODO: Return metas forward via channel
- return result.Error
+ // Note: Errors from the result are not returned here since we don't retry tasks
+ // that return with an error. I.e. we won't retry errors forwarded from the builder.
+ // TODO(salvacorts): Filter and return errors that can be retried.
+ return result, nil
case err := <-errCh:
- return err
+ return nil, err
case <-timeout:
- return fmt.Errorf("timeout waiting for response from builder (%s)", builderID)
+ return nil, fmt.Errorf("timeout waiting for response from builder (%s)", builderID)
}
}
@@ -666,7 +934,7 @@ func (p *Planner) forwardTaskToBuilder(
func (p *Planner) receiveResultFromBuilder(
builder protos.PlannerForBuilder_BuilderLoopServer,
builderID string,
- task *Task,
+ task *QueueTask,
) (*protos.TaskResult, error) {
// If connection is closed, Recv() will return an error
res, err := builder.Recv()
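
The new planner flow fans work out per tenant-table tuple and fans results back in over a buffered channel sized to the number of enqueued tasks, so the send from the builder loop never blocks on the happy path. A self-contained sketch of just that shape, with a hypothetical result type standing in for protos.TaskResult (none of the names below come from the Loki codebase):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// result is a hypothetical stand-in for protos.TaskResult.
type result struct {
	taskID string
	err    error
}

func main() {
	// One buffered channel per tenant-table tuple, sized to the number of
	// enqueued tasks so the sender never blocks on the happy path.
	taskIDs := []string{"1", "2", "3"}
	resultsCh := make(chan result, len(taskIDs))

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Mirrors processTenantTaskResults: read exactly len(taskIDs) results,
		// counting successes before doing any cleanup of outdated metas.
		succeeded := 0
		for i := 0; i < len(taskIDs); i++ {
			if r := <-resultsCh; r.err == nil {
				succeeded++
			}
		}
		fmt.Println("tasks succeeded:", succeeded) // tasks succeeded: 2
	}()

	// Emulate builders completing tasks (normally sent from the builder loop).
	resultsCh <- result{taskID: "1"}
	resultsCh <- result{taskID: "2", err: errors.New("builder failed")}
	resultsCh <- result{taskID: "3"}
	wg.Wait()
}
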
diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go
index b46b987de751c..a20ce4e9a98a3 100644
--- a/pkg/bloombuild/planner/planner_test.go
+++ b/pkg/bloombuild/planner/planner_test.go
@@ -3,28 +3,38 @@ package planner
import (
"context"
"fmt"
+ "io"
+ "math"
+ "sync"
"testing"
"time"
"github.com/go-kit/log"
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/services"
+ "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+ "go.uber.org/atomic"
"google.golang.org/grpc"
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
"github.com/grafana/loki/v3/pkg/storage"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/cache"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/local"
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
bloomshipperconfig "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
"github.com/grafana/loki/v3/pkg/storage/types"
+ "github.com/grafana/loki/v3/pkg/util/mempool"
)
+var testDay = parseDayTime("2023-09-01")
+var testTable = config.NewDayTable(testDay, "index_")
+
func tsdbID(n int) tsdb.SingleTenantTSDBIdentifier {
return tsdb.SingleTenantTSDBIdentifier{
TS: time.Unix(int64(n), 0),
@@ -35,7 +45,9 @@ func genMeta(min, max model.Fingerprint, sources []int, blocks []bloomshipper.Bl
m := bloomshipper.Meta{
MetaRef: bloomshipper.MetaRef{
Ref: bloomshipper.Ref{
- Bounds: v1.NewBounds(min, max),
+ TenantID: "fakeTenant",
+ TableName: testTable.Addr(),
+ Bounds: v1.NewBounds(min, max),
},
},
Blocks: blocks,
@@ -141,14 +153,26 @@ func Test_gapsBetweenTSDBsAndMetas(t *testing.T) {
}
func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef {
- bounds := v1.NewBounds(min, max)
+ startTS, endTS := testDay.Bounds()
return bloomshipper.BlockRef{
Ref: bloomshipper.Ref{
- Bounds: bounds,
+ TenantID: "fakeTenant",
+ TableName: testTable.Addr(),
+ Bounds: v1.NewBounds(min, max),
+ StartTimestamp: startTS,
+ EndTimestamp: endTS,
+ Checksum: 0,
},
}
}
+func genBlock(ref bloomshipper.BlockRef) bloomshipper.Block {
+ return bloomshipper.Block{
+ BlockRef: ref,
+ Data: &DummyReadSeekCloser{},
+ }
+}
+
func Test_blockPlansForGaps(t *testing.T) {
for _, tc := range []struct {
desc string
@@ -333,13 +357,14 @@ func Test_blockPlansForGaps(t *testing.T) {
}
}
-func createTasks(n int) []*Task {
- tasks := make([]*Task, 0, n)
+func createTasks(n int, resultsCh chan *protos.TaskResult) []*QueueTask {
+ tasks := make([]*QueueTask, 0, n)
// Enqueue tasks
for i := 0; i < n; i++ {
- task := NewTask(
+ task := NewQueueTask(
context.Background(), time.Now(),
- protos.NewTask(config.NewDayTable(config.NewDayTime(0), "fake"), "fakeTenant", v1.NewBounds(0, 10), tsdbID(1), nil),
+ protos.NewTask(config.NewDayTable(testDay, "fake"), "fakeTenant", v1.NewBounds(0, 10), tsdbID(1), nil),
+ resultsCh,
)
tasks = append(tasks, task)
}
@@ -385,7 +410,12 @@ func createPlanner(
}
reg := prometheus.NewPedanticRegistry()
- planner, err := New(cfg, limits, schemaCfg, storageCfg, storage.ClientMetrics{}, nil, logger, reg)
+ metasCache := cache.NewNoopCache()
+ blocksCache := bloomshipper.NewFsBlocksCache(storageCfg.BloomShipperConfig.BlocksCache, reg, logger)
+ bloomStore, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, storage.ClientMetrics{}, metasCache, blocksCache, &mempool.SimpleHeapAllocator{}, reg, logger)
+ require.NoError(t, err)
+
+ planner, err := New(cfg, limits, schemaCfg, storageCfg, storage.ClientMetrics{}, bloomStore, logger, reg)
require.NoError(t, err)
return planner
@@ -432,9 +462,8 @@ func Test_BuilderLoop(t *testing.T) {
modifyBuilder: func(builder *fakeBuilder) {
builder.SetReturnErrorMsg(true)
},
- resetBuilder: func(builder *fakeBuilder) {
- builder.SetReturnErrorMsg(false)
- },
+ // We don't retry on error messages from the builder
+ shouldConsumeAfterModify: true,
},
{
name: "exceed max retries",
@@ -487,9 +516,10 @@ func Test_BuilderLoop(t *testing.T) {
})
// Enqueue tasks
- tasks := createTasks(nTasks)
+ resultsCh := make(chan *protos.TaskResult, nTasks)
+ tasks := createTasks(nTasks, resultsCh)
for _, task := range tasks {
- err = planner.enqueueTask(task)
+ err := planner.enqueueTask(task)
require.NoError(t, err)
}
@@ -499,10 +529,10 @@ func Test_BuilderLoop(t *testing.T) {
builder := newMockBuilder(fmt.Sprintf("builder-%d", i))
builders = append(builders, builder)
- go func() {
- err = planner.BuilderLoop(builder)
- require.ErrorIs(t, err, tc.expectedBuilderLoopError)
- }()
+ go func(expectedBuilderLoopError error) {
+ err := planner.BuilderLoop(builder)
+ require.ErrorIs(t, err, expectedBuilderLoopError)
+ }(tc.expectedBuilderLoopError)
}
// Eventually, all tasks should be sent to builders
@@ -517,6 +547,11 @@ func Test_BuilderLoop(t *testing.T) {
// Finally, the queue should be empty
require.Equal(t, 0, planner.totalPendingTasks())
+ // consume all task results to free up the channel for the next round of tasks
+ for i := 0; i < nTasks; i++ {
+ <-resultsCh
+ }
+
if tc.modifyBuilder != nil {
// Configure builders to return errors
for _, builder := range builders {
@@ -525,7 +560,7 @@ func Test_BuilderLoop(t *testing.T) {
// Enqueue tasks again
for _, task := range tasks {
- err = planner.enqueueTask(task)
+ err := planner.enqueueTask(task)
require.NoError(t, err)
}
@@ -568,15 +603,230 @@ func Test_BuilderLoop(t *testing.T) {
}
}
+func putMetas(bloomClient bloomshipper.Client, metas []bloomshipper.Meta) error {
+ for _, meta := range metas {
+ err := bloomClient.PutMeta(context.Background(), meta)
+ if err != nil {
+ return err
+ }
+
+ for _, block := range meta.Blocks {
+ err := bloomClient.PutBlock(context.Background(), genBlock(block))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func Test_processTenantTaskResults(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+
+ originalMetas []bloomshipper.Meta
+ taskResults []*protos.TaskResult
+ expectedMetas []bloomshipper.Meta
+ expectedTasksSucceed int
+ }{
+ {
+ name: "errors",
+ originalMetas: []bloomshipper.Meta{
+ genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ },
+ taskResults: []*protos.TaskResult{
+ {
+ TaskID: "1",
+ Error: errors.New("fake error"),
+ },
+ {
+ TaskID: "2",
+ Error: errors.New("fake error"),
+ },
+ },
+ expectedMetas: []bloomshipper.Meta{
+ // The original metas should remain unchanged
+ genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ },
+ expectedTasksSucceed: 0,
+ },
+ {
+ name: "no new metas",
+ originalMetas: []bloomshipper.Meta{
+ genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ },
+ taskResults: []*protos.TaskResult{
+ {
+ TaskID: "1",
+ },
+ {
+ TaskID: "2",
+ },
+ },
+ expectedMetas: []bloomshipper.Meta{
+ // The original metas should remain unchanged
+ genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ },
+ expectedTasksSucceed: 2,
+ },
+ {
+ name: "no original metas",
+ taskResults: []*protos.TaskResult{
+ {
+ TaskID: "1",
+ CreatedMetas: []bloomshipper.Meta{
+ genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ },
+ },
+ {
+ TaskID: "2",
+ CreatedMetas: []bloomshipper.Meta{
+ genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ },
+ },
+ },
+ expectedMetas: []bloomshipper.Meta{
+ genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}),
+ },
+ expectedTasksSucceed: 2,
+ },
+ {
+ name: "single meta covers all original",
+ originalMetas: []bloomshipper.Meta{
+ genMeta(0, 5, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 5)}),
+ genMeta(6, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(6, 10)}),
+ },
+ taskResults: []*protos.TaskResult{
+ {
+ TaskID: "1",
+ CreatedMetas: []bloomshipper.Meta{
+ genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ },
+ },
+ },
+ expectedMetas: []bloomshipper.Meta{
+ genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ },
+ expectedTasksSucceed: 1,
+ },
+ {
+ name: "multi version ordering",
+ originalMetas: []bloomshipper.Meta{
+ genMeta(0, 5, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 5)}),
+ genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), // only part of the range is outdated, must keep
+ },
+ taskResults: []*protos.TaskResult{
+ {
+ TaskID: "1",
+ CreatedMetas: []bloomshipper.Meta{
+ genMeta(8, 10, []int{2}, []bloomshipper.BlockRef{genBlockRef(8, 10)}),
+ },
+ },
+ },
+ expectedMetas: []bloomshipper.Meta{
+ genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}),
+ genMeta(8, 10, []int{2}, []bloomshipper.BlockRef{genBlockRef(8, 10)}),
+ },
+ expectedTasksSucceed: 1,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ logger := log.NewNopLogger()
+ //logger := log.NewLogfmtLogger(os.Stdout)
+
+ cfg := Config{
+ PlanningInterval: 1 * time.Hour,
+ MaxQueuedTasksPerTenant: 10000,
+ }
+ planner := createPlanner(t, cfg, &fakeLimits{}, logger)
+
+ bloomClient, err := planner.bloomStore.Client(testDay.ModelTime())
+ require.NoError(t, err)
+
+ // Create original metas and blocks
+ err = putMetas(bloomClient, tc.originalMetas)
+ require.NoError(t, err)
+
+ ctx, ctxCancel := context.WithCancel(context.Background())
+ defer ctxCancel()
+ resultsCh := make(chan *protos.TaskResult, len(tc.taskResults))
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ completed, err := planner.processTenantTaskResults(
+ ctx,
+ testTable,
+ "fakeTenant",
+ tc.originalMetas,
+ len(tc.taskResults),
+ resultsCh,
+ )
+ require.NoError(t, err)
+ require.Equal(t, tc.expectedTasksSucceed, completed)
+ }()
+
+ for _, taskResult := range tc.taskResults {
+ if len(taskResult.CreatedMetas) > 0 {
+ // Emulate builder putting new metas to obj store
+ err = putMetas(bloomClient, taskResult.CreatedMetas)
+ require.NoError(t, err)
+ }
+
+ resultsCh <- taskResult
+ }
+
+ // Wait for all tasks to be processed and outdated metas/blocks deleted
+ wg.Wait()
+
+ // Get all metas
+ metas, err := planner.bloomStore.FetchMetas(
+ context.Background(),
+ bloomshipper.MetaSearchParams{
+ TenantID: "fakeTenant",
+ Interval: bloomshipper.NewInterval(testTable.Bounds()),
+ Keyspace: v1.NewBounds(0, math.MaxUint64),
+ },
+ )
+ require.NoError(t, err)
+
+ // TODO(salvacorts): Fix this
+ // For some reason, when the tests run in CI, the `loc` of the model.Time for each TSDB is not encoded.
+ // As a result, the fetched metas have an empty loc whereas the original metas do not, so the
+ // comparison fails. As a workaround, we manually rebuild the TS of the sources in the
+ // fetched metas.
+ for i := range metas {
+ for j := range metas[i].Sources {
+ sec := metas[i].Sources[j].TS.Unix()
+ nsec := metas[i].Sources[j].TS.Nanosecond()
+ metas[i].Sources[j].TS = time.Unix(sec, int64(nsec))
+ }
+ }
+
+ // Compare metas
+ require.Equal(t, len(tc.expectedMetas), len(metas))
+ require.ElementsMatch(t, tc.expectedMetas, metas)
+ })
+ }
+}
+
type fakeBuilder struct {
+ mx sync.Mutex // Protects tasks and currTaskIdx.
id string
tasks []*protos.Task
currTaskIdx int
grpc.ServerStream
- returnError bool
- returnErrorMsg bool
- wait bool
+ returnError atomic.Bool
+ returnErrorMsg atomic.Bool
+ wait atomic.Bool
ctx context.Context
ctxCancel context.CancelFunc
}
@@ -593,19 +843,21 @@ func newMockBuilder(id string) *fakeBuilder {
}
func (f *fakeBuilder) ReceivedTasks() []*protos.Task {
+ f.mx.Lock()
+ defer f.mx.Unlock()
return f.tasks
}
func (f *fakeBuilder) SetReturnError(b bool) {
- f.returnError = b
+ f.returnError.Store(b)
}
func (f *fakeBuilder) SetReturnErrorMsg(b bool) {
- f.returnErrorMsg = b
+ f.returnErrorMsg.Store(b)
}
func (f *fakeBuilder) SetWait(b bool) {
- f.wait = b
+ f.wait.Store(b)
}
func (f *fakeBuilder) CancelContext(b bool) {
@@ -633,6 +885,8 @@ func (f *fakeBuilder) Send(req *protos.PlannerToBuilder) error {
return err
}
+ f.mx.Lock()
+ defer f.mx.Unlock()
f.tasks = append(f.tasks, task)
f.currTaskIdx++
return nil
@@ -646,12 +900,12 @@ func (f *fakeBuilder) Recv() (*protos.BuilderToPlanner, error) {
}, nil
}
- if f.returnError {
+ if f.returnError.Load() {
return nil, fmt.Errorf("fake error from %s", f.id)
}
// Wait until `wait` is false
- for f.wait {
+ for f.wait.Load() {
time.Sleep(time.Second)
}
@@ -661,10 +915,12 @@ func (f *fakeBuilder) Recv() (*protos.BuilderToPlanner, error) {
}
var errMsg string
- if f.returnErrorMsg {
+ if f.returnErrorMsg.Load() {
errMsg = fmt.Sprintf("fake error from %s", f.id)
}
+ f.mx.Lock()
+ defer f.mx.Unlock()
return &protos.BuilderToPlanner{
BuilderID: f.id,
Result: protos.ProtoTaskResult{
@@ -709,3 +965,17 @@ func parseDayTime(s string) config.DayTime {
Time: model.TimeFromUnix(t.Unix()),
}
}
+
+type DummyReadSeekCloser struct{}
+
+func (d *DummyReadSeekCloser) Read(_ []byte) (n int, err error) {
+ return 0, io.EOF
+}
+
+func (d *DummyReadSeekCloser) Seek(_ int64, _ int) (int64, error) {
+ return 0, nil
+}
+
+func (d *DummyReadSeekCloser) Close() error {
+ return nil
+}
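
The timestamp workaround in Test_processTenantTaskResults relies on the fact that rebuilding a time.Time via time.Unix drops the location and monotonic reading that a structural comparison would otherwise trip over. A minimal standalone illustration, using only the standard library:

package main

import (
	"fmt"
	"reflect"
	"time"
)

func main() {
	// A structural comparison (what require.Equal does under the hood via
	// reflect.DeepEqual) sees two identical instants as unequal when their
	// location pointers differ. Rebuilding both via time.Unix strips that.
	utc := time.Unix(1693526400, 0).UTC()
	local := time.Unix(1693526400, 0) // same instant, local zone

	fmt.Println(reflect.DeepEqual(utc, local)) // false: same instant, loc differs

	normalize := func(t time.Time) time.Time {
		return time.Unix(t.Unix(), int64(t.Nanosecond()))
	}
	fmt.Println(reflect.DeepEqual(normalize(utc), normalize(local))) // true
}
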
diff --git a/pkg/bloombuild/planner/task.go b/pkg/bloombuild/planner/task.go
index 1da39cea6bfd7..3080ec47a171c 100644
--- a/pkg/bloombuild/planner/task.go
+++ b/pkg/bloombuild/planner/task.go
@@ -4,22 +4,32 @@ import (
"context"
"time"
+ "go.uber.org/atomic"
+
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
)
-type Task struct {
+type QueueTask struct {
*protos.Task
+ resultsChannel chan *protos.TaskResult
+
// Tracking
- timesEnqueued int
+ timesEnqueued atomic.Int64
queueTime time.Time
ctx context.Context
}
-func NewTask(ctx context.Context, queueTime time.Time, task *protos.Task) *Task {
- return &Task{
- Task: task,
- ctx: ctx,
- queueTime: queueTime,
+func NewQueueTask(
+ ctx context.Context,
+ queueTime time.Time,
+ task *protos.Task,
+ resultsChannel chan *protos.TaskResult,
+) *QueueTask {
+ return &QueueTask{
+ Task: task,
+ resultsChannel: resultsChannel,
+ ctx: ctx,
+ queueTime: queueTime,
}
}
diff --git a/pkg/bloombuild/planner/versioned_range.go b/pkg/bloombuild/planner/versioned_range.go
new file mode 100644
index 0000000000000..578b5d7ef83a6
--- /dev/null
+++ b/pkg/bloombuild/planner/versioned_range.go
@@ -0,0 +1,261 @@
+package planner
+
+import (
+ "sort"
+
+ "github.com/prometheus/common/model"
+
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+)
+
+type tsdbToken struct {
+ through model.Fingerprint // inclusive
+ version int // TSDB version
+}
+
+// a ring of token ranges used to identify old metas.
+// each token records which TSDB version covers the keyspace from the previous
+// token (exclusive) up to its own `through` bound (inclusive).
+type tsdbTokenRange []tsdbToken
+
+func (t tsdbTokenRange) Len() int {
+ return len(t)
+}
+
+func (t tsdbTokenRange) Less(i, j int) bool {
+ return t[i].through < t[j].through
+}
+
+func (t tsdbTokenRange) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+
+// Add ensures a versioned set of bounds is added to the range. If the bounds are already
+// covered by a more up to date version, it returns false.
+func (t tsdbTokenRange) Add(version int, bounds v1.FingerprintBounds) (res tsdbTokenRange, added bool) {
+ // allows attempting to join neighboring token ranges with identical versions
+ // that aren't known until the end of the function
+ var shouldReassemble bool
+ var reassembleFrom int
+ defer func() {
+ if shouldReassemble {
+ res = res.reassemble(reassembleFrom)
+ }
+ }()
+
+ // special case: first token
+ if len(t) == 0 {
+ tok := tsdbToken{through: bounds.Max, version: version}
+ // special case: first token is included in bounds, no need to fill negative space
+ if bounds.Min == 0 {
+ return append(t, tok), true
+ }
+ // Use a negative version to indicate that the range is not covered by any version.
+ return append(t, tsdbToken{through: bounds.Min - 1, version: -1}, tok), true
+ }
+
+ // For non-empty token ranges, we continually update the range with newer versions.
+ for {
+ // find first token that covers the start of the range
+ i := sort.Search(len(t), func(i int) bool {
+ return t[i].through >= bounds.Min
+ })
+
+ if i == len(t) {
+ tok := tsdbToken{through: bounds.Max, version: version}
+
+ // edge case: there is no gap between the previous token range
+ // and the new one;
+ // skip adding a negative token
+ if t[len(t)-1].through == bounds.Min-1 {
+ return append(t, tok), true
+ }
+
+ // the range is not covered by any version and we are at the end of the range.
+ // Add a negative token and the new token.
+ negative := tsdbToken{through: bounds.Min - 1, version: -1}
+ return append(t, negative, tok), true
+ }
+
+ // Otherwise, we've found a token that covers the start of the range.
+ newer := t[i].version < version
+ preExisting := t.boundsForToken(i)
+ if !newer {
+ if bounds.Within(preExisting) {
+ // The range is already covered by a more up to date version; nothing to add,
+ // but preserve `added` in case an earlier iteration already added a token
+ return t, added
+ }
+
+ // The range is partially covered by a more up to date version;
+ // update the range we need to check and continue
+ bounds = v1.NewBounds(preExisting.Max+1, bounds.Max)
+ continue
+ }
+
+ // If we need to update the range, there are 5 cases:
+ // 1. `equal`: the incoming range equals an existing range
+ // ------ # addition
+ // ------ # src
+ // 2. `subset`: the incoming range is a subset of an existing range
+ // ------ # addition
+ // -------- # src
+ // 3. `overflow_both_sides`: the incoming range is a superset of an existing range. This is not possible
+ // because the first token in the ring implicitly covers the left bound (zero) of all possible fps.
+ // Therefore, we can skip this case.
+ // ------ # addition
+ // ---- # src
+ // 4. `right_overflow`: the incoming range overflows the right side of an existing range
+ // ------ # addition
+ // ------ # src
+ // 5. `left_overflow`: the incoming range overflows the left side of an existing range. This can be skipped
+ // for the same reason as `overflow_both_sides`.
+ // ------ # addition
+ // ------ # src
+
+ // 1) (`equal`): we're replacing the same bounds
+ if bounds.Equal(preExisting) {
+ t[i].version = version
+ return t, true
+ }
+
+ // 2) (`subset`): the incoming range is a subset of an existing range
+ if bounds.Within(preExisting) {
+ // 2a) the incoming range touches the existing range's minimum bound
+ if bounds.Min == preExisting.Min {
+ tok := tsdbToken{through: bounds.Max, version: version}
+ t = append(t, tsdbToken{})
+ copy(t[i+1:], t[i:])
+ t[i] = tok
+ return t, true
+ }
+ // 2b) the incoming range touches the existing range's maximum bound
+ if bounds.Max == preExisting.Max {
+ t[i].through = bounds.Min - 1
+ tok := tsdbToken{through: bounds.Max, version: version}
+ t = append(t, tsdbToken{})
+ copy(t[i+2:], t[i+1:])
+ t[i+1] = tok
+ return t, true
+ }
+
+ // 2c) the incoming range does not touch either edge;
+ // add two tokens (the new one and a new left-bound for the old range)
+ tok := tsdbToken{through: bounds.Max, version: version}
+ t = append(t, tsdbToken{}, tsdbToken{})
+ copy(t[i+2:], t[i:])
+ t[i+1] = tok
+ t[i].through = bounds.Min - 1
+ return t, true
+ }
+
+ // 4) (`right_overflow`): the incoming range overflows the right side of an existing range
+
+ // 4a) shortcut: the incoming range is a right-overlapping superset of the existing range.
+ // replace the existing token's version, update reassembly targets for merging neighboring ranges
+ // w/ the same version, and continue
+ if preExisting.Min == bounds.Min {
+ t[i].version = version
+ bounds.Min = preExisting.Max + 1
+ added = true
+ if !shouldReassemble {
+ reassembleFrom = i
+ shouldReassemble = true
+ }
+ continue
+ }
+
+ // 4b) the incoming range overlaps the right side of the existing range but
+ // does not touch the left side;
+ // add a new token for the right side of the existing range then update the reassembly targets
+ // and continue
+ overlap := tsdbToken{through: t[i].through, version: version}
+ t[i].through = bounds.Min - 1
+ t = append(t, tsdbToken{})
+ copy(t[i+2:], t[i+1:])
+ t[i+1] = overlap
+ added = true
+ bounds.Min = overlap.through + 1
+ if !shouldReassemble {
+ reassembleFrom = i + 1
+ shouldReassemble = true
+ }
+ continue
+ }
+}
+
+func (t tsdbTokenRange) boundsForToken(i int) v1.FingerprintBounds {
+ if i == 0 {
+ return v1.FingerprintBounds{Min: 0, Max: t[i].through}
+ }
+ return v1.FingerprintBounds{Min: t[i-1].through + 1, Max: t[i].through}
+}
+
+// reassemble merges neighboring tokens with the same version
+func (t tsdbTokenRange) reassemble(from int) tsdbTokenRange {
+ reassembleTo := from
+ for i := from; i < len(t)-1; i++ {
+ if t[i].version != t[i+1].version {
+ break
+ }
+ reassembleTo = i + 1
+ }
+
+ if reassembleTo == from {
+ return t
+ }
+ t[from].through = t[reassembleTo].through
+ copy(t[from+1:], t[reassembleTo+1:])
+ return t[:len(t)-(reassembleTo-from)]
+}
+
+func outdatedMetas(metas []bloomshipper.Meta) []bloomshipper.Meta {
+ var outdated []bloomshipper.Meta
+
+ // Sort metas descending by most recent source when checking
+ // for outdated metas (older metas are discarded if they don't change the range).
+ sort.Slice(metas, func(i, j int) bool {
+ a, aExists := metas[i].MostRecentSource()
+ b, bExists := metas[j].MostRecentSource()
+
+ if !aExists && !bExists {
+ // stable sort two sourceless metas by their bounds (easier testing)
+ return metas[i].Bounds.Less(metas[j].Bounds)
+ }
+
+ if !aExists {
+ // If a meta has no sources, it's out of date by definition.
+ // By convention we sort it to the beginning of the list and will mark it for removal later
+ return true
+ }
+
+ if !bExists {
+ // if a exists but b does not, mark b as lesser, sorting b to the
+ // front
+ return false
+ }
+ return !a.TS.Before(b.TS)
+ })
+
+ var (
+ tokenRange tsdbTokenRange
+ added bool
+ )
+
+ for _, meta := range metas {
+ mostRecent, exists := meta.MostRecentSource()
+ if !exists {
+ // if the meta exists but does not reference a TSDB, it's out of date
+ // TODO(owen-d): this shouldn't happen, figure out why
+ outdated = append(outdated, meta)
+ // there is no source timestamp to compare against, so skip the version check below
+ continue
+ }
+ version := int(model.TimeFromUnixNano(mostRecent.TS.UnixNano()))
+ tokenRange, added = tokenRange.Add(version, meta.Bounds)
+ if !added {
+ outdated = append(outdated, meta)
+ }
+ }
+
+ return outdated
+}
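
The negative-version convention in tsdbTokenRange is easiest to read off a concrete ring. The sketch below uses miniature stand-in types (not the ones defined above) and the layout produced by the "non-neighboring upper range" case in the test file that follows:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// token is a miniature stand-in for tsdbToken, shown only to illustrate how a
// ring is read: each entry covers the keyspace from the previous entry's
// `through` (exclusive) up to its own `through` (inclusive), and a version of
// -1 marks keyspace not covered by any TSDB.
type token struct {
	through model.Fingerprint
	version int
}

func main() {
	// Ring produced by adding version 5 over [30,50] and version 4 over [55,60].
	ring := []token{{29, -1}, {50, 5}, {54, -1}, {60, 4}}
	for i, t := range ring {
		var min model.Fingerprint
		if i > 0 {
			min = ring[i-1].through + 1
		}
		fmt.Printf("version %d covers [%d, %d]\n", t.version, min, t.through)
	}
}
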
diff --git a/pkg/bloombuild/planner/versioned_range_test.go b/pkg/bloombuild/planner/versioned_range_test.go
new file mode 100644
index 0000000000000..e58f143842f1c
--- /dev/null
+++ b/pkg/bloombuild/planner/versioned_range_test.go
@@ -0,0 +1,322 @@
+package planner
+
+import (
+ "testing"
+
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+)
+
+func Test_TsdbTokenRange(t *testing.T) {
+ type addition struct {
+ version int
+ bounds v1.FingerprintBounds
+ }
+ type exp struct {
+ added bool
+ err bool
+ }
+ mk := func(version int, min, max model.Fingerprint) addition {
+ return addition{version, v1.FingerprintBounds{Min: min, Max: max}}
+ }
+ tok := func(version int, through model.Fingerprint) tsdbToken {
+ return tsdbToken{version: version, through: through}
+ }
+
+ for _, tc := range []struct {
+ desc string
+ additions []addition
+ exp []bool
+ result tsdbTokenRange
+ }{
+ {
+ desc: "ascending versions",
+ additions: []addition{
+ mk(1, 0, 10),
+ mk(2, 11, 20),
+ mk(3, 15, 25),
+ },
+ exp: []bool{true, true, true},
+ result: tsdbTokenRange{
+ tok(1, 10),
+ tok(2, 14),
+ tok(3, 25),
+ },
+ },
+ {
+ desc: "descending versions",
+ additions: []addition{
+ mk(3, 15, 25),
+ mk(2, 11, 20),
+ mk(1, 0, 10),
+ },
+ exp: []bool{true, true, true},
+ result: tsdbTokenRange{
+ tok(1, 10),
+ tok(2, 14),
+ tok(3, 25),
+ },
+ },
+ {
+ desc: "simple",
+ additions: []addition{
+ mk(3, 0, 10),
+ mk(2, 11, 20),
+ mk(1, 15, 25),
+ },
+ exp: []bool{true, true, true},
+ result: tsdbTokenRange{
+ tok(3, 10),
+ tok(2, 20),
+ tok(1, 25),
+ },
+ },
+ {
+ desc: "simple replacement",
+ additions: []addition{
+ mk(3, 10, 20),
+ mk(2, 0, 9),
+ },
+ exp: []bool{true, true},
+ result: tsdbTokenRange{
+ tok(2, 9),
+ tok(3, 20),
+ },
+ },
+ {
+ desc: "complex",
+ additions: []addition{
+ mk(5, 30, 50),
+ mk(4, 20, 45),
+ mk(3, 25, 70),
+ mk(2, 10, 20),
+ mk(1, 1, 5),
+ },
+ exp: []bool{true, true, true, true, true},
+ result: tsdbTokenRange{
+ tok(-1, 0),
+ tok(1, 5),
+ tok(-1, 9),
+ tok(2, 19),
+ tok(4, 29),
+ tok(5, 50),
+ tok(3, 70),
+ },
+ },
+ {
+ desc: "neighboring upper range",
+ additions: []addition{
+ mk(5, 30, 50),
+ mk(4, 51, 60),
+ },
+ exp: []bool{true, true},
+ result: tsdbTokenRange{
+ tok(-1, 29),
+ tok(5, 50),
+ tok(4, 60),
+ },
+ },
+ {
+ desc: "non-neighboring upper range",
+ additions: []addition{
+ mk(5, 30, 50),
+ mk(4, 55, 60),
+ },
+ exp: []bool{true, true},
+ result: tsdbTokenRange{
+ tok(-1, 29),
+ tok(5, 50),
+ tok(-1, 54),
+ tok(4, 60),
+ },
+ },
+ {
+ desc: "earlier version within",
+ additions: []addition{
+ mk(5, 30, 50),
+ mk(4, 40, 45),
+ },
+ exp: []bool{true, false},
+ result: tsdbTokenRange{
+ tok(-1, 29),
+ tok(5, 50),
+ },
+ },
+ {
+ desc: "earlier version right overlapping",
+ additions: []addition{
+ mk(5, 10, 20),
+ mk(4, 15, 25),
+ },
+ exp: []bool{true, true},
+ result: tsdbTokenRange{
+ tok(-1, 9),
+ tok(5, 20),
+ tok(4, 25),
+ },
+ },
+ {
+ desc: "older version overlaps two",
+ additions: []addition{
+ mk(3, 10, 20),
+ mk(2, 21, 30),
+ mk(1, 15, 25),
+ },
+ exp: []bool{true, true, false},
+ result: tsdbTokenRange{
+ tok(-1, 9),
+ tok(3, 20),
+ tok(2, 30),
+ },
+ },
+ {
+ desc: "older version overlaps two w middle",
+ additions: []addition{
+ mk(3, 10, 20),
+ mk(2, 22, 30),
+ mk(1, 15, 25),
+ },
+ exp: []bool{true, true, true},
+ result: tsdbTokenRange{
+ tok(-1, 9),
+ tok(3, 20),
+ tok(1, 21),
+ tok(2, 30),
+ },
+ },
+ {
+ desc: "newer right overflow",
+ additions: []addition{
+ mk(1, 30, 50),
+ mk(2, 40, 60),
+ },
+ exp: []bool{true, true},
+ result: tsdbTokenRange{
+ tok(-1, 29),
+ tok(1, 39),
+ tok(2, 60),
+ },
+ },
+ {
+ desc: "newer right overflow superset",
+ additions: []addition{
+ mk(1, 30, 50),
+ mk(2, 30, 60),
+ },
+ exp: []bool{true, true},
+ result: tsdbTokenRange{
+ tok(-1, 29),
+ tok(2, 60),
+ },
+ },
+ {
+ desc: "newer right overflow partial",
+ additions: []addition{
+ mk(1, 30, 50),
+ mk(2, 40, 60),
+ },
+ exp: []bool{true, true},
+ result: tsdbTokenRange{
+ tok(-1, 29),
+ tok(1, 39),
+ tok(2, 60),
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ var (
+ tr tsdbTokenRange
+ added bool
+ )
+ for i, a := range tc.additions {
+ tr, added = tr.Add(a.version, a.bounds)
+ exp := tc.exp[i]
+ require.Equal(t, exp, added, "on iteration %d", i)
+ }
+ require.Equal(t, tc.result, tr)
+ })
+ }
+}
+
+func Test_OutdatedMetas(t *testing.T) {
+ gen := func(bounds v1.FingerprintBounds, tsdbTimes ...model.Time) (meta bloomshipper.Meta) {
+ for _, tsdbTime := range tsdbTimes {
+ meta.Sources = append(meta.Sources, tsdb.SingleTenantTSDBIdentifier{TS: tsdbTime.Time()})
+ }
+ meta.Bounds = bounds
+ return meta
+ }
+
+ for _, tc := range []struct {
+ desc string
+ metas []bloomshipper.Meta
+ exp []bloomshipper.Meta
+ }{
+ {
+ desc: "no metas",
+ metas: nil,
+ exp: nil,
+ },
+ {
+ desc: "single meta",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 10), 0),
+ },
+ exp: nil,
+ },
+ {
+ desc: "single outdated meta",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 10), 0),
+ gen(v1.NewBounds(0, 10), 1),
+ },
+ exp: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 10), 0),
+ },
+ },
+ {
+ desc: "single outdated via partitions",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0),
+ gen(v1.NewBounds(6, 10), 0),
+ gen(v1.NewBounds(0, 10), 1),
+ },
+ exp: []bloomshipper.Meta{
+ gen(v1.NewBounds(6, 10), 0),
+ gen(v1.NewBounds(0, 5), 0),
+ },
+ },
+ {
+ desc: "same tsdb versions",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0),
+ gen(v1.NewBounds(6, 10), 0),
+ gen(v1.NewBounds(0, 10), 1),
+ },
+ exp: []bloomshipper.Meta{
+ gen(v1.NewBounds(6, 10), 0),
+ gen(v1.NewBounds(0, 5), 0),
+ },
+ },
+ {
+ desc: "multi version ordering",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0),
+ gen(v1.NewBounds(0, 10), 1), // only part of the range is outdated, must keep
+ gen(v1.NewBounds(8, 10), 2),
+ },
+ exp: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0),
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ outdated := outdatedMetas(tc.metas)
+ require.Equal(t, tc.exp, outdated)
+ })
+ }
+}
diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go
index 4247fc1e4b52c..4525bca006a07 100644
--- a/pkg/bloomcompactor/batch.go
+++ b/pkg/bloomcompactor/batch.go
@@ -168,9 +168,9 @@ func newBatchedBlockLoader(
}
// compiler checks
-var _ v1.Iterator[*v1.SeriesWithBloom] = &blockLoadingIter{}
-var _ v1.CloseableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{}
-var _ v1.ResettableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{}
+var _ v1.Iterator[*v1.SeriesWithBlooms] = &blockLoadingIter{}
+var _ v1.CloseableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{}
+var _ v1.ResettableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{}
// TODO(chaudum): testware
func newBlockLoadingIter(ctx context.Context, blocks []bloomshipper.BlockRef, fetcher FetchFunc[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier], batchSize int) *blockLoadingIter {
@@ -196,13 +196,13 @@ type blockLoadingIter struct {
// internals
initialized bool
err error
- iter v1.Iterator[*v1.SeriesWithBloom]
+ iter v1.Iterator[*v1.SeriesWithBlooms]
loader *batchedLoader[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier, *bloomshipper.CloseableBlockQuerier]
loaded map[io.Closer]struct{}
}
// At implements v1.Iterator.
-func (i *blockLoadingIter) At() *v1.SeriesWithBloom {
+func (i *blockLoadingIter) At() *v1.SeriesWithBlooms {
if !i.initialized {
panic("iterator not initialized")
}
@@ -229,7 +229,7 @@ func (i *blockLoadingIter) init() {
i.overlapping = overlappingBlocksIter(i.inputs)
// set initial iter
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]()
// set "match all" filter function if not present
if i.filter == nil {
@@ -249,14 +249,14 @@ func (i *blockLoadingIter) loadNext() bool {
loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize)
filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter)
- iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs))
+ iters := make([]v1.PeekingIterator[*v1.SeriesWithBlooms], 0, len(blockRefs))
for filtered.Next() {
bq := filtered.At()
i.loaded[bq] = struct{}{}
iter, err := bq.SeriesIter()
if err != nil {
i.err = err
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]()
return false
}
iters = append(iters, iter)
@@ -264,7 +264,7 @@ func (i *blockLoadingIter) loadNext() bool {
if err := filtered.Err(); err != nil {
i.err = err
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]()
return false
}
@@ -278,12 +278,12 @@ func (i *blockLoadingIter) loadNext() bool {
// two overlapping blocks can conceivably have the same series, so we need to dedupe,
// preferring the one with the most chunks already indexed since we'll have
// to add fewer chunks to the bloom
- i.iter = v1.NewDedupingIter[*v1.SeriesWithBloom, *v1.SeriesWithBloom](
- func(a, b *v1.SeriesWithBloom) bool {
+ i.iter = v1.NewDedupingIter[*v1.SeriesWithBlooms, *v1.SeriesWithBlooms](
+ func(a, b *v1.SeriesWithBlooms) bool {
return a.Series.Fingerprint == b.Series.Fingerprint
},
- v1.Identity[*v1.SeriesWithBloom],
- func(a, b *v1.SeriesWithBloom) *v1.SeriesWithBloom {
+ v1.Identity[*v1.SeriesWithBlooms],
+ func(a, b *v1.SeriesWithBlooms) *v1.SeriesWithBlooms {
if len(a.Series.Chunks) > len(b.Series.Chunks) {
return a
}
@@ -294,7 +294,7 @@ func (i *blockLoadingIter) loadNext() bool {
return i.iter.Next()
}
- i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]()
+ i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]()
i.err = i.overlapping.Err()
return false
}
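
The deduping iterator above keeps, for two overlapping blocks that contain the same series, the copy that already indexes more chunks. Reduced to a standalone preference function over simplified stand-in types (the real iterator works on *v1.SeriesWithBlooms), the idea looks like this:

package main

import "fmt"

// series is a simplified stand-in for the fields the merge function cares about.
type series struct {
	fingerprint uint64
	chunks      int
}

// prefer mirrors the merge callback passed to NewDedupingIter: when two
// overlapping blocks carry the same series, keep the copy that already indexes
// more chunks so fewer chunks need to be appended to its bloom.
func prefer(a, b series) series {
	if a.chunks > b.chunks {
		return a
	}
	return b
}

func main() {
	a := series{fingerprint: 42, chunks: 12}
	b := series{fingerprint: 42, chunks: 7}
	fmt.Println(prefer(a, b)) // {42 12}
}
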
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index b46ec1cba7c87..8eed0823314a7 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -53,7 +53,7 @@ type Compactor struct {
retentionManager *RetentionManager
// temporary workaround until bloomStore has implemented read/write shipper interface
- bloomStore bloomshipper.Store
+ bloomStore bloomshipper.StoreBase
sharding util_ring.TenantSharding
@@ -69,7 +69,7 @@ func New(
ring ring.ReadRing,
ringLifeCycler *ring.BasicLifecycler,
limits Limits,
- store bloomshipper.StoreWithMetrics,
+ store bloomshipper.Store,
logger log.Logger,
r prometheus.Registerer,
) (*Compactor, error) {
@@ -303,7 +303,7 @@ func (c *Compactor) loadWork(
if err != nil {
return errors.Wrap(err, "getting tenants")
}
- nTenants := tenants.Len()
+ nTenants := tenants.Remaining()
type ownedTenant struct {
tenant string
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index f9defdc1fdfbc..3929f2da3f805 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -22,7 +22,7 @@ import (
type SimpleBloomController struct {
tsdbStore TSDBStore
- bloomStore bloomshipper.Store
+ bloomStore bloomshipper.StoreBase
chunkLoader ChunkLoader
metrics *Metrics
limits Limits
@@ -32,7 +32,7 @@ type SimpleBloomController struct {
func NewSimpleBloomController(
tsdbStore TSDBStore,
- blockStore bloomshipper.Store,
+ blockStore bloomshipper.StoreBase,
chunkLoader ChunkLoader,
limits Limits,
metrics *Metrics,
@@ -287,7 +287,7 @@ func (s *SimpleBloomController) loadWorkForGap(
tenant string,
id tsdb.Identifier,
gap gapWithBlocks,
-) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBloom], error) {
+) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBlooms], error) {
// load a series iterator for the gap
seriesItr, err := s.tsdbStore.LoadTSDB(ctx, table, tenant, id, gap.bounds)
if err != nil {
diff --git a/pkg/bloomcompactor/retention.go b/pkg/bloomcompactor/retention.go
index 7dd30dece9e8a..caaf80ffb9c3f 100644
--- a/pkg/bloomcompactor/retention.go
+++ b/pkg/bloomcompactor/retention.go
@@ -95,7 +95,7 @@ type RetentionLimits interface {
type RetentionManager struct {
cfg RetentionConfig
limits RetentionLimits
- bloomStore bloomshipper.Store
+ bloomStore bloomshipper.StoreBase
sharding retentionSharding
metrics *Metrics
logger log.Logger
@@ -108,7 +108,7 @@ type RetentionManager struct {
func NewRetentionManager(
cfg RetentionConfig,
limits RetentionLimits,
- bloomStore bloomshipper.Store,
+ bloomStore bloomshipper.StoreBase,
sharding retentionSharding,
metrics *Metrics,
logger log.Logger,
diff --git a/pkg/bloomcompactor/retention_test.go b/pkg/bloomcompactor/retention_test.go
index b8e855b0d4e90..e610ab5b02e02 100644
--- a/pkg/bloomcompactor/retention_test.go
+++ b/pkg/bloomcompactor/retention_test.go
@@ -24,6 +24,7 @@ import (
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config"
"github.com/grafana/loki/v3/pkg/storage/types"
util_log "github.com/grafana/loki/v3/pkg/util/log"
+ "github.com/grafana/loki/v3/pkg/util/mempool"
lokiring "github.com/grafana/loki/v3/pkg/util/ring"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -822,7 +823,7 @@ func NewMockBloomStoreWithWorkDir(t *testing.T, workDir string) (*bloomshipper.B
metasCache := cache.NewMockCache()
blocksCache := bloomshipper.NewFsBlocksCache(storageConfig.BloomShipperConfig.BlocksCache, prometheus.NewPedanticRegistry(), logger)
- store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageConfig, metrics, metasCache, blocksCache, reg, logger)
+ store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageConfig, metrics, metasCache, blocksCache, &mempool.SimpleHeapAllocator{}, reg, logger)
if err == nil {
t.Cleanup(store.Stop)
}
diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go
index 229efe9c16935..2cb16eac02eae 100644
--- a/pkg/bloomcompactor/spec.go
+++ b/pkg/bloomcompactor/spec.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
- "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -45,7 +44,7 @@ type SimpleBloomGenerator struct {
userID string
store v1.Iterator[*v1.Series]
chunkLoader ChunkLoader
- blocksIter v1.ResettableIterator[*v1.SeriesWithBloom]
+ blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms]
// options to build blocks with
opts v1.BlockOptions
@@ -68,7 +67,7 @@ func NewSimpleBloomGenerator(
opts v1.BlockOptions,
store v1.Iterator[*v1.Series],
chunkLoader ChunkLoader,
- blocksIter v1.ResettableIterator[*v1.SeriesWithBloom],
+ blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms],
readWriterFn func() (v1.BlockWriter, v1.BlockReader),
reporter func(model.Fingerprint),
metrics *Metrics,
@@ -98,44 +97,30 @@ func NewSimpleBloomGenerator(
}
}
-func (s *SimpleBloomGenerator) populator(ctx context.Context) func(series *v1.Series, bloom *v1.Bloom) (int, bool, error) {
- return func(series *v1.Series, bloom *v1.Bloom) (int, bool, error) {
- start := time.Now()
+func (s *SimpleBloomGenerator) populator(ctx context.Context) v1.BloomPopulatorFunc {
+ return func(
+ series *v1.Series,
+ srcBlooms v1.SizedIterator[*v1.Bloom],
+ toAdd v1.ChunkRefs,
+ ch chan *v1.BloomCreation,
+ ) {
level.Debug(s.logger).Log(
"msg", "populating bloom filter",
"stage", "before",
"fp", series.Fingerprint,
"chunks", len(series.Chunks),
)
- chunkItersWithFP, err := s.chunkLoader.Load(ctx, s.userID, series)
- if err != nil {
- return 0, false, errors.Wrapf(err, "failed to load chunks for series: %+v", series)
- }
-
- bytesAdded, skip, err := s.tokenizer.Populate(
- &v1.SeriesWithBloom{
- Series: series,
- Bloom: bloom,
- },
- chunkItersWithFP.itr,
- )
+ chunkItersWithFP := s.chunkLoader.Load(ctx, s.userID, &v1.Series{
+ Fingerprint: series.Fingerprint,
+ Chunks: toAdd,
+ })
- level.Debug(s.logger).Log(
- "msg", "populating bloom filter",
- "stage", "after",
- "fp", series.Fingerprint,
- "chunks", len(series.Chunks),
- "series_bytes", bytesAdded,
- "duration", time.Since(start),
- "err", err,
- )
+ s.tokenizer.Populate(srcBlooms, chunkItersWithFP.itr, ch)
if s.reporter != nil {
s.reporter(series.Fingerprint)
}
- return bytesAdded, skip, err
}
-
}
func (s *SimpleBloomGenerator) Generate(ctx context.Context) *LazyBlockBuilderIterator {
@@ -179,10 +164,10 @@ type LazyBlockBuilderIterator struct {
ctx context.Context
opts v1.BlockOptions
metrics *Metrics
- populate func(*v1.Series, *v1.Bloom) (int, bool, error)
+ populate v1.BloomPopulatorFunc
readWriterFn func() (v1.BlockWriter, v1.BlockReader)
series v1.PeekingIterator[*v1.Series]
- blocks v1.ResettableIterator[*v1.SeriesWithBloom]
+ blocks v1.ResettableIterator[*v1.SeriesWithBlooms]
bytesAdded int
curr *v1.Block
@@ -193,10 +178,10 @@ func NewLazyBlockBuilderIterator(
ctx context.Context,
opts v1.BlockOptions,
metrics *Metrics,
- populate func(*v1.Series, *v1.Bloom) (int, bool, error),
+ populate v1.BloomPopulatorFunc,
readWriterFn func() (v1.BlockWriter, v1.BlockReader),
series v1.PeekingIterator[*v1.Series],
- blocks v1.ResettableIterator[*v1.SeriesWithBloom],
+ blocks v1.ResettableIterator[*v1.SeriesWithBlooms],
) *LazyBlockBuilderIterator {
return &LazyBlockBuilderIterator{
ctx: ctx,
@@ -270,7 +255,7 @@ type ChunkItersByFingerprint struct {
// ChunkLoader loads chunks from a store
type ChunkLoader interface {
- Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error)
+ Load(ctx context.Context, userID string, series *v1.Series) *ChunkItersByFingerprint
}
// StoreChunkLoader loads chunks from a store
@@ -286,7 +271,7 @@ func NewStoreChunkLoader(fetcherProvider stores.ChunkFetcherProvider, metrics *M
}
}
-func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error) {
+func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.Series) *ChunkItersByFingerprint {
// NB(owen-d): This is probably unnecessary as we should only have one fetcher
// because we'll only be working on a single index period at a time, but this should protect
// us in the case of refactoring/changing this and likely isn't a perf bottleneck.
@@ -317,5 +302,5 @@ func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.S
return &ChunkItersByFingerprint{
fp: series.Fingerprint,
itr: newBatchedChunkLoader(ctx, fetchers, inputs, s.metrics, batchedLoaderDefaultBatchSize),
- }, nil
+ }
}
diff --git a/pkg/bloomcompactor/spec_test.go b/pkg/bloomcompactor/spec_test.go
index 7e39b8dec57f0..e08cafb68cab4 100644
--- a/pkg/bloomcompactor/spec_test.go
+++ b/pkg/bloomcompactor/spec_test.go
@@ -13,21 +13,22 @@ import (
"github.com/grafana/loki/v3/pkg/chunkenc"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/util/mempool"
)
-func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) {
+func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBlooms, refs []bloomshipper.BlockRef) {
return blocksFromSchemaWithRange(t, n, options, 0, 0xffff)
}
// splits 100 series across `n` non-overlapping blocks.
// uses options to build blocks with.
-func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) {
+func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBlooms, refs []bloomshipper.BlockRef) {
if 100%n != 0 {
panic("100 series must be evenly divisible by n")
}
numSeries := 100
- data, _ = v1.MkBasicSeriesWithBlooms(numSeries, 0, fromFP, throughFp, 0, 10000)
+ data, _ = v1.MkBasicSeriesWithBlooms(numSeries, fromFP, throughFp, 0, 10000)
seriesPerBlock := numSeries / n
@@ -46,7 +47,7 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro
minIdx, maxIdx := i*seriesPerBlock, (i+1)*seriesPerBlock
- itr := v1.NewSliceIter[v1.SeriesWithBloom](data[minIdx:maxIdx])
+ itr := v1.NewSliceIter[v1.SeriesWithBlooms](data[minIdx:maxIdx])
_, err = builder.BuildFrom(itr)
require.Nil(t, err)
@@ -62,11 +63,11 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro
// doesn't actually load any chunks
type dummyChunkLoader struct{}
-func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) (*ChunkItersByFingerprint, error) {
+func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) *ChunkItersByFingerprint {
return &ChunkItersByFingerprint{
fp: series.Fingerprint,
itr: v1.NewEmptyIter[v1.ChunkRefWithIter](),
- }, nil
+ }
}
func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Series], blocks []*v1.Block, refs []bloomshipper.BlockRef) *SimpleBloomGenerator {
@@ -74,7 +75,7 @@ func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Ser
for i, b := range blocks {
bqs = append(bqs, &bloomshipper.CloseableBlockQuerier{
BlockRef: refs[i],
- BlockQuerier: v1.NewBlockQuerier(b, false, v1.DefaultMaxPageSize),
+ BlockQuerier: v1.NewBlockQuerier(b, &mempool.SimpleHeapAllocator{}, v1.DefaultMaxPageSize),
})
}
@@ -132,9 +133,9 @@ func TestSimpleBloomGenerator(t *testing.T) {
} {
t.Run(fmt.Sprintf("%s/%s", tc.desc, enc), func(t *testing.T) {
sourceBlocks, data, refs := blocksFromSchemaWithRange(t, 2, tc.fromSchema, 0x00000, 0x6ffff)
- storeItr := v1.NewMapIter[v1.SeriesWithBloom, *v1.Series](
- v1.NewSliceIter[v1.SeriesWithBloom](data),
- func(swb v1.SeriesWithBloom) *v1.Series {
+ storeItr := v1.NewMapIter[v1.SeriesWithBlooms, *v1.Series](
+ v1.NewSliceIter[v1.SeriesWithBlooms](data),
+ func(swb v1.SeriesWithBlooms) *v1.Series {
return swb.Series
},
)
@@ -150,9 +151,9 @@ func TestSimpleBloomGenerator(t *testing.T) {
// Check all the input series are present in the output blocks.
expectedRefs := v1.PointerSlice(data)
- outputRefs := make([]*v1.SeriesWithBloom, 0, len(data))
+ outputRefs := make([]*v1.SeriesWithBlooms, 0, len(data))
for _, block := range outputBlocks {
- bq := v1.NewBlockQuerier(block, false, v1.DefaultMaxPageSize)
+ bq := v1.NewBlockQuerier(block, &mempool.SimpleHeapAllocator{}, v1.DefaultMaxPageSize).Iter()
for bq.Next() {
outputRefs = append(outputRefs, bq.At())
}
diff --git a/pkg/bloomcompactor/versioned_range.go b/pkg/bloomcompactor/versioned_range.go
index 03da12f1d7da5..8af56a0754cc3 100644
--- a/pkg/bloomcompactor/versioned_range.go
+++ b/pkg/bloomcompactor/versioned_range.go
@@ -214,13 +214,24 @@ func outdatedMetas(metas []bloomshipper.Meta) (outdated []bloomshipper.Meta, err
// Sort metas descending by most recent source when checking
// for outdated metas (older metas are discarded if they don't change the range).
sort.Slice(metas, func(i, j int) bool {
- a, err := metas[i].MostRecentSource()
- if err != nil {
- panic(err.Error())
+ a, aExists := metas[i].MostRecentSource()
+ b, bExists := metas[j].MostRecentSource()
+
+ if !aExists && !bExists {
+ // stable sort two sourceless metas by their bounds (easier testing)
+ return metas[i].Bounds.Less(metas[j].Bounds)
}
- b, err := metas[j].MostRecentSource()
- if err != nil {
- panic(err.Error())
+
+ if !aExists {
+ // If a meta has no sources, it's out of date by definition.
+ // By convention we sort it to the beginning of the list and will mark it for removal later
+ return true
+ }
+
+ if !bExists {
+ // if a has sources but b does not, report a as not-less so that the sourceless b sorts to the front
+ return false
}
return !a.TS.Before(b.TS)
})
@@ -231,9 +242,11 @@ func outdatedMetas(metas []bloomshipper.Meta) (outdated []bloomshipper.Meta, err
)
for _, meta := range metas {
- mostRecent, err := meta.MostRecentSource()
- if err != nil {
- return nil, err
+ mostRecent, exists := meta.MostRecentSource()
+ if !exists {
+ // if the meta exists but does not reference a TSDB, it's out of date
+ // TODO(owen-d): this shouldn't happen, figure out why
+ outdated = append(outdated, meta)
+ continue
}
version := int(model.TimeFromUnixNano(mostRecent.TS.UnixNano()))
tokenRange, added = tokenRange.Add(version, meta.Bounds)
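
The ordering convention introduced above (metas without sources sort to the front because they are outdated by definition, two sourceless metas are ordered by their bounds, otherwise the meta with the newer source comes first) in a minimal standalone form; the meta struct below is a placeholder in which a nil timestamp means "no sources", not the bloomshipper.Meta API.

package main

import (
	"fmt"
	"sort"
	"time"
)

type meta struct {
	bounds string     // stand-in for the fingerprint bounds
	source *time.Time // nil means the meta references no source TSDB
}

// sortMetas orders metas so that sourceless ones come first (they will be
// marked outdated), and the rest are ordered newest source first.
func sortMetas(metas []meta) {
	sort.Slice(metas, func(i, j int) bool {
		a, b := metas[i].source, metas[j].source
		switch {
		case a == nil && b == nil:
			return metas[i].bounds < metas[j].bounds // deterministic order for tests
		case a == nil:
			return true // sourceless meta sorts to the front
		case b == nil:
			return false // likewise: the sourceless b sorts ahead of a
		default:
			return !a.Before(*b) // most recent source first
		}
	})
}

func main() {
	t0, t1 := time.Unix(100, 0), time.Unix(200, 0)
	metas := []meta{{"0000-0005", &t0}, {"0006-0010", nil}, {"0000-0010", &t1}}
	sortMetas(metas)
	for _, m := range metas {
		fmt.Println(m.bounds)
	}
	// prints: 0006-0010, then 0000-0010 (newer source), then 0000-0005
}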
diff --git a/pkg/bloomcompactor/versioned_range_test.go b/pkg/bloomcompactor/versioned_range_test.go
index a85418bc6e1e5..67db348036ffa 100644
--- a/pkg/bloomcompactor/versioned_range_test.go
+++ b/pkg/bloomcompactor/versioned_range_test.go
@@ -313,6 +313,35 @@ func Test_OutdatedMetas(t *testing.T) {
gen(v1.NewBounds(0, 5), 0),
},
},
+ {
+ desc: "metas without sources are removed",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0),
+ gen(v1.NewBounds(6, 10), 0),
+ gen(v1.NewBounds(0, 10), 1),
+ gen(v1.NewBounds(11, 15)), // Meta without sources
+ },
+ exp: []bloomshipper.Meta{
+ gen(v1.NewBounds(11, 15)), // Meta without sources
+ gen(v1.NewBounds(6, 10), 0),
+ gen(v1.NewBounds(0, 5), 0),
+ },
+ },
+ {
+ desc: "metas without sources are interleaved",
+ metas: []bloomshipper.Meta{
+ gen(v1.NewBounds(0, 5), 0),
+ gen(v1.NewBounds(6, 10)), // Meta without sources
+ gen(v1.NewBounds(0, 10), 1),
+ gen(v1.NewBounds(11, 15)), // Meta without sources
+ gen(v1.NewBounds(16, 20), 2),
+ },
+ exp: []bloomshipper.Meta{
+ gen(v1.NewBounds(6, 10)), // Meta without sources
+ gen(v1.NewBounds(11, 15)), // Meta without sources
+ gen(v1.NewBounds(0, 5), 0),
+ },
+ },
} {
t.Run(tc.desc, func(t *testing.T) {
outdated, err := outdatedMetas(tc.metas)
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index 165e2d652473b..603d41c2c4371 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -1,43 +1,7 @@
/*
-Bloom Gateway package
-
The bloom gateway is a component that can be run as a standalone microservice
target and provides capabilities for filtering ChunkRefs based on a given list
of line filter expressions.
-
- Querier Query Frontend
- | |
- ................................... service boundary
- | |
- +----+------+
- |
- indexgateway.Gateway
- |
- bloomgateway.BloomQuerier
- |
- bloomgateway.GatewayClient
- |
- logproto.BloomGatewayClient
- |
- ................................... service boundary
- |
- bloomgateway.Gateway
- |
- queue.RequestQueue
- |
- bloomgateway.Worker
- |
- bloomgateway.Processor
- |
- bloomshipper.Store
- |
- bloomshipper.Client
- |
- ObjectClient
- |
- ................................... service boundary
- |
- object storage
*/
package bloomgateway
@@ -63,13 +27,10 @@ import (
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/constants"
- util_log "github.com/grafana/loki/v3/pkg/util/log"
utillog "github.com/grafana/loki/v3/pkg/util/log"
"github.com/grafana/loki/v3/pkg/util/spanlogger"
)
-var errGatewayUnhealthy = errors.New("bloom-gateway is unhealthy in the ring")
-
const (
metricsSubsystem = "bloom_gateway"
querierMetricsSubsystem = "bloom_gateway_querier"
@@ -89,7 +50,7 @@ type Gateway struct {
queue *queue.RequestQueue
activeUsers *util.ActiveUsersCleanupService
- bloomStore bloomshipper.StoreWithMetrics
+ bloomStore bloomshipper.Store
pendingTasks *atomic.Int64
@@ -111,7 +72,7 @@ func (l *fixedQueueLimits) MaxConsumers(_ string, _ int) int {
}
// New returns a new instance of the Bloom Gateway.
-func New(cfg Config, store bloomshipper.StoreWithMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) {
+func New(cfg Config, store bloomshipper.Store, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) {
utillog.WarnExperimentalUse("Bloom Gateway", logger)
g := &Gateway{
cfg: cfg,
@@ -209,7 +170,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
stats, ctx := ContextWithEmptyStats(ctx)
logger := spanlogger.FromContextWithFallback(
ctx,
- util_log.WithContext(ctx, g.logger),
+ utillog.WithContext(ctx, g.logger),
)
defer func() {
@@ -261,9 +222,6 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
}, nil
}
- // TODO(chaudum): I intentionally keep the logic for handling multiple tasks,
- // so that the PR does not explode in size. This should be cleaned up at some point.
-
seriesByDay := partitionRequest(req)
stats.NumTasks = len(seriesByDay)
@@ -279,14 +237,13 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
return nil, errors.New("request time range must span exactly one day")
}
- tasks := make([]Task, 0, len(seriesByDay))
- responses := make([][]v1.Output, 0, len(seriesByDay))
- for _, seriesForDay := range seriesByDay {
- task := newTask(ctx, tenantID, seriesForDay, filters, blocks)
- // TODO(owen-d): include capacity in constructor?
- task.responses = responsesPool.Get(len(seriesForDay.series))
- tasks = append(tasks, task)
- }
+ series := seriesByDay[0]
+ task := newTask(ctx, tenantID, series, filters, blocks)
+
+ // TODO(owen-d): include capacity in constructor?
+ task.responses = responsesPool.Get(len(series.series))
+ // free up the responses
+ defer responsesPool.Put(task.responses)
g.activeUsers.UpdateUserTimestamp(tenantID, time.Now())
@@ -297,62 +254,41 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
preFilterChunks += len(series.Refs)
}
- // Ideally we could use an unbuffered channel here, but since we return the
- // request on the first error, there can be cases where the request context
- // is not done yet and the consumeTask() function wants to send to the
- // tasksCh, but nobody reads from it any more.
- queueStart := time.Now()
- tasksCh := make(chan Task, len(tasks))
- for _, task := range tasks {
- task := task
- task.enqueueTime = time.Now()
-
- // TODO(owen-d): gracefully handle full queues
- if err := g.queue.Enqueue(tenantID, nil, task, func() {
- // When enqueuing, we also add the task to the pending tasks
- _ = g.pendingTasks.Inc()
- }); err != nil {
- stats.Status = labelFailure
- return nil, errors.Wrap(err, "failed to enqueue task")
- }
- // TODO(owen-d): use `concurrency` lib, bound parallelism
- go g.consumeTask(ctx, task, tasksCh)
- }
-
- sp.LogKV("msg", "enqueued tasks", "duration", time.Since(queueStart).String())
+ tasksCh := make(chan Task, 1)
- remaining := len(tasks)
+ // TODO(owen-d): gracefully handle full queues
+ task.enqueueTime = time.Now()
+ if err := g.queue.Enqueue(tenantID, nil, task, func() {
+ // When enqueuing, we also add the task to the pending tasks
+ _ = g.pendingTasks.Inc()
+ }); err != nil {
+ stats.Status = labelFailure
+ return nil, errors.Wrap(err, "failed to enqueue task")
+ }
+ // TODO(owen-d): use `concurrency` lib, bound parallelism
+ go g.consumeTask(ctx, task, tasksCh)
combinedRecorder := v1.NewBloomRecorder(ctx, "combined")
- for remaining > 0 {
- select {
- case <-ctx.Done():
- stats.Status = "cancel"
- return nil, errors.Wrap(ctx.Err(), "request failed")
- case task := <-tasksCh:
- if task.Err() != nil {
- stats.Status = labelFailure
- return nil, errors.Wrap(task.Err(), "request failed")
- }
- responses = append(responses, task.responses)
- combinedRecorder.Merge(task.recorder)
- remaining--
+
+ select {
+ case <-ctx.Done():
+ stats.Status = "cancel"
+ return nil, errors.Wrap(ctx.Err(), "request failed")
+ case task = <-tasksCh:
+ if task.Err() != nil {
+ stats.Status = labelFailure
+ return nil, errors.Wrap(task.Err(), "request failed")
}
+ combinedRecorder.Merge(task.recorder)
}
- combinedRecorder.Report(util_log.WithContext(ctx, g.logger), g.bloomStore.BloomMetrics())
- sp.LogKV("msg", "received all responses")
+ combinedRecorder.Report(utillog.WithContext(ctx, g.logger), g.bloomStore.BloomMetrics())
start := time.Now()
- filtered := filterChunkRefs(req, responses)
+ filtered := filterChunkRefs(req, task.responses)
duration := time.Since(start)
stats.AddPostProcessingTime(duration)
- // free up the responses
- for _, resp := range responses {
- responsesPool.Put(resp)
- }
-
var postFilterSeries, postFilterChunks int
postFilterSeries = len(filtered)
for _, group := range filtered {
@@ -404,35 +340,13 @@ func (g *Gateway) consumeTask(ctx context.Context, task Task, tasksCh chan<- Tas
}
}
-// merges a list of responses via a heap. The same fingerprints and chunks can be present in multiple responses.
-// Individual responses do not need to be be ordered beforehand.
-func orderedResponsesByFP(responses [][]v1.Output) v1.Iterator[v1.Output] {
- if len(responses) == 0 {
- return v1.NewEmptyIter[v1.Output]()
- }
- if len(responses) == 1 {
- sort.Slice(responses[0], func(i, j int) bool { return responses[0][i].Fp < responses[0][j].Fp })
- return v1.NewSliceIter(responses[0])
- }
-
- itrs := make([]v1.PeekingIterator[v1.Output], 0, len(responses))
- for _, r := range responses {
- sort.Slice(r, func(i, j int) bool { return r[i].Fp < r[j].Fp })
- itrs = append(itrs, v1.NewPeekingIter(v1.NewSliceIter(r)))
- }
- return v1.NewHeapIterator[v1.Output](
- func(o1, o2 v1.Output) bool { return o1.Fp < o2.Fp },
- itrs...,
- )
-}
-
// TODO(owen-d): improve perf. This can be faster with a more specialized impl
// NB(owen-d): `req` is mutated in place for performance, but `responses` is not
// Removals of the outputs must be sorted.
-func filterChunkRefs(
- req *logproto.FilterChunkRefRequest,
- responses [][]v1.Output,
-) []*logproto.GroupedChunkRefs {
+func filterChunkRefs(req *logproto.FilterChunkRefRequest, responses []v1.Output) []*logproto.GroupedChunkRefs {
+ // sort responses by fingerprint
+ sort.Slice(responses, func(i, j int) bool { return responses[i].Fp < responses[j].Fp })
+
res := make([]*logproto.GroupedChunkRefs, 0, len(req.Refs))
// dedupe outputs, merging the same series.
@@ -481,7 +395,7 @@ func filterChunkRefs(
res.Removals = chks
return res
},
- v1.NewPeekingIter(orderedResponsesByFP(responses)),
+ v1.NewPeekingIter(v1.NewSliceIter(responses)),
)
// Iterate through the requested and filtered series/chunks,
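
With responses flattened to a single []v1.Output, filterChunkRefs sorts by fingerprint and then dedupes outputs that refer to the same series. Below is a minimal standalone sketch of that merge step, using placeholder types and interpreting the merge as a union of removal sets (consistent with the "duplicate removals" test case further down); it is an illustration, not the package's dedupe iterator.

package main

import (
	"fmt"
	"sort"
)

type output struct {
	fp       uint64
	removals []uint32 // chunk checksums to remove
}

// mergeOutputs sorts outputs by fingerprint and unions the removal sets of
// outputs that refer to the same series.
func mergeOutputs(outs []output) []output {
	sort.Slice(outs, func(i, j int) bool { return outs[i].fp < outs[j].fp })

	merged := make([]output, 0, len(outs))
	for _, o := range outs {
		last := len(merged) - 1
		if last >= 0 && merged[last].fp == o.fp {
			// same series reported twice: union the removals, dropping duplicates
			seen := make(map[uint32]struct{}, len(merged[last].removals))
			for _, c := range merged[last].removals {
				seen[c] = struct{}{}
			}
			for _, c := range o.removals {
				if _, ok := seen[c]; !ok {
					merged[last].removals = append(merged[last].removals, c)
				}
			}
			continue
		}
		merged = append(merged, output{fp: o.fp, removals: append([]uint32(nil), o.removals...)})
	}
	return merged
}

func main() {
	outs := []output{{2, []uint32{1}}, {0, []uint32{0, 1}}, {0, []uint32{1, 2}}}
	fmt.Println(mergeOutputs(outs)) // [{0 [0 1 2]} {2 [1]}]
}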
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index 15c9ca2be2d85..67bb59e460ad9 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -27,6 +27,7 @@ import (
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
bloomshipperconfig "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config"
"github.com/grafana/loki/v3/pkg/storage/types"
+ "github.com/grafana/loki/v3/pkg/util/mempool"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -92,7 +93,7 @@ func setupBloomStore(t *testing.T) *bloomshipper.BloomStore {
reg := prometheus.NewRegistry()
blocksCache := bloomshipper.NewFsBlocksCache(storageCfg.BloomShipperConfig.BlocksCache, nil, logger)
- store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, cm, nil, blocksCache, reg, logger)
+ store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, cm, nil, blocksCache, &mempool.SimpleHeapAllocator{}, reg, logger)
require.NoError(t, err)
t.Cleanup(store.Stop)
@@ -140,8 +141,8 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
t.Run("request fails when providing invalid block", func(t *testing.T) {
now := mktime("2023-10-03 10:00")
- _, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
- mockStore := newMockBloomStore(queriers, metas)
+ refs, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
+ mockStore := newMockBloomStore(refs, queriers, metas)
reg := prometheus.NewRegistry()
gw, err := New(cfg, mockStore, logger, reg)
@@ -176,7 +177,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
now := mktime("2023-10-03 10:00")
refs, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
- mockStore := newMockBloomStore(queriers, metas)
+ mockStore := newMockBloomStore(refs, queriers, metas)
mockStore.err = errors.New("request failed")
reg := prometheus.NewRegistry()
@@ -220,7 +221,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
// replace store implementation and re-initialize workers and sub-services
refs, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
- mockStore := newMockBloomStore(queriers, metas)
+ mockStore := newMockBloomStore(refs, queriers, metas)
mockStore.delay = 2000 * time.Millisecond
reg := prometheus.NewRegistry()
@@ -263,7 +264,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
now := mktime("2023-10-03 10:00")
reg := prometheus.NewRegistry()
- gw, err := New(cfg, newMockBloomStore(nil, nil), logger, reg)
+ gw, err := New(cfg, newMockBloomStore(nil, nil, nil), logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -309,7 +310,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
now := mktime("2023-10-03 10:00")
reg := prometheus.NewRegistry()
- gw, err := New(cfg, newMockBloomStore(nil, nil), logger, reg)
+ gw, err := New(cfg, newMockBloomStore(nil, nil, nil), logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -325,7 +326,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
{
Fingerprint: uint64(1000 + 100*idx),
UserID: tenantID,
- From: now.Add(-24 * time.Hour),
+ From: now.Add(-4 * time.Hour),
Through: now,
Checksum: uint32(idx),
},
@@ -335,7 +336,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
TenantID: tenantID,
TableName: "table_1",
Bounds: v1.NewBounds(0, 10000),
- StartTimestamp: now.Add(-24 * time.Hour),
+ StartTimestamp: now.Add(-4 * time.Hour),
EndTimestamp: now,
Checksum: uint32(idx),
},
@@ -343,7 +344,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
expr, err := syntax.ParseExpr(`{foo="bar"} |= "foo"`)
require.NoError(t, err)
req := &logproto.FilterChunkRefRequest{
- From: now.Add(-24 * time.Hour),
+ From: now.Add(-4 * time.Hour),
Through: now,
Refs: groupRefs(t, chunkRefs),
Plan: plan.QueryPlan{AST: expr},
@@ -363,7 +364,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
refs, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
reg := prometheus.NewRegistry()
- store := newMockBloomStore(queriers, metas)
+ store := newMockBloomStore(refs, queriers, metas)
gw, err := New(cfg, store, logger, reg)
require.NoError(t, err)
@@ -517,18 +518,15 @@ func TestFilterChunkRefs(t *testing.T) {
fp uint64
checksums []uint32
}
- mkRemovals := func(xs [][]instruction) [][]v1.Output {
- out := make([][]v1.Output, len(xs))
+ mkRemovals := func(xs []instruction) []v1.Output {
+ out := make([]v1.Output, len(xs))
for i, x := range xs {
- out[i] = make([]v1.Output, len(x))
- for j, c := range x {
- out[i][j] = v1.Output{
- Fp: model.Fingerprint(c.fp),
- Removals: make(v1.ChunkRefs, len(c.checksums)),
- }
- for k, chk := range c.checksums {
- out[i][j].Removals[k] = v1.ChunkRef{Checksum: chk}
- }
+ out[i] = v1.Output{
+ Fp: model.Fingerprint(x.fp),
+ Removals: make(v1.ChunkRefs, len(x.checksums)),
+ }
+ for k, chk := range x.checksums {
+ out[i].Removals[k] = v1.ChunkRef{Checksum: chk}
}
}
return out
@@ -551,7 +549,7 @@ func TestFilterChunkRefs(t *testing.T) {
for _, tc := range []struct {
desc string
input *logproto.FilterChunkRefRequest
- removals [][]instruction
+ removals []instruction
expected *logproto.FilterChunkRefRequest
}{
{
@@ -562,22 +560,18 @@ func TestFilterChunkRefs(t *testing.T) {
{
desc: "remove all",
input: mkInput(2, 2),
- removals: [][]instruction{
- {
- {fp: 0, checksums: []uint32{0, 1}},
- {fp: 1, checksums: []uint32{0, 1}},
- },
+ removals: []instruction{
+ {fp: 0, checksums: []uint32{0, 1}},
+ {fp: 1, checksums: []uint32{0, 1}},
},
expected: mkInput(0, 0),
},
{
desc: "remove every other series",
input: mkInput(4, 2),
- removals: [][]instruction{
- {
- {fp: 0, checksums: []uint32{0, 1}},
- {fp: 2, checksums: []uint32{0, 1}},
- },
+ removals: []instruction{
+ {fp: 0, checksums: []uint32{0, 1}},
+ {fp: 2, checksums: []uint32{0, 1}},
},
expected: mkResult([]instruction{
{fp: 1, checksums: []uint32{0, 1}},
@@ -587,13 +581,11 @@ func TestFilterChunkRefs(t *testing.T) {
{
desc: "remove the last chunk for each series",
input: mkInput(4, 2),
- removals: [][]instruction{
- {
- {fp: 0, checksums: []uint32{1}},
- {fp: 1, checksums: []uint32{1}},
- {fp: 2, checksums: []uint32{1}},
- {fp: 3, checksums: []uint32{1}},
- },
+ removals: []instruction{
+ {fp: 0, checksums: []uint32{1}},
+ {fp: 1, checksums: []uint32{1}},
+ {fp: 2, checksums: []uint32{1}},
+ {fp: 3, checksums: []uint32{1}},
},
expected: mkResult([]instruction{
{fp: 0, checksums: []uint32{0}},
@@ -605,11 +597,9 @@ func TestFilterChunkRefs(t *testing.T) {
{
desc: "remove the middle chunk for every other series",
input: mkInput(4, 3),
- removals: [][]instruction{
- {
- {fp: 0, checksums: []uint32{1}},
- {fp: 2, checksums: []uint32{1}},
- },
+ removals: []instruction{
+ {fp: 0, checksums: []uint32{1}},
+ {fp: 2, checksums: []uint32{1}},
},
expected: mkResult([]instruction{
{fp: 0, checksums: []uint32{0, 2}},
@@ -621,10 +611,8 @@ func TestFilterChunkRefs(t *testing.T) {
{
desc: "remove the first chunk of the last series",
input: mkInput(4, 3),
- removals: [][]instruction{
- {
- {fp: 3, checksums: []uint32{0}},
- },
+ removals: []instruction{
+ {fp: 3, checksums: []uint32{0}},
},
expected: mkResult([]instruction{
{fp: 0, checksums: []uint32{0, 1, 2}},
@@ -636,13 +624,11 @@ func TestFilterChunkRefs(t *testing.T) {
{
desc: "duplicate removals",
input: mkInput(4, 3),
- removals: [][]instruction{
- {
- {fp: 0, checksums: []uint32{0, 1}},
- {fp: 0, checksums: []uint32{0, 1, 2}},
- {fp: 1, checksums: []uint32{0, 2}},
- {fp: 2, checksums: []uint32{1}},
- },
+ removals: []instruction{
+ {fp: 0, checksums: []uint32{0, 1}},
+ {fp: 0, checksums: []uint32{0, 1, 2}},
+ {fp: 1, checksums: []uint32{0, 2}},
+ {fp: 2, checksums: []uint32{1}},
},
expected: mkResult([]instruction{
{fp: 1, checksums: []uint32{1}},
@@ -650,45 +636,19 @@ func TestFilterChunkRefs(t *testing.T) {
{fp: 3, checksums: []uint32{0, 1, 2}},
}),
},
- {
- desc: "middle duplicates across 2 days",
- input: mkInput(4, 3),
- removals: [][]instruction{
- {
- {fp: 0, checksums: []uint32{1}},
- {fp: 2, checksums: []uint32{1}},
- },
- {
- {fp: 0, checksums: []uint32{1}},
- {fp: 2, checksums: []uint32{1}},
- },
- },
- expected: mkResult([]instruction{
- {fp: 0, checksums: []uint32{0, 2}},
- {fp: 1, checksums: []uint32{0, 1, 2}},
- {fp: 2, checksums: []uint32{0, 2}},
- {fp: 3, checksums: []uint32{0, 1, 2}},
- }),
- },
{
desc: "unordered fingerprints",
input: mkInput(4, 3),
- removals: [][]instruction{
- {
- {fp: 3, checksums: []uint32{2}},
- {fp: 0, checksums: []uint32{1, 2}},
- {fp: 2, checksums: []uint32{1, 2}},
- },
- {
- {fp: 1, checksums: []uint32{1}},
- {fp: 2, checksums: []uint32{0, 1}},
- {fp: 3, checksums: []uint32{0}},
- },
+ removals: []instruction{
+ {fp: 3, checksums: []uint32{2}},
+ {fp: 0, checksums: []uint32{1, 2}},
+ {fp: 2, checksums: []uint32{1, 2}},
},
expected: mkResult([]instruction{
{fp: 0, checksums: []uint32{0}},
- {fp: 1, checksums: []uint32{0, 2}},
- {fp: 3, checksums: []uint32{1}},
+ {fp: 1, checksums: []uint32{0, 1, 2}},
+ {fp: 2, checksums: []uint32{0}},
+ {fp: 3, checksums: []uint32{0, 1}},
}),
},
} {
@@ -752,7 +712,7 @@ func BenchmarkFilterChunkRefs(b *testing.B) {
{
desc: "filterChunkRefs",
f: func(req *logproto.FilterChunkRefRequest, responses []v1.Output) {
- filterChunkRefs(req, [][]v1.Output{responses})
+ filterChunkRefs(req, responses)
},
},
} {
diff --git a/pkg/bloomgateway/client.go b/pkg/bloomgateway/client.go
index 8a01514cdf2f1..1eaa1bb328834 100644
--- a/pkg/bloomgateway/client.go
+++ b/pkg/bloomgateway/client.go
@@ -4,7 +4,6 @@ import (
"context"
"flag"
"io"
- "math"
"sort"
"github.com/go-kit/log"
@@ -208,7 +207,6 @@ func (c *GatewayClient) FilterChunks(ctx context.Context, _ string, interval blo
return nil, nil
}
- firstFp, lastFp := uint64(math.MaxUint64), uint64(0)
pos := make(map[string]int)
servers := make([]addrWithGroups, 0, len(blocks))
for _, blockWithSeries := range blocks {
@@ -217,15 +215,6 @@ func (c *GatewayClient) FilterChunks(ctx context.Context, _ string, interval blo
return nil, errors.Wrapf(err, "server address for block: %s", blockWithSeries.block)
}
- // min/max fingerprint needed for the cache locality score
- first, last := getFirstLast(blockWithSeries.series)
- if first.Fingerprint < firstFp {
- firstFp = first.Fingerprint
- }
- if last.Fingerprint > lastFp {
- lastFp = last.Fingerprint
- }
-
if idx, found := pos[addr]; found {
servers[idx].groups = append(servers[idx].groups, blockWithSeries.series...)
servers[idx].blocks = append(servers[idx].blocks, blockWithSeries.block.String())
diff --git a/pkg/bloomgateway/client_test.go b/pkg/bloomgateway/client_test.go
index d46d881078dae..8a22edfcb9789 100644
--- a/pkg/bloomgateway/client_test.go
+++ b/pkg/bloomgateway/client_test.go
@@ -46,17 +46,17 @@ func shortRef(f, t model.Time, c uint32) *logproto.ShortRef {
func TestGatewayClient_MergeSeries(t *testing.T) {
inputs := [][]*logproto.GroupedChunkRefs{
- // response 1
+ // response 1 -- sorted
{
{Fingerprint: 0x00, Refs: []*logproto.ShortRef{shortRef(0, 1, 1), shortRef(1, 2, 2)}}, // not overlapping
{Fingerprint: 0x01, Refs: []*logproto.ShortRef{shortRef(0, 1, 3), shortRef(1, 2, 4)}}, // fully overlapping chunks
{Fingerprint: 0x02, Refs: []*logproto.ShortRef{shortRef(0, 1, 5), shortRef(1, 2, 6)}}, // partially overlapping chunks
},
- // response 2
+ // response 2 -- not sorted
{
+ {Fingerprint: 0x03, Refs: []*logproto.ShortRef{shortRef(0, 1, 8), shortRef(1, 2, 9)}}, // not overlapping
{Fingerprint: 0x01, Refs: []*logproto.ShortRef{shortRef(0, 1, 3), shortRef(1, 2, 4)}}, // fully overlapping chunks
{Fingerprint: 0x02, Refs: []*logproto.ShortRef{shortRef(1, 2, 6), shortRef(2, 3, 7)}}, // partially overlapping chunks
- {Fingerprint: 0x03, Refs: []*logproto.ShortRef{shortRef(0, 1, 8), shortRef(1, 2, 9)}}, // not overlapping
},
}
diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go
index 08afeffcbf70c..3520d7b18057e 100644
--- a/pkg/bloomgateway/multiplexing.go
+++ b/pkg/bloomgateway/multiplexing.go
@@ -127,7 +127,7 @@ func (t Task) Copy(series []*logproto.GroupedChunkRefs) Task {
interval: t.interval,
table: t.table,
ctx: t.ctx,
- done: make(chan struct{}),
+ done: t.done,
}
}
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 6973ad1f565b7..b0d4f57ca5c15 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -2,7 +2,6 @@ package bloomgateway
import (
"context"
- "math"
"time"
"github.com/go-kit/log"
@@ -10,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/grafana/dskit/concurrency"
+ "github.com/grafana/dskit/multierror"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/config"
@@ -34,21 +34,12 @@ type processor struct {
metrics *workerMetrics
}
-func (p *processor) run(ctx context.Context, tasks []Task) error {
- return p.runWithBounds(ctx, tasks, v1.MultiFingerprintBounds{{Min: 0, Max: math.MaxUint64}})
-}
-
-func (p *processor) runWithBounds(ctx context.Context, tasks []Task, bounds v1.MultiFingerprintBounds) error {
+func (p *processor) processTasks(ctx context.Context, tasks []Task) error {
tenant := tasks[0].tenant
- level.Info(p.logger).Log(
- "msg", "process tasks with bounds",
- "tenant", tenant,
- "tasks", len(tasks),
- "bounds", len(bounds),
- )
+ level.Info(p.logger).Log("msg", "process tasks", "tenant", tenant, "tasks", len(tasks))
for ts, tasks := range group(tasks, func(t Task) config.DayTime { return t.table }) {
- err := p.processTasks(ctx, tenant, ts, bounds, tasks)
+ err := p.processTasksForDay(ctx, tenant, ts, tasks)
if err != nil {
for _, task := range tasks {
task.CloseWithError(err)
@@ -62,7 +53,7 @@ func (p *processor) runWithBounds(ctx context.Context, tasks []Task, bounds v1.M
return nil
}
-func (p *processor) processTasks(ctx context.Context, tenant string, day config.DayTime, _ v1.MultiFingerprintBounds, tasks []Task) error {
+func (p *processor) processTasksForDay(ctx context.Context, tenant string, day config.DayTime, tasks []Task) error {
level.Info(p.logger).Log("msg", "process tasks for day", "tenant", tenant, "tasks", len(tasks), "day", day.String())
var duration time.Duration
@@ -71,10 +62,10 @@ func (p *processor) processTasks(ctx context.Context, tenant string, day config.
blocksRefs = append(blocksRefs, task.blocks...)
}
- data := partitionTasks(tasks, blocksRefs)
+ tasksByBlock := partitionTasksByBlock(tasks, blocksRefs)
- refs := make([]bloomshipper.BlockRef, 0, len(data))
- for _, block := range data {
+ refs := make([]bloomshipper.BlockRef, 0, len(tasksByBlock))
+ for _, block := range tasksByBlock {
refs = append(refs, block.ref)
}
@@ -88,7 +79,7 @@ func (p *processor) processTasks(ctx context.Context, tenant string, day config.
// after iteration for performance (alloc reduction).
// This is safe to do here because we do not capture
// the underlying bloom []byte outside of iteration
- bloomshipper.WithPool(true),
+ bloomshipper.WithPool(p.store.Allocator()),
)
duration = time.Since(startBlocks)
level.Debug(p.logger).Log("msg", "fetched blocks", "count", len(refs), "duration", duration, "err", err)
@@ -102,7 +93,7 @@ func (p *processor) processTasks(ctx context.Context, tenant string, day config.
}
startProcess := time.Now()
- res := p.processBlocks(ctx, bqs, data)
+ res := p.processBlocks(ctx, bqs, tasksByBlock)
duration = time.Since(startProcess)
for _, t := range tasks {
@@ -113,13 +104,14 @@ func (p *processor) processTasks(ctx context.Context, tenant string, day config.
}
func (p *processor) processBlocks(ctx context.Context, bqs []*bloomshipper.CloseableBlockQuerier, data []blockWithTasks) error {
-
+ // We opportunistically close blocks during iteration so their memory can be returned to the pool
+ // as soon as possible, but since we exit early on error, we must ensure all blocks get closed.
+ hasClosed := make([]bool, len(bqs))
defer func() {
- for i := range bqs {
- if bqs[i] == nil {
- continue
+ for i, bq := range bqs {
+ if bq != nil && !hasClosed[i] {
+ _ = bq.Close()
}
- bqs[i].Close()
}
}()
@@ -136,15 +128,21 @@ func (p *processor) processBlocks(ctx context.Context, bqs []*bloomshipper.Close
return errors.Errorf("block and querier bounds differ: %s vs %s", block.ref.Bounds, bq.Bounds)
}
- err := p.processBlock(ctx, bq.BlockQuerier, block.tasks)
- if err != nil {
- return errors.Wrap(err, "processing block")
- }
- return nil
+ var errs multierror.MultiError
+ errs.Add(
+ errors.Wrap(
+ p.processBlock(ctx, bq, block.tasks),
+ "processing block",
+ ),
+ )
+ errs.Add(bq.Close())
+ hasClosed[i] = true
+ return errs.Err()
})
}
-func (p *processor) processBlock(_ context.Context, blockQuerier *v1.BlockQuerier, tasks []Task) error {
+func (p *processor) processBlock(_ context.Context, bq *bloomshipper.CloseableBlockQuerier, tasks []Task) (err error) {
+ blockQuerier := bq.BlockQuerier
schema, err := blockQuerier.Schema()
if err != nil {
return err
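
The close-as-you-go pattern added to processBlocks (combine the per-block processing error with the Close error, remember which queriers were already closed, and let a deferred loop close the rest on early exit) can be sketched in isolation; errors.Join stands in for dskit's multierror, and the querier type below is a placeholder rather than bloomshipper.CloseableBlockQuerier.

package main

import (
	"errors"
	"fmt"
)

type querier struct{ name string }

func (q *querier) Close() error {
	fmt.Println("closed", q.name)
	return nil
}

// processAll closes each querier as soon as it has been processed, and the
// deferred loop closes whatever is still open if we return early on error.
func processAll(qs []*querier, process func(*querier) error) error {
	hasClosed := make([]bool, len(qs))
	defer func() {
		for i, q := range qs {
			if q != nil && !hasClosed[i] {
				_ = q.Close()
			}
		}
	}()

	for i, q := range qs {
		// combine the processing error with the Close error, then release the
		// block immediately so its memory can be reclaimed
		err := errors.Join(process(q), q.Close())
		hasClosed[i] = true
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	qs := []*querier{{name: "a"}, {name: "b"}, {name: "c"}}
	err := processAll(qs, func(q *querier) error {
		if q.name == "b" {
			return errors.New("boom")
		}
		return nil
	})
	fmt.Println("err:", err) // a and b closed in the loop, c closed by the deferred cleanup
}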
diff --git a/pkg/bloomgateway/processor_test.go b/pkg/bloomgateway/processor_test.go
index f9dc847f588bd..8ce78e7bdb76c 100644
--- a/pkg/bloomgateway/processor_test.go
+++ b/pkg/bloomgateway/processor_test.go
@@ -20,20 +20,30 @@ import (
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/v3/pkg/util/constants"
+ "github.com/grafana/loki/v3/pkg/util/mempool"
)
-var _ bloomshipper.Store = &dummyStore{}
+var _ bloomshipper.StoreBase = &dummyStore{}
-func newMockBloomStore(bqs []*bloomshipper.CloseableBlockQuerier, metas []bloomshipper.Meta) *dummyStore {
+// refs and blocks must be in 1-1 correspondence.
+func newMockBloomStore(refs []bloomshipper.BlockRef, blocks []*v1.Block, metas []bloomshipper.Meta) *dummyStore {
+ allocator := mempool.New("bloompages", mempool.Buckets{
+ {Size: 32, Capacity: 512 << 10},
+ }, nil)
return &dummyStore{
- querieres: bqs,
+ refs: refs,
+ blocks: blocks,
metas: metas,
+ allocator: allocator,
}
}
type dummyStore struct {
- metas []bloomshipper.Meta
- querieres []*bloomshipper.CloseableBlockQuerier
+ metas []bloomshipper.Meta
+ refs []bloomshipper.BlockRef
+ blocks []*v1.Block
+
+ allocator mempool.Allocator
// mock how long it takes to serve block queriers
delay time.Duration
@@ -73,11 +83,15 @@ func (s *dummyStore) Client(_ model.Time) (bloomshipper.Client, error) {
return nil, nil
}
+func (s *dummyStore) Allocator() mempool.Allocator {
+ return s.allocator
+}
+
func (s *dummyStore) Stop() {
}
func (s *dummyStore) FetchBlocks(_ context.Context, refs []bloomshipper.BlockRef, _ ...bloomshipper.FetchOption) ([]*bloomshipper.CloseableBlockQuerier, error) {
- result := make([]*bloomshipper.CloseableBlockQuerier, 0, len(s.querieres))
+ result := make([]*bloomshipper.CloseableBlockQuerier, 0, len(s.blocks))
if s.err != nil {
time.Sleep(s.delay)
@@ -85,8 +99,13 @@ func (s *dummyStore) FetchBlocks(_ context.Context, refs []bloomshipper.BlockRef
}
for _, ref := range refs {
- for _, bq := range s.querieres {
- if ref.Bounds.Equal(bq.Bounds) {
+ for i, block := range s.blocks {
+ if ref.Bounds.Equal(s.refs[i].Bounds) {
+ blockCopy := *block
+ bq := &bloomshipper.CloseableBlockQuerier{
+ BlockQuerier: v1.NewBlockQuerier(&blockCopy, s.Allocator(), v1.DefaultMaxPageSize),
+ BlockRef: s.refs[i],
+ }
result = append(result, bq)
}
}
@@ -107,9 +126,9 @@ func TestProcessor(t *testing.T) {
metrics := newWorkerMetrics(prometheus.NewPedanticRegistry(), constants.Loki, "bloom_gatway")
t.Run("success case - without blocks", func(t *testing.T) {
- _, metas, queriers, data := createBlocks(t, tenant, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
+ refs, metas, queriers, data := createBlocks(t, tenant, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
- mockStore := newMockBloomStore(queriers, metas)
+ mockStore := newMockBloomStore(refs, queriers, metas)
p := newProcessor("worker", 1, mockStore, log.NewNopLogger(), metrics)
chunkRefs := createQueryInputFromBlockData(t, tenant, data, 10)
@@ -147,21 +166,21 @@ func TestProcessor(t *testing.T) {
}(tasks[i])
}
- err := p.run(ctx, tasks)
+ err := p.processTasks(ctx, tasks)
wg.Wait()
require.NoError(t, err)
require.Equal(t, int64(0), results.Load())
})
t.Run("success case - with blocks", func(t *testing.T) {
- _, metas, queriers, data := createBlocks(t, tenant, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
+ refs, metas, queriers, data := createBlocks(t, tenant, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
blocks := make([]bloomshipper.BlockRef, 0, len(metas))
for _, meta := range metas {
// we can safely append all block refs from the meta, because it only contains a single one
blocks = append(blocks, meta.Blocks...)
}
- mockStore := newMockBloomStore(queriers, metas)
+ mockStore := newMockBloomStore(refs, queriers, metas)
p := newProcessor("worker", 1, mockStore, log.NewNopLogger(), metrics)
chunkRefs := createQueryInputFromBlockData(t, tenant, data, 10)
@@ -199,16 +218,16 @@ func TestProcessor(t *testing.T) {
}(tasks[i])
}
- err := p.run(ctx, tasks)
+ err := p.processTasks(ctx, tasks)
wg.Wait()
require.NoError(t, err)
require.Equal(t, int64(len(swb.series)), results.Load())
})
t.Run("failure case", func(t *testing.T) {
- _, metas, queriers, data := createBlocks(t, tenant, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
+ refs, metas, queriers, data := createBlocks(t, tenant, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
- mockStore := newMockBloomStore(queriers, metas)
+ mockStore := newMockBloomStore(refs, queriers, metas)
mockStore.err = errors.New("store failed")
p := newProcessor("worker", 1, mockStore, log.NewNopLogger(), metrics)
@@ -248,7 +267,7 @@ func TestProcessor(t *testing.T) {
}(tasks[i])
}
- err := p.run(ctx, tasks)
+ err := p.processTasks(ctx, tasks)
wg.Wait()
require.Errorf(t, err, "store failed")
require.Equal(t, int64(0), results.Load())
diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go
index c92d6fad30f73..23de7a15e2be7 100644
--- a/pkg/bloomgateway/querier.go
+++ b/pkg/bloomgateway/querier.go
@@ -23,6 +23,7 @@ import (
type querierMetrics struct {
chunksTotal prometheus.Counter
chunksFiltered prometheus.Counter
+ chunksSkipped prometheus.Counter
seriesTotal prometheus.Counter
seriesFiltered prometheus.Counter
seriesSkipped prometheus.Counter
@@ -42,6 +43,12 @@ func newQuerierMetrics(registerer prometheus.Registerer, namespace, subsystem st
Name: "chunks_filtered_total",
Help: "Total amount of chunks that have been filtered out. Does not count chunks in failed requests.",
}),
+ chunksSkipped: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "chunks_skipped_total",
+ Help: "Total amount of chunks that have been skipped and returned unfiltered, because no block matched the series.",
+ }),
seriesTotal: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
@@ -137,6 +144,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
}
}
+ var skippedGrps [][]*logproto.GroupedChunkRefs
responses := make([][]*logproto.GroupedChunkRefs, 0, 2)
// We can perform requests sequentially, because most of the time the request
// only covers a single day, and if not, it's at most two days.
@@ -152,9 +160,19 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
return nil, err
}
- // add chunk refs from series that were not mapped to any blocks
+ skippedGrps = append(skippedGrps, skipped)
responses = append(responses, refs, skipped)
- bq.metrics.seriesSkipped.Add(float64(len(skipped)))
+ }
+
+ // add chunk refs from series that were not mapped to any blocks
+ skippedDeduped, err := mergeSeries(skippedGrps, nil)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to dedupe skipped series")
+ }
+
+ var chunksSkipped int
+ for _, skippedSeries := range skippedDeduped {
+ chunksSkipped += len(skippedSeries.Refs)
}
deduped, err := mergeSeries(responses, nil)
@@ -185,15 +203,19 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
"responses", len(responses),
"preFilterChunks", preFilterChunks,
"postFilterChunks", postFilterChunks,
+ "skippedChunks", chunksSkipped,
"filteredChunks", preFilterChunks-postFilterChunks,
"preFilterSeries", preFilterSeries,
"postFilterSeries", postFilterSeries,
+ "skippedSeries", len(skippedDeduped),
"filteredSeries", preFilterSeries-postFilterSeries,
)
bq.metrics.chunksTotal.Add(float64(preFilterChunks))
+ bq.metrics.chunksSkipped.Add(float64(chunksSkipped))
bq.metrics.chunksFiltered.Add(float64(preFilterChunks - postFilterChunks))
bq.metrics.seriesTotal.Add(float64(preFilterSeries))
+ bq.metrics.seriesSkipped.Add(float64(len(skippedDeduped)))
bq.metrics.seriesFiltered.Add(float64(preFilterSeries - postFilterSeries))
return result, nil
diff --git a/pkg/bloomgateway/resolver.go b/pkg/bloomgateway/resolver.go
index 62ec5836cc136..0f6fe27626958 100644
--- a/pkg/bloomgateway/resolver.go
+++ b/pkg/bloomgateway/resolver.go
@@ -24,7 +24,7 @@ type blockWithSeries struct {
}
type defaultBlockResolver struct {
- store bloomshipper.Store
+ store bloomshipper.StoreBase
logger log.Logger
}
@@ -123,7 +123,7 @@ func unassignedSeries(mapped []blockWithSeries, series []*logproto.GroupedChunkR
return skipped
}
-func NewBlockResolver(store bloomshipper.Store, logger log.Logger) BlockResolver {
+func NewBlockResolver(store bloomshipper.StoreBase, logger log.Logger) BlockResolver {
return &defaultBlockResolver{
store: store,
logger: logger,
diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go
index 9617202b948c3..bb130019d4381 100644
--- a/pkg/bloomgateway/util.go
+++ b/pkg/bloomgateway/util.go
@@ -16,6 +16,24 @@ func truncateDay(ts model.Time) model.Time {
return model.TimeFromUnix(ts.Time().Truncate(Day).Unix())
}
+// daysForRange returns a list of model.Time truncated to the start of each day
+// for the inclusive range [from, through]
+func daysForRange(from, through model.Time) []model.Time {
+ fromDay, throughDay := truncateDay(from), truncateDay(through)
+
+ // Trim the last day if it's the same as the through time,
+ // but preserve at least 1 day
+ if throughDay.Equal(through) && !fromDay.Equal(throughDay) {
+ throughDay = throughDay.Add(-Day)
+ }
+
+ days := make([]model.Time, 0, int(throughDay.Sub(fromDay)/Day)+1)
+ for day := fromDay; !day.After(throughDay); day = day.Add(Day) {
+ days = append(days, day)
+ }
+ return days
+}
+
// getFromThrough assumes a list of ShortRefs sorted by From time
func getFromThrough(refs []*logproto.ShortRef) (model.Time, model.Time) {
if len(refs) == 0 {
@@ -27,23 +45,17 @@ func getFromThrough(refs []*logproto.ShortRef) (model.Time, model.Time) {
}
maxItem := slices.MaxFunc(refs, func(a, b *logproto.ShortRef) int {
- if a.Through > b.Through {
- return 1
- } else if a.Through < b.Through {
- return -1
- }
- return 0
+ return int(a.Through) - int(b.Through)
})
return refs[0].From, maxItem.Through
}
// convertToChunkRefs converts a []*logproto.ShortRef into v1.ChunkRefs
-// TODO(chaudum): Avoid conversion by transferring v1.ChunkRefs in gRPC request.
func convertToChunkRefs(refs []*logproto.ShortRef) v1.ChunkRefs {
result := make(v1.ChunkRefs, 0, len(refs))
- for _, ref := range refs {
- result = append(result, v1.ChunkRef{From: ref.From, Through: ref.Through, Checksum: ref.Checksum})
+ for i := range refs {
+ result = append(result, v1.ChunkRef(*refs[i]))
}
return result
}
@@ -53,7 +65,7 @@ type blockWithTasks struct {
tasks []Task
}
-func partitionTasks(tasks []Task, blocks []bloomshipper.BlockRef) []blockWithTasks {
+func partitionTasksByBlock(tasks []Task, blocks []bloomshipper.BlockRef) []blockWithTasks {
result := make([]blockWithTasks, 0, len(blocks))
for _, block := range blocks {
@@ -100,40 +112,24 @@ func partitionRequest(req *logproto.FilterChunkRefRequest) []seriesWithInterval
func partitionSeriesByDay(from, through model.Time, seriesWithChunks []*logproto.GroupedChunkRefs) []seriesWithInterval {
result := make([]seriesWithInterval, 0)
- fromDay, throughDay := truncateDay(from), truncateDay(through)
-
- for day := fromDay; day.Equal(throughDay) || day.Before(throughDay); day = day.Add(Day) {
+ for _, day := range daysForRange(from, through) {
minTs, maxTs := model.Latest, model.Earliest
- nextDay := day.Add(Day)
res := make([]*logproto.GroupedChunkRefs, 0, len(seriesWithChunks))
for _, series := range seriesWithChunks {
chunks := series.Refs
- min := sort.Search(len(chunks), func(i int) bool {
- return chunks[i].From >= day
- })
-
- max := sort.Search(len(chunks), func(i int) bool {
- return chunks[i].From >= nextDay
- })
+ var relevantChunks []*logproto.ShortRef
+ minTs, maxTs, relevantChunks = overlappingChunks(day, day.Add(Day), minTs, maxTs, chunks)
- // All chunks fall outside of the range
- if min == len(chunks) || max == 0 || min == max {
+ if len(relevantChunks) == 0 {
continue
}
- if chunks[min].From < minTs {
- minTs = chunks[min].From
- }
- if chunks[max-1].Through > maxTs {
- maxTs = chunks[max-1].Through
- }
-
res = append(res, &logproto.GroupedChunkRefs{
Fingerprint: series.Fingerprint,
Tenant: series.Tenant,
- Refs: chunks[min:max],
+ Refs: relevantChunks,
})
}
@@ -152,3 +148,28 @@ func partitionSeriesByDay(from, through model.Time, seriesWithChunks []*logproto
return result
}
+
+func overlappingChunks(from, through, minTs, maxTs model.Time, chunks []*logproto.ShortRef) (model.Time, model.Time, []*logproto.ShortRef) {
+
+ // chunks are ordered first by `From`. Can disregard all chunks
+ // that start later than the search range ends
+ maxIdx := sort.Search(len(chunks), func(i int) bool {
+ return chunks[i].From > through
+ })
+
+ res := make([]*logproto.ShortRef, 0, len(chunks[:maxIdx]))
+
+ for _, chunk := range chunks[:maxIdx] {
+ // if chunk ends before the search range starts, skip
+ if from.After(chunk.Through) {
+ continue
+ }
+
+ // Bound min & max ranges to the search range
+ minTs = max(min(minTs, chunk.From), from)
+ maxTs = min(max(maxTs, chunk.Through), through)
+ res = append(res, chunk)
+ }
+
+ return minTs, maxTs, res
+}
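
The day-splitting rule encoded by daysForRange (inclusive [from, through]; drop the trailing day when `through` falls exactly on midnight; always keep at least one day) as a standalone sketch using time.Time instead of model.Time; daysFor is an illustrative helper mirroring the test cases below, not the package function.

package main

import (
	"fmt"
	"time"
)

// daysFor returns the start of each day covered by the inclusive range
// [from, through], trimming the last day when `through` is exactly midnight
// but always returning at least one day.
func daysFor(from, through time.Time) []time.Time {
	day := 24 * time.Hour
	fromDay, throughDay := from.Truncate(day), through.Truncate(day)

	if throughDay.Equal(through) && !fromDay.Equal(throughDay) {
		throughDay = throughDay.Add(-day)
	}

	var days []time.Time
	for d := fromDay; !d.After(throughDay); d = d.Add(day) {
		days = append(days, d)
	}
	return days
}

func main() {
	from := time.Date(2024, 1, 24, 4, 0, 0, 0, time.UTC)
	through := time.Date(2024, 1, 25, 0, 0, 0, 0, time.UTC)
	for _, d := range daysFor(from, through) {
		fmt.Println(d.Format("2006-01-02")) // only 2024-01-24: the zeroed end day is trimmed
	}
}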
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index a3f219c326efd..f2be3b0665dd4 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -65,6 +65,69 @@ func TestTruncateDay(t *testing.T) {
}
}
+func TestDaysForRange(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ pairs [2]string
+ exp []string
+ }{
+ {
+ desc: "single day range",
+ pairs: [2]string{"2024-01-24 00:00", "2024-01-24 23:59"},
+ exp: []string{"2024-01-24 00:00"},
+ },
+ {
+ desc: "two consecutive days",
+ pairs: [2]string{"2024-01-24 00:00", "2024-01-25 23:59"},
+ exp: []string{"2024-01-24 00:00", "2024-01-25 00:00"},
+ },
+ {
+ desc: "multiple days range",
+ pairs: [2]string{"2024-01-24 00:00", "2024-01-27 23:59"},
+ exp: []string{"2024-01-24 00:00", "2024-01-25 00:00", "2024-01-26 00:00", "2024-01-27 00:00"},
+ },
+ {
+ desc: "end of month to beginning of next",
+ pairs: [2]string{"2024-01-31 00:00", "2024-02-01 23:59"},
+ exp: []string{"2024-01-31 00:00", "2024-02-01 00:00"},
+ },
+ {
+ desc: "leap year day range",
+ pairs: [2]string{"2024-02-28 00:00", "2024-02-29 23:59"},
+ exp: []string{"2024-02-28 00:00", "2024-02-29 00:00"},
+ },
+ {
+ desc: "two consecutive days not nicely aligned",
+ pairs: [2]string{"2024-01-24 04:00", "2024-01-25 10:00"},
+ exp: []string{"2024-01-24 00:00", "2024-01-25 00:00"},
+ },
+ {
+ desc: "two consecutive days end zeroed trimmed",
+ pairs: [2]string{"2024-01-24 00:00", "2024-01-25 00:00"},
+ exp: []string{"2024-01-24 00:00"},
+ },
+ {
+ desc: "preserve one day",
+ pairs: [2]string{"2024-01-24 00:00", "2024-01-24 00:00"},
+ exp: []string{"2024-01-24 00:00"},
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ from := mktime(tc.pairs[0])
+ through := mktime(tc.pairs[1])
+ result := daysForRange(from, through)
+ var collected []string
+ for _, d := range result {
+ parsed := d.Time().UTC().Format("2006-01-02 15:04")
+ collected = append(collected, parsed)
+ }
+
+ require.Equal(t, tc.exp, collected)
+ })
+ }
+
+}
+
func mkBlockRef(minFp, maxFp uint64) bloomshipper.BlockRef {
return bloomshipper.BlockRef{
Ref: bloomshipper.Ref{
@@ -73,7 +136,7 @@ func mkBlockRef(minFp, maxFp uint64) bloomshipper.BlockRef {
}
}
-func TestPartitionTasks(t *testing.T) {
+func TestPartitionTasksByBlock(t *testing.T) {
t.Run("consecutive block ranges", func(t *testing.T) {
bounds := []bloomshipper.BlockRef{
@@ -93,7 +156,7 @@ func TestPartitionTasks(t *testing.T) {
tasks[i%nTasks].series = append(tasks[i%nTasks].series, &logproto.GroupedChunkRefs{Fingerprint: uint64(i)})
}
- results := partitionTasks(tasks, bounds)
+ results := partitionTasksByBlock(tasks, bounds)
require.Equal(t, 3, len(results)) // ensure we only return bounds in range
actualFingerprints := make([]*logproto.GroupedChunkRefs, 0, nSeries)
@@ -128,7 +191,7 @@ func TestPartitionTasks(t *testing.T) {
task.series = append(task.series, &logproto.GroupedChunkRefs{Fingerprint: uint64(i)})
}
- results := partitionTasks([]Task{task}, bounds)
+ results := partitionTasksByBlock([]Task{task}, bounds)
require.Equal(t, 3, len(results)) // ensure we only return bounds in range
for _, res := range results {
// ensure we have the right number of tasks per bound
@@ -153,9 +216,38 @@ func TestPartitionTasks(t *testing.T) {
},
}
- results := partitionTasks(tasks, bounds)
+ results := partitionTasksByBlock(tasks, bounds)
require.Len(t, results, 0)
})
+
+ t.Run("overlapping and unsorted block ranges", func(t *testing.T) {
+ bounds := []bloomshipper.BlockRef{
+ mkBlockRef(5, 14),
+ mkBlockRef(0, 9),
+ mkBlockRef(10, 19),
+ }
+
+ tasks := []Task{
+ {
+ series: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 6},
+ },
+ },
+ {
+ series: []*logproto.GroupedChunkRefs{
+ {Fingerprint: 12},
+ },
+ },
+ }
+
+ expected := []blockWithTasks{
+ {ref: bounds[0], tasks: tasks}, // both tasks
+ {ref: bounds[1], tasks: tasks[:1]}, // first task
+ {ref: bounds[2], tasks: tasks[1:]}, // second task
+ }
+ results := partitionTasksByBlock(tasks, bounds)
+ require.Equal(t, expected, results)
+ })
}
func TestPartitionRequest(t *testing.T) {
@@ -201,7 +293,7 @@ func TestPartitionRequest(t *testing.T) {
{
Fingerprint: 0x00,
Refs: []*logproto.ShortRef{
- {From: ts.Add(-13 * time.Hour), Through: ts.Add(-12 * time.Hour)},
+ {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)},
{From: ts.Add(13 * time.Hour), Through: ts.Add(14 * time.Hour)},
},
},
@@ -306,35 +398,69 @@ func TestPartitionRequest(t *testing.T) {
{
Fingerprint: 0x00,
Refs: []*logproto.ShortRef{
- {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)},
- {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)},
- {From: ts.Add(-11 * time.Hour), Through: ts.Add(-10 * time.Hour)},
+ {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)}, // previous day
+ {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)}, // previous & target day
+ {From: ts.Add(-11 * time.Hour), Through: ts.Add(-10 * time.Hour)}, // target day
},
},
},
},
exp: []seriesWithInterval{
+ // previous day
{
- interval: bloomshipper.Interval{Start: ts.Add(-14 * time.Hour), End: ts.Add(-11 * time.Hour)},
+ interval: bloomshipper.Interval{Start: ts.Add(-14 * time.Hour), End: ts.Add(-12 * time.Hour)},
day: config.NewDayTime(mktime("2024-01-23 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
Refs: []*logproto.ShortRef{
- {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)},
- {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)},
+ {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)}, // previous day
+ {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)}, // previous & target day
+ },
+ },
+ },
+ },
+ // target day
+ {
+ interval: bloomshipper.Interval{Start: ts.Add(-12 * time.Hour), End: ts.Add(-10 * time.Hour)},
+ day: config.NewDayTime(mktime("2024-01-24 00:00")),
+ series: []*logproto.GroupedChunkRefs{
+ {
+ Fingerprint: 0x00,
+ Refs: []*logproto.ShortRef{
+ {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)}, // previous & target day
+ {From: ts.Add(-11 * time.Hour), Through: ts.Add(-10 * time.Hour)}, // target day
},
},
},
},
+ },
+ },
+
+ "through target day inclusion": {
+ inp: &logproto.FilterChunkRefRequest{
+ // Only search for the target day, but ensure chunks whose through (but not from)
+ // is on the target day are included
+ From: ts.Add(-1 * time.Hour),
+ Through: ts,
+ Refs: []*logproto.GroupedChunkRefs{
+ {
+ Fingerprint: 0x00,
+ Refs: []*logproto.ShortRef{
+ {From: ts.Add(-13 * time.Hour), Through: ts.Add(-1 * time.Hour)}, // previous & target day
+ },
+ },
+ },
+ },
+ exp: []seriesWithInterval{
{
- interval: bloomshipper.Interval{Start: ts.Add(-11 * time.Hour), End: ts.Add(-10 * time.Hour)},
+ interval: bloomshipper.Interval{Start: ts.Add(-12 * time.Hour), End: ts.Add(-1 * time.Hour)},
day: config.NewDayTime(mktime("2024-01-24 00:00")),
series: []*logproto.GroupedChunkRefs{
{
Fingerprint: 0x00,
Refs: []*logproto.ShortRef{
- {From: ts.Add(-11 * time.Hour), Through: ts.Add(-10 * time.Hour)},
+ {From: ts.Add(-13 * time.Hour), Through: ts.Add(-1 * time.Hour)}, // inherited from the chunk
},
},
},
@@ -358,13 +484,13 @@ func TestPartitionRequest(t *testing.T) {
}
}
-func createBlocks(t *testing.T, tenant string, n int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]bloomshipper.BlockRef, []bloomshipper.Meta, []*bloomshipper.CloseableBlockQuerier, [][]v1.SeriesWithBloom) {
+func createBlocks(t *testing.T, tenant string, n int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]bloomshipper.BlockRef, []bloomshipper.Meta, []*v1.Block, [][]v1.SeriesWithBlooms) {
t.Helper()
blockRefs := make([]bloomshipper.BlockRef, 0, n)
metas := make([]bloomshipper.Meta, 0, n)
- queriers := make([]*bloomshipper.CloseableBlockQuerier, 0, n)
- series := make([][]v1.SeriesWithBloom, 0, n)
+ blocks := make([]*v1.Block, 0, n)
+ series := make([][]v1.SeriesWithBlooms, 0, n)
step := (maxFp - minFp) / model.Fingerprint(n)
for i := 0; i < n; i++ {
@@ -398,19 +524,16 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time,
// t.Log(i, j, string(keys[i][j]))
// }
// }
- querier := &bloomshipper.CloseableBlockQuerier{
- BlockQuerier: v1.NewBlockQuerier(block, false, v1.DefaultMaxPageSize),
- BlockRef: blockRef,
- }
- queriers = append(queriers, querier)
+
+ blocks = append(blocks, block)
metas = append(metas, meta)
blockRefs = append(blockRefs, blockRef)
series = append(series, data)
}
- return blockRefs, metas, queriers, series
+ return blockRefs, metas, blocks, series
}
-func createQueryInputFromBlockData(t *testing.T, tenant string, data [][]v1.SeriesWithBloom, nthSeries int) []*logproto.ChunkRef {
+func createQueryInputFromBlockData(t *testing.T, tenant string, data [][]v1.SeriesWithBlooms, nthSeries int) []*logproto.ChunkRef {
t.Helper()
n := 0
res := make([]*logproto.ChunkRef, 0)
@@ -449,3 +572,78 @@ func createBlockRefsFromBlockData(t *testing.T, tenant string, data []*bloomship
}
return res
}
+
+func TestOverlappingChunks(t *testing.T) {
+ mkRef := func(from, through model.Time) *logproto.ShortRef {
+ return &logproto.ShortRef{From: from, Through: through}
+ }
+
+ for _, tc := range []struct {
+ desc string
+ from, through model.Time
+ input []*logproto.ShortRef
+ exp []*logproto.ShortRef
+ expMin, expMax model.Time
+ }{
+ {
+ desc: "simple ordered",
+ from: 0, through: 10,
+ input: []*logproto.ShortRef{
+ mkRef(0, 2),
+ mkRef(3, 5),
+ mkRef(6, 8),
+ mkRef(10, 12),
+ mkRef(14, 16),
+ },
+ exp: []*logproto.ShortRef{
+ mkRef(0, 2),
+ mkRef(3, 5),
+ mkRef(6, 8),
+ mkRef(10, 12),
+ },
+ expMin: 0, expMax: 10,
+ },
+ {
+ desc: "refs through timestamps aren't in monotonic order",
+ from: 0, through: 10,
+ input: []*logproto.ShortRef{
+ mkRef(0, 2),
+ mkRef(3, 5),
+ mkRef(6, 8),
+ mkRef(10, 12),
+ mkRef(14, 16),
+ },
+ exp: []*logproto.ShortRef{
+ mkRef(0, 2),
+ mkRef(3, 5),
+ mkRef(6, 8),
+ mkRef(10, 12),
+ },
+ expMin: 0, expMax: 10,
+ },
+ {
+ desc: "expMin & expMax are within from/through",
+ from: 10, through: 20,
+ input: []*logproto.ShortRef{
+ mkRef(0, 2),
+ mkRef(3, 5),
+ mkRef(6, 8),
+ mkRef(14, 16),
+ mkRef(17, 19),
+ mkRef(21, 30),
+ },
+ exp: []*logproto.ShortRef{
+ mkRef(14, 16),
+ mkRef(17, 19),
+ },
+ expMin: 14, expMax: 19,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ minTs, maxTs, got := overlappingChunks(tc.from, tc.through, model.Latest, model.Earliest, tc.input)
+ require.Equal(t, tc.expMin, minTs)
+ require.Equal(t, tc.expMax, maxTs)
+ require.Equal(t, tc.exp, got)
+ })
+ }
+}
diff --git a/pkg/bloomgateway/worker.go b/pkg/bloomgateway/worker.go
index 6b234db27189c..0fa154c3b413f 100644
--- a/pkg/bloomgateway/worker.go
+++ b/pkg/bloomgateway/worker.go
@@ -8,11 +8,9 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
"github.com/pkg/errors"
- "github.com/prometheus/common/model"
"go.uber.org/atomic"
"github.com/grafana/loki/v3/pkg/queue"
- v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
)
@@ -92,7 +90,6 @@ func (w *worker) running(_ context.Context) error {
w.metrics.tasksDequeued.WithLabelValues(w.id, labelSuccess).Add(float64(len(items)))
tasks := make([]Task, 0, len(items))
- var mb v1.MultiFingerprintBounds
for _, item := range items {
task, ok := item.(Task)
if !ok {
@@ -104,13 +101,10 @@ func (w *worker) running(_ context.Context) error {
w.metrics.queueDuration.WithLabelValues(w.id).Observe(time.Since(task.enqueueTime).Seconds())
FromContext(task.ctx).AddQueueTime(time.Since(task.enqueueTime))
tasks = append(tasks, task)
-
- first, last := getFirstLast(task.series)
- mb = mb.Union(v1.NewBounds(model.Fingerprint(first.Fingerprint), model.Fingerprint(last.Fingerprint)))
}
start = time.Now()
- err = p.runWithBounds(taskCtx, tasks, mb)
+ err = p.processTasks(taskCtx, tasks)
if err != nil {
w.metrics.processDuration.WithLabelValues(w.id, labelFailure).Observe(time.Since(start).Seconds())
diff --git a/pkg/canary/comparator/comparator_test.go b/pkg/canary/comparator/comparator_test.go
index db28545397e3c..2b441b709a2a6 100644
--- a/pkg/canary/comparator/comparator_test.go
+++ b/pkg/canary/comparator/comparator_test.go
@@ -386,7 +386,7 @@ func TestCacheTest(t *testing.T) {
queryResultsDiff = &mockCounter{} // reset counter
mr.countOverTime = 2.3 // value not important
- mr.noCacheCountOvertime = 2.30000005 // different than `countOverTime` value but withing tolerance
+ mr.noCacheCountOvertime = 2.30000005 // different than `countOverTime` value but within tolerance
c.cacheTest(now)
assert.Equal(t, 0, queryResultsDiff.(*mockCounter).count)
diff --git a/pkg/chunkenc/dumb_chunk.go b/pkg/chunkenc/dumb_chunk.go
index e2d520df6e024..ef8548b1438da 100644
--- a/pkg/chunkenc/dumb_chunk.go
+++ b/pkg/chunkenc/dumb_chunk.go
@@ -36,17 +36,18 @@ func (c *dumbChunk) SpaceFor(_ *logproto.Entry) bool {
return len(c.entries) < tmpNumEntries
}
-func (c *dumbChunk) Append(entry *logproto.Entry) error {
+// The dumbChunk does not check for duplicates, and will always return false
+func (c *dumbChunk) Append(entry *logproto.Entry) (bool, error) {
if len(c.entries) == tmpNumEntries {
- return ErrChunkFull
+ return false, ErrChunkFull
}
if len(c.entries) > 0 && c.entries[len(c.entries)-1].Timestamp.After(entry.Timestamp) {
- return ErrOutOfOrder
+ return false, ErrOutOfOrder
}
c.entries = append(c.entries, *entry)
- return nil
+ return false, nil
}
func (c *dumbChunk) Size() int {
diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go
index 3825a6520af5f..f0b17c7750f3d 100644
--- a/pkg/chunkenc/interface.go
+++ b/pkg/chunkenc/interface.go
@@ -132,7 +132,8 @@ func SupportedEncoding() string {
type Chunk interface {
Bounds() (time.Time, time.Time)
SpaceFor(*logproto.Entry) bool
- Append(*logproto.Entry) error
+ // Append returns true if the entry appended was a duplicate
+ Append(*logproto.Entry) (bool, error)
Iterator(ctx context.Context, mintT, maxtT time.Time, direction logproto.Direction, pipeline log.StreamPipeline) (iter.EntryIterator, error)
SampleIterator(ctx context.Context, from, through time.Time, extractor log.StreamSampleExtractor) iter.SampleIterator
// Returns the list of blocks in the chunks.
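For reference, a minimal caller-side sketch of the new Append contract (editorial, not part of the diff; the constructor arguments are illustrative values):

	c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 1500*1024)
	dup, err := c.Append(&logproto.Entry{Timestamp: time.Now(), Line: "hello"})
	if err != nil {
		return err // e.g. ErrChunkFull or ErrOutOfOrder
	}
	if dup {
		// Only unordered head blocks report duplicates: an entry with an identical
		// timestamp and line was already stored, so nothing new was appended.
	}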
diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go
index 107e3c71a97d5..f4e27255633dd 100644
--- a/pkg/chunkenc/memchunk.go
+++ b/pkg/chunkenc/memchunk.go
@@ -181,9 +181,10 @@ func (hb *headBlock) Reset() {
func (hb *headBlock) Bounds() (int64, int64) { return hb.mint, hb.maxt }
-func (hb *headBlock) Append(ts int64, line string, _ labels.Labels) error {
+// The headBlock does not check for duplicates, and will always return false
+func (hb *headBlock) Append(ts int64, line string, _ labels.Labels) (bool, error) {
if !hb.IsEmpty() && hb.maxt > ts {
- return ErrOutOfOrder
+ return false, ErrOutOfOrder
}
hb.entries = append(hb.entries, entry{t: ts, s: line})
@@ -193,7 +194,7 @@ func (hb *headBlock) Append(ts int64, line string, _ labels.Labels) error {
hb.maxt = ts
hb.size += len(line)
- return nil
+ return false, nil
}
func (hb *headBlock) Serialise(pool WriterPool) ([]byte, error) {
@@ -340,7 +341,7 @@ func (hb *headBlock) Convert(version HeadBlockFmt, symbolizer *symbolizer) (Head
out := version.NewBlock(symbolizer)
for _, e := range hb.entries {
- if err := out.Append(e.t, e.s, e.structuredMetadata); err != nil {
+ if _, err := out.Append(e.t, e.s, e.structuredMetadata); err != nil {
return nil, err
}
}
@@ -834,27 +835,29 @@ func (c *MemChunk) Utilization() float64 {
}
// Append implements Chunk.
-func (c *MemChunk) Append(entry *logproto.Entry) error {
+// The MemChunk may return true or false, depending on what the head block returns.
+func (c *MemChunk) Append(entry *logproto.Entry) (bool, error) {
entryTimestamp := entry.Timestamp.UnixNano()
// If the head block is empty but there are cut blocks, we have to make
// sure the new entry is not out of order compared to the previous block
if c.headFmt < UnorderedHeadBlockFmt && c.head.IsEmpty() && len(c.blocks) > 0 && c.blocks[len(c.blocks)-1].maxt > entryTimestamp {
- return ErrOutOfOrder
+ return false, ErrOutOfOrder
}
if c.format < ChunkFormatV4 {
entry.StructuredMetadata = nil
}
- if err := c.head.Append(entryTimestamp, entry.Line, logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata)); err != nil {
- return err
+ dup, err := c.head.Append(entryTimestamp, entry.Line, logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata))
+ if err != nil {
+ return dup, err
}
if c.head.UncompressedSize() >= c.blockSize {
- return c.cut()
+ return false, c.cut()
}
- return nil
+ return dup, nil
}
// Close implements Chunk.
@@ -1122,7 +1125,7 @@ func (c *MemChunk) Rebound(start, end time.Time, filter filter.Func) (Chunk, err
if filter != nil && filter(entry.Timestamp, entry.Line, logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata)...) {
continue
}
- if err := newChunk.Append(&entry); err != nil {
+ if _, err := newChunk.Append(&entry); err != nil {
return nil, err
}
}
diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go
index 09eab22f74be4..1d9ef3eea21f7 100644
--- a/pkg/chunkenc/memchunk_test.go
+++ b/pkg/chunkenc/memchunk_test.go
@@ -88,7 +88,8 @@ func TestBlocksInclusive(t *testing.T) {
for _, format := range allPossibleFormats {
chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt
chk := NewMemChunk(chunkfmt, enc, headfmt, testBlockSize, testTargetSize)
- err := chk.Append(logprotoEntry(1, "1"))
+ dup, err := chk.Append(logprotoEntry(1, "1"))
+ require.False(t, dup)
require.Nil(t, err)
err = chk.cut()
require.Nil(t, err)
@@ -178,7 +179,9 @@ func TestBlock(t *testing.T) {
}
for _, c := range cases {
- require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(c.ts, c.str, c.lbs)))
+ dup, err := chk.Append(logprotoEntryWithStructuredMetadata(c.ts, c.str, c.lbs))
+ require.False(t, dup)
+ require.NoError(t, err)
if c.cut {
require.NoError(t, chk.cut())
}
@@ -442,7 +445,9 @@ func TestSerialization(t *testing.T) {
if appendWithStructuredMetadata {
entry.StructuredMetadata = []logproto.LabelAdapter{{Name: "foo", Value: strconv.Itoa(i)}}
}
- require.NoError(t, chk.Append(entry))
+ dup, err := chk.Append(entry)
+ require.False(t, dup)
+ require.NoError(t, err)
}
require.NoError(t, chk.Close())
@@ -470,7 +475,7 @@ func TestSerialization(t *testing.T) {
}
require.NoError(t, it.Error())
- countExtractor = func() log.StreamSampleExtractor {
+ extractor := func() log.StreamSampleExtractor {
ex, err := log.NewLineSampleExtractor(log.CountExtractor, nil, nil, false, false)
if err != nil {
panic(err)
@@ -478,7 +483,7 @@ func TestSerialization(t *testing.T) {
return ex.ForStream(labels.Labels{})
}()
- sampleIt := bc.SampleIterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), countExtractor)
+ sampleIt := bc.SampleIterator(context.Background(), time.Unix(0, 0), time.Unix(0, math.MaxInt64), extractor)
for i := 0; i < numSamples; i++ {
require.True(t, sampleIt.Next(), i)
@@ -527,7 +532,9 @@ func TestChunkFilling(t *testing.T) {
i := int64(0)
for ; chk.SpaceFor(entry) && i < 30; i++ {
entry.Timestamp = time.Unix(0, i)
- require.NoError(t, chk.Append(entry))
+ dup, err := chk.Append(entry)
+ require.False(t, dup)
+ require.NoError(t, err)
}
require.Equal(t, int64(lines), i)
@@ -576,7 +583,9 @@ func TestGZIPChunkTargetSize(t *testing.T) {
Line: string(logLine),
}
entry.Timestamp = time.Unix(0, i)
- require.NoError(t, chk.Append(entry))
+ dup, err := chk.Append(entry)
+ require.False(t, dup)
+ require.NoError(t, err)
}
// 5000 is a limit to make sure the test doesn't run away; we shouldn't need this many log lines to make a 1MB chunk
@@ -606,37 +615,61 @@ func TestMemChunk_AppendOutOfOrder(t *testing.T) {
tests := map[string]tester{
"append out of order in the same block": func(t *testing.T, chk *MemChunk) {
- assert.NoError(t, chk.Append(logprotoEntry(5, "test")))
- assert.NoError(t, chk.Append(logprotoEntry(6, "test")))
+ dup, err := chk.Append(logprotoEntry(5, "test"))
+ assert.False(t, dup)
+ assert.NoError(t, err)
+ dup, err = chk.Append(logprotoEntry(6, "test"))
+ assert.False(t, dup)
+ assert.NoError(t, err)
if chk.headFmt == OrderedHeadBlockFmt {
- assert.EqualError(t, chk.Append(logprotoEntry(1, "test")), ErrOutOfOrder.Error())
+ dup, err = chk.Append(logprotoEntry(1, "test"))
+ assert.EqualError(t, err, ErrOutOfOrder.Error())
+ assert.False(t, dup)
} else {
- assert.NoError(t, chk.Append(logprotoEntry(1, "test")))
+ dup, err = chk.Append(logprotoEntry(1, "test"))
+ assert.False(t, dup)
+ assert.NoError(t, err)
}
},
"append out of order in a new block right after cutting the previous one": func(t *testing.T, chk *MemChunk) {
- assert.NoError(t, chk.Append(logprotoEntry(5, "test")))
- assert.NoError(t, chk.Append(logprotoEntry(6, "test")))
+ dup, err := chk.Append(logprotoEntry(5, "test"))
+ assert.False(t, dup)
+ assert.NoError(t, err)
+ dup, err = chk.Append(logprotoEntry(6, "test"))
+ assert.False(t, dup)
+ assert.NoError(t, err)
assert.NoError(t, chk.cut())
if chk.headFmt == OrderedHeadBlockFmt {
- assert.EqualError(t, chk.Append(logprotoEntry(1, "test")), ErrOutOfOrder.Error())
+ dup, err = chk.Append(logprotoEntry(1, "test"))
+ assert.False(t, dup)
+ assert.EqualError(t, err, ErrOutOfOrder.Error())
} else {
- assert.NoError(t, chk.Append(logprotoEntry(1, "test")))
+ dup, err = chk.Append(logprotoEntry(1, "test"))
+ assert.False(t, dup)
+ assert.NoError(t, err)
}
},
"append out of order in a new block after multiple cuts": func(t *testing.T, chk *MemChunk) {
- assert.NoError(t, chk.Append(logprotoEntry(5, "test")))
+ dup, err := chk.Append(logprotoEntry(5, "test"))
+ assert.False(t, dup)
+ assert.NoError(t, err)
assert.NoError(t, chk.cut())
- assert.NoError(t, chk.Append(logprotoEntry(6, "test")))
+ dup, err = chk.Append(logprotoEntry(6, "test"))
+ assert.False(t, dup)
+ assert.NoError(t, err)
assert.NoError(t, chk.cut())
if chk.headFmt == OrderedHeadBlockFmt {
- assert.EqualError(t, chk.Append(logprotoEntry(1, "test")), ErrOutOfOrder.Error())
+ dup, err = chk.Append(logprotoEntry(1, "test"))
+ assert.False(t, dup)
+ assert.EqualError(t, err, ErrOutOfOrder.Error())
} else {
- assert.NoError(t, chk.Append(logprotoEntry(1, "test")))
+ dup, err = chk.Append(logprotoEntry(1, "test"))
+ assert.False(t, dup)
+ assert.NoError(t, err)
}
},
}
@@ -705,7 +738,7 @@ func TestChunkStats(t *testing.T) {
if !c.SpaceFor(entry) {
break
}
- if err := c.Append(entry); err != nil {
+ if _, err := c.Append(entry); err != nil {
t.Fatal(err)
}
inserted++
@@ -826,7 +859,7 @@ func BenchmarkWrite(b *testing.B) {
c := NewMemChunk(ChunkFormatV3, enc, f, testBlockSize, testTargetSize)
// adds until full so we trigger cut which serialize using gzip
for c.SpaceFor(entry) {
- _ = c.Append(entry)
+ _, _ = c.Append(entry)
entry.Timestamp = time.Unix(0, i)
entry.Line = testdata.LogString(i)
if withStructuredMetadata {
@@ -977,7 +1010,7 @@ func BenchmarkHeadBlockIterator(b *testing.B) {
}
for i := 0; i < j; i++ {
- if err := h.Append(int64(i), "this is the append string", structuredMetadata); err != nil {
+ if _, err := h.Append(int64(i), "this is the append string", structuredMetadata); err != nil {
b.Fatal(err)
}
}
@@ -1009,7 +1042,7 @@ func BenchmarkHeadBlockSampleIterator(b *testing.B) {
}
for i := 0; i < j; i++ {
- if err := h.Append(int64(i), "this is the append string", structuredMetadata); err != nil {
+ if _, err := h.Append(int64(i), "this is the append string", structuredMetadata); err != nil {
b.Fatal(err)
}
}
@@ -1034,13 +1067,13 @@ func TestMemChunk_IteratorBounds(t *testing.T) {
t.Helper()
c := NewMemChunk(ChunkFormatV3, EncNone, DefaultTestHeadBlockFmt, 1e6, 1e6)
- if err := c.Append(&logproto.Entry{
+ if _, err := c.Append(&logproto.Entry{
Timestamp: time.Unix(0, 1),
Line: "1",
}); err != nil {
t.Fatal(err)
}
- if err := c.Append(&logproto.Entry{
+ if _, err := c.Append(&logproto.Entry{
Timestamp: time.Unix(0, 2),
Line: "2",
}); err != nil {
@@ -1099,7 +1132,9 @@ func TestMemchunkLongLine(t *testing.T) {
c := NewMemChunk(ChunkFormatV3, enc, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize)
for i := 1; i <= 10; i++ {
- require.NoError(t, c.Append(&logproto.Entry{Timestamp: time.Unix(0, int64(i)), Line: strings.Repeat("e", 200000)}))
+ dup, err := c.Append(&logproto.Entry{Timestamp: time.Unix(0, int64(i)), Line: strings.Repeat("e", 200000)})
+ require.False(t, dup)
+ require.NoError(t, err)
}
noopStreamPipeline := log.NewNoopPipeline().ForStream(labels.Labels{})
it, err := c.Iterator(context.Background(), time.Unix(0, 0), time.Unix(0, 100), logproto.FORWARD, noopStreamPipeline)
@@ -1143,7 +1178,9 @@ func TestCheckpointEncoding(t *testing.T) {
}},
}
require.Equal(t, true, c.SpaceFor(entry))
- require.Nil(t, c.Append(entry))
+ dup, err := c.Append(entry)
+ require.False(t, dup)
+ require.Nil(t, err)
}
// cut it
@@ -1178,7 +1215,9 @@ func TestCheckpointEncoding(t *testing.T) {
Line: fmt.Sprintf("hi there - %d", i),
}
require.Equal(t, true, c.SpaceFor(entry))
- require.Nil(t, c.Append(entry))
+ dup, err := c.Append(entry)
+ require.False(t, dup)
+ require.Nil(t, err)
}
// ensure new blocks are not cut
@@ -1321,7 +1360,9 @@ func Test_HeadIteratorReverse(t *testing.T) {
}
var i int64
for e := genEntry(i); c.SpaceFor(e); e, i = genEntry(i+1), i+1 {
- require.NoError(t, c.Append(e))
+ dup, err := c.Append(e)
+ require.False(t, dup)
+ require.NoError(t, err)
}
assertOrder := func(t *testing.T, total int64) {
@@ -1427,7 +1468,7 @@ func TestMemChunk_Rebound(t *testing.T) {
func buildTestMemChunk(t *testing.T, from, through time.Time) *MemChunk {
chk := NewMemChunk(ChunkFormatV3, EncGZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
for ; from.Before(through); from = from.Add(time.Second) {
- err := chk.Append(&logproto.Entry{
+ _, err := chk.Append(&logproto.Entry{
Line: from.String(),
Timestamp: from,
})
@@ -1558,7 +1599,7 @@ func buildFilterableTestMemChunk(t *testing.T, from, through time.Time, matching
if matchingFrom != nil && matchingTo != nil &&
(from.Equal(*matchingFrom) || (from.After(*matchingFrom) && (from.Before(*matchingTo)))) {
t.Logf("%v matching line", from.String())
- err := chk.Append(&logproto.Entry{
+ _, err := chk.Append(&logproto.Entry{
Line: fmt.Sprintf("matching %v", from.String()),
Timestamp: from,
StructuredMetadata: structuredMetadata,
@@ -1570,7 +1611,7 @@ func buildFilterableTestMemChunk(t *testing.T, from, through time.Time, matching
if withStructuredMetadata {
structuredMetadata = push.LabelsAdapter{{Name: "ding", Value: "dong"}}
}
- err := chk.Append(&logproto.Entry{
+ _, err := chk.Append(&logproto.Entry{
Line: from.String(),
Timestamp: from,
StructuredMetadata: structuredMetadata,
@@ -1700,7 +1741,9 @@ func TestMemChunk_SpaceFor(t *testing.T) {
chk.blocks = make([]block, tc.nBlocks)
chk.cutBlockSize = tc.cutBlockSize
for i := 0; i < tc.headSize; i++ {
- require.NoError(t, chk.head.Append(int64(i), "a", nil))
+ dup, err := chk.head.Append(int64(i), "a", nil)
+ require.False(t, dup)
+ require.NoError(t, err)
}
expect := tc.expect
@@ -1724,23 +1767,31 @@ func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) {
{Name: "job", Value: "fake"},
}
chk := newMemChunkWithFormat(ChunkFormatV4, enc, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
- require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(1, "lineA", []logproto.LabelAdapter{
+ dup, err := chk.Append(logprotoEntryWithStructuredMetadata(1, "lineA", []logproto.LabelAdapter{
{Name: "traceID", Value: "123"},
{Name: "user", Value: "a"},
- })))
- require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(2, "lineB", []logproto.LabelAdapter{
+ }))
+ require.False(t, dup)
+ require.NoError(t, err)
+ dup, err = chk.Append(logprotoEntryWithStructuredMetadata(2, "lineB", []logproto.LabelAdapter{
{Name: "traceID", Value: "456"},
{Name: "user", Value: "b"},
- })))
+ }))
+ require.False(t, dup)
+ require.NoError(t, err)
require.NoError(t, chk.cut())
- require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(3, "lineC", []logproto.LabelAdapter{
+ dup, err = chk.Append(logprotoEntryWithStructuredMetadata(3, "lineC", []logproto.LabelAdapter{
{Name: "traceID", Value: "789"},
{Name: "user", Value: "c"},
- })))
- require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(4, "lineD", []logproto.LabelAdapter{
+ }))
+ require.False(t, dup)
+ require.NoError(t, err)
+ dup, err = chk.Append(logprotoEntryWithStructuredMetadata(4, "lineD", []logproto.LabelAdapter{
{Name: "traceID", Value: "123"},
{Name: "user", Value: "d"},
- })))
+ }))
+ require.False(t, dup)
+ require.NoError(t, err)
// The expected bytes is the sum of bytes decompressed and bytes read from the head chunk.
// First we add the bytes read from the store (aka decompressed). That's
diff --git a/pkg/chunkenc/symbols.go b/pkg/chunkenc/symbols.go
index bed4035400c74..a3e2a5b011e2f 100644
--- a/pkg/chunkenc/symbols.go
+++ b/pkg/chunkenc/symbols.go
@@ -6,6 +6,7 @@ import (
"fmt"
"hash"
"io"
+ "strings"
"sync"
"github.com/pkg/errors"
@@ -78,6 +79,7 @@ func (s *symbolizer) add(lbl string) uint32 {
idx, ok = s.symbolsMap[lbl]
if !ok {
+ lbl = strings.Clone(lbl)
idx = uint32(len(s.labels))
s.symbolsMap[lbl] = idx
s.labels = append(s.labels, lbl)
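The Clone above guards against retaining a label string that aliases caller-owned memory (for example a string built with unsafe over a reusable buffer). A small standalone illustration of the hazard (editorial; names and values are illustrative only):

	buf := []byte("level")
	aliased := unsafe.String(&buf[0], len(buf)) // shares buf's backing memory
	kept := aliased                             // storing the alias would be unsafe
	cloned := strings.Clone(aliased)            // private, stable copy

	buf[0] = 'L'              // the caller reuses the buffer
	fmt.Println(kept, cloned) // "Level level": the alias changed, the clone did not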
diff --git a/pkg/chunkenc/unordered.go b/pkg/chunkenc/unordered.go
index 788f9c0a7c45b..807f80b2c0f87 100644
--- a/pkg/chunkenc/unordered.go
+++ b/pkg/chunkenc/unordered.go
@@ -5,6 +5,7 @@ import (
"context"
"encoding/binary"
"fmt"
+
"io"
"math"
"time"
@@ -34,7 +35,7 @@ type HeadBlock interface {
Entries() int
UncompressedSize() int
Convert(HeadBlockFmt, *symbolizer) (HeadBlock, error)
- Append(int64, string, labels.Labels) error
+ Append(int64, string, labels.Labels) (bool, error)
Iterator(
ctx context.Context,
direction logproto.Direction,
@@ -110,7 +111,8 @@ func (e *nsEntries) ValueAtDimension(_ uint64) int64 {
return e.ts
}
-func (hb *unorderedHeadBlock) Append(ts int64, line string, structuredMetadata labels.Labels) error {
+// unorderedHeadBlock will return true if the entry is a duplicate, false otherwise
+func (hb *unorderedHeadBlock) Append(ts int64, line string, structuredMetadata labels.Labels) (bool, error) {
if hb.format < UnorderedWithStructuredMetadataHeadBlockFmt {
// structuredMetadata must be ignored for the previous head block formats
structuredMetadata = nil
@@ -135,7 +137,7 @@ func (hb *unorderedHeadBlock) Append(ts int64, line string, structuredMetadata l
for _, et := range displaced[0].(*nsEntries).entries {
if et.line == line {
e.entries = displaced[0].(*nsEntries).entries
- return nil
+ return true, nil
}
}
e.entries = append(displaced[0].(*nsEntries).entries, nsEntry{line, hb.symbolizer.Add(structuredMetadata)})
@@ -156,7 +158,7 @@ func (hb *unorderedHeadBlock) Append(ts int64, line string, structuredMetadata l
hb.size += len(structuredMetadata) * 2 * 4 // 4 bytes per label and value pair as structuredMetadataSymbols
hb.lines++
- return nil
+ return false, nil
}
func metaLabelsLen(metaLabels labels.Labels) int {
@@ -443,7 +445,8 @@ func (hb *unorderedHeadBlock) Convert(version HeadBlockFmt, symbolizer *symboliz
0,
math.MaxInt64,
func(_ *stats.Context, ts int64, line string, structuredMetadataSymbols symbols) error {
- return out.Append(ts, line, hb.symbolizer.Lookup(structuredMetadataSymbols))
+ _, err := out.Append(ts, line, hb.symbolizer.Lookup(structuredMetadataSymbols))
+ return err
},
)
return out, err
@@ -583,7 +586,7 @@ func (hb *unorderedHeadBlock) LoadBytes(b []byte) error {
}
}
- if err := hb.Append(ts, line, hb.symbolizer.Lookup(structuredMetadataSymbols)); err != nil {
+ if _, err := hb.Append(ts, line, hb.symbolizer.Lookup(structuredMetadataSymbols)); err != nil {
return err
}
}
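The duplicate path in unorderedHeadBlock.Append only fires when both the timestamp and the line match an existing entry. A minimal sketch of the resulting behaviour (editorial; it uses the package-internal constructors that also appear in the tests below):

	hb := newUnorderedHeadBlock(UnorderedWithStructuredMetadataHeadBlockFmt, newSymbolizer())
	dup, _ := hb.Append(1, "a", nil) // false: first entry at ts=1
	dup, _ = hb.Append(1, "a", nil)  // true: identical timestamp and line, nothing stored
	dup, _ = hb.Append(1, "b", nil)  // false: same timestamp, different line
	_ = dup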
diff --git a/pkg/chunkenc/unordered_test.go b/pkg/chunkenc/unordered_test.go
index f4930952660fc..43c07d0f835f3 100644
--- a/pkg/chunkenc/unordered_test.go
+++ b/pkg/chunkenc/unordered_test.go
@@ -35,7 +35,9 @@ func iterEq(t *testing.T, exp []entry, got iter.EntryIterator) {
func Test_forEntriesEarlyReturn(t *testing.T) {
hb := newUnorderedHeadBlock(UnorderedHeadBlockFmt, newSymbolizer())
for i := 0; i < 10; i++ {
- require.Nil(t, hb.Append(int64(i), fmt.Sprint(i), labels.Labels{{Name: "i", Value: fmt.Sprint(i)}}))
+ dup, err := hb.Append(int64(i), fmt.Sprint(i), labels.Labels{{Name: "i", Value: fmt.Sprint(i)}})
+ require.False(t, dup)
+ require.Nil(t, err)
}
// forward
@@ -86,6 +88,7 @@ func Test_Unordered_InsertRetrieval(t *testing.T) {
desc string
input, exp []entry
dir logproto.Direction
+ hasDup bool
}{
{
desc: "simple forward",
@@ -152,7 +155,8 @@ func Test_Unordered_InsertRetrieval(t *testing.T) {
exp: []entry{
{0, "a", nil}, {0, "b", nil}, {1, "c", nil},
},
- dir: logproto.FORWARD,
+ dir: logproto.FORWARD,
+ hasDup: true,
},
{
desc: "ts remove exact dupe backward",
@@ -162,7 +166,8 @@ func Test_Unordered_InsertRetrieval(t *testing.T) {
exp: []entry{
{1, "c", nil}, {0, "b", nil}, {0, "a", nil},
},
- dir: logproto.BACKWARD,
+ dir: logproto.BACKWARD,
+ hasDup: true,
},
} {
t.Run(tc.desc, func(t *testing.T) {
@@ -172,9 +177,17 @@ func Test_Unordered_InsertRetrieval(t *testing.T) {
} {
t.Run(format.String(), func(t *testing.T) {
hb := newUnorderedHeadBlock(format, newSymbolizer())
+ dup := false
for _, e := range tc.input {
- require.Nil(t, hb.Append(e.t, e.s, e.structuredMetadata))
+ tmpdup, err := hb.Append(e.t, e.s, e.structuredMetadata)
+ if !dup { // only set dup if it's not already true
+ if tmpdup { // remember that at least one append reported a duplicate
+ dup = true
+ }
+ }
+ require.Nil(t, err)
}
+ require.Equal(t, tc.hasDup, dup)
itr := hb.Iterator(
context.Background(),
@@ -250,7 +263,9 @@ func Test_UnorderedBoundedIter(t *testing.T) {
t.Run(format.String(), func(t *testing.T) {
hb := newUnorderedHeadBlock(format, newSymbolizer())
for _, e := range tc.input {
- require.Nil(t, hb.Append(e.t, e.s, e.structuredMetadata))
+ dup, err := hb.Append(e.t, e.s, e.structuredMetadata)
+ require.False(t, dup)
+ require.Nil(t, err)
}
itr := hb.Iterator(
@@ -281,9 +296,15 @@ func TestHeadBlockInterop(t *testing.T) {
unorderedWithStructuredMetadata := newUnorderedHeadBlock(UnorderedWithStructuredMetadataHeadBlockFmt, newSymbolizer())
for i := 0; i < 100; i++ {
metaLabels := labels.Labels{{Name: "foo", Value: fmt.Sprint(99 - i)}}
- require.Nil(t, unordered.Append(int64(99-i), fmt.Sprint(99-i), metaLabels))
- require.Nil(t, unorderedWithStructuredMetadata.Append(int64(99-i), fmt.Sprint(99-i), metaLabels))
- require.Nil(t, ordered.Append(int64(i), fmt.Sprint(i), labels.Labels{{Name: "foo", Value: fmt.Sprint(i)}}))
+ dup, err := unordered.Append(int64(99-i), fmt.Sprint(99-i), metaLabels)
+ require.False(t, dup)
+ require.Nil(t, err)
+ dup, err = unorderedWithStructuredMetadata.Append(int64(99-i), fmt.Sprint(99-i), metaLabels)
+ require.False(t, dup)
+ require.Nil(t, err)
+ dup, err = ordered.Append(int64(i), fmt.Sprint(i), labels.Labels{{Name: "foo", Value: fmt.Sprint(i)}})
+ require.False(t, dup)
+ require.Nil(t, err)
}
// turn to bytes
@@ -359,14 +380,14 @@ func BenchmarkHeadBlockWrites(b *testing.B) {
headBlockFn := func() func(int64, string, labels.Labels) {
hb := &headBlock{}
return func(ts int64, line string, metaLabels labels.Labels) {
- _ = hb.Append(ts, line, metaLabels)
+ _, _ = hb.Append(ts, line, metaLabels)
}
}
unorderedHeadBlockFn := func() func(int64, string, labels.Labels) {
hb := newUnorderedHeadBlock(UnorderedHeadBlockFmt, nil)
return func(ts int64, line string, metaLabels labels.Labels) {
- _ = hb.Append(ts, line, metaLabels)
+ _, _ = hb.Append(ts, line, metaLabels)
}
}
@@ -432,10 +453,12 @@ func TestUnorderedChunkIterators(t *testing.T) {
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for i := 0; i < 100; i++ {
// push in reverse order
- require.Nil(t, c.Append(&logproto.Entry{
+ dup, err := c.Append(&logproto.Entry{
Timestamp: time.Unix(int64(99-i), 0),
Line: fmt.Sprint(99 - i),
- }))
+ })
+ require.False(t, dup)
+ require.Nil(t, err)
// ensure we have a mix of cut blocks + head block.
if i%30 == 0 {
@@ -574,7 +597,7 @@ func TestUnorderedIteratorCountsAllEntries(t *testing.T) {
func chunkFrom(xs []logproto.Entry) ([]byte, error) {
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for _, x := range xs {
- if err := c.Append(&x); err != nil {
+ if _, err := c.Append(&x); err != nil {
return nil, err
}
}
@@ -634,7 +657,9 @@ func TestReorder(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for _, x := range tc.input {
- require.Nil(t, c.Append(&x))
+ dup, err := c.Append(&x)
+ require.False(t, dup)
+ require.Nil(t, err)
}
require.Nil(t, c.Close())
b, err := c.Bytes()
@@ -657,10 +682,12 @@ func TestReorderAcrossBlocks(t *testing.T) {
{3, 7},
} {
for _, x := range batch {
- require.Nil(t, c.Append(&logproto.Entry{
+ dup, err := c.Append(&logproto.Entry{
Timestamp: time.Unix(int64(x), 0),
Line: fmt.Sprint(x),
- }))
+ })
+ require.False(t, dup)
+ require.Nil(t, err)
}
require.Nil(t, c.cut())
}
@@ -705,7 +732,9 @@ func Test_HeadIteratorHash(t *testing.T) {
"ordered": &headBlock{},
} {
t.Run(name, func(t *testing.T) {
- require.NoError(t, b.Append(1, "foo", labels.Labels{{Name: "foo", Value: "bar"}}))
+ dup, err := b.Append(1, "foo", labels.Labels{{Name: "foo", Value: "bar"}})
+ require.False(t, dup)
+ require.NoError(t, err)
eit := b.Iterator(context.Background(), logproto.BACKWARD, 0, 2, log.NewNoopPipeline().ForStream(lbs))
for eit.Next() {
diff --git a/pkg/chunkenc/util_test.go b/pkg/chunkenc/util_test.go
index a1860f9ae297a..de74f7946e2ad 100644
--- a/pkg/chunkenc/util_test.go
+++ b/pkg/chunkenc/util_test.go
@@ -33,7 +33,7 @@ func generateData(enc Encoding, chunksCount, blockSize, targetSize int) ([]Chunk
c := NewMemChunk(ChunkFormatV4, enc, UnorderedWithStructuredMetadataHeadBlockFmt, blockSize, targetSize)
for c.SpaceFor(entry) {
size += uint64(len(entry.Line))
- _ = c.Append(entry)
+ _, _ = c.Append(entry)
i++
entry = logprotoEntry(i, testdata.LogString(i))
}
@@ -55,7 +55,7 @@ func fillChunkClose(c Chunk, close bool) int64 {
Line: testdata.LogString(i),
}
for c.SpaceFor(entry) {
- err := c.Append(entry)
+ _, err := c.Append(entry)
if err != nil {
panic(err)
}
@@ -81,7 +81,7 @@ func fillChunkRandomOrder(c Chunk, close bool) {
}
for c.SpaceFor(entry) {
- err := c.Append(entry)
+ _, err := c.Append(entry)
if err != nil {
panic(err)
}
diff --git a/pkg/compactor/retention/retention_test.go b/pkg/compactor/retention/retention_test.go
index 6c261d34799e5..a3f157dc77743 100644
--- a/pkg/compactor/retention/retention_test.go
+++ b/pkg/compactor/retention/retention_test.go
@@ -223,11 +223,13 @@ func createChunk(t testing.TB, userID string, lbs labels.Labels, from model.Time
chunkEnc := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, blockSize, targetSize)
for ts := from; !ts.After(through); ts = ts.Add(1 * time.Minute) {
- require.NoError(t, chunkEnc.Append(&logproto.Entry{
+ dup, err := chunkEnc.Append(&logproto.Entry{
Timestamp: ts.Time(),
Line: ts.String(),
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("foo", ts.String())),
- }))
+ })
+ require.False(t, dup)
+ require.NoError(t, err)
}
require.NoError(t, chunkEnc.Close())
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 87036b5e23c37..ebe531e2ab4b2 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -1,6 +1,7 @@
package distributor
import (
+ "bytes"
"context"
"flag"
"fmt"
@@ -11,11 +12,11 @@ import (
"strings"
"time"
"unicode"
+ "unsafe"
"github.com/buger/jsonparser"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/go-logfmt/logfmt"
"github.com/gogo/status"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/collector/pdata/plog"
@@ -45,6 +46,7 @@ import (
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/logql/log/logfmt"
"github.com/grafana/loki/v3/pkg/logql/syntax"
"github.com/grafana/loki/v3/pkg/runtime"
"github.com/grafana/loki/v3/pkg/util"
@@ -917,56 +919,57 @@ func detectLogLevelFromLogEntry(entry logproto.Entry, structuredMetadata labels.
}
func extractLogLevelFromLogLine(log string) string {
- var v string
+ logSlice := unsafe.Slice(unsafe.StringData(log), len(log))
+ var v []byte
if isJSON(log) {
- v = getValueUsingJSONParser(log)
+ v = getValueUsingJSONParser(logSlice)
} else {
- v = getValueUsingLogfmtParser(log)
+ v = getValueUsingLogfmtParser(logSlice)
}
- switch strings.ToLower(v) {
- case "trace", "trc":
+ switch {
+ case bytes.EqualFold(v, []byte("trace")), bytes.EqualFold(v, []byte("trc")):
return logLevelTrace
- case "debug", "dbg":
+ case bytes.EqualFold(v, []byte("debug")), bytes.EqualFold(v, []byte("dbg")):
return logLevelDebug
- case "info", "inf":
+ case bytes.EqualFold(v, []byte("info")), bytes.EqualFold(v, []byte("inf")):
return logLevelInfo
- case "warn", "wrn":
+ case bytes.EqualFold(v, []byte("warn")), bytes.EqualFold(v, []byte("wrn")):
return logLevelWarn
- case "error", "err":
+ case bytes.EqualFold(v, []byte("error")), bytes.EqualFold(v, []byte("err")):
return logLevelError
- case "critical":
+ case bytes.EqualFold(v, []byte("critical")):
return logLevelCritical
- case "fatal":
+ case bytes.EqualFold(v, []byte("fatal")):
return logLevelFatal
default:
return detectLevelFromLogLine(log)
}
}
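// Editorial sketch, not part of the change: unsafe.Slice(unsafe.StringData(s), len(s))
// views the log line as a []byte without copying; the slice aliases the string and
// must be treated as read-only. bytes.EqualFold then compares case-insensitively
// without allocating, e.g.:
//
//	line := "ts=1 level=INFO msg=ok"
//	b := unsafe.Slice(unsafe.StringData(line), len(line))
//	_ = bytes.EqualFold(b[11:15], []byte("info")) // true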
-func getValueUsingLogfmtParser(line string) string {
- equalIndex := strings.Index(line, "=")
+func getValueUsingLogfmtParser(line []byte) []byte {
+ equalIndex := bytes.Index(line, []byte("="))
if len(line) == 0 || equalIndex == -1 {
- return logLevelUnknown
+ return nil
}
- d := logfmt.NewDecoder(strings.NewReader(line))
- d.ScanRecord()
- for d.ScanKeyval() {
+
+ d := logfmt.NewDecoder(line)
+ for !d.EOL() && d.ScanKeyval() {
if _, ok := allowedLabelsForLevel[string(d.Key())]; ok {
- return string(d.Value())
+ return (d.Value())
}
}
- return logLevelUnknown
+ return nil
}
-func getValueUsingJSONParser(log string) string {
+func getValueUsingJSONParser(log []byte) []byte {
for allowedLabel := range allowedLabelsForLevel {
- l, err := jsonparser.GetString([]byte(log), allowedLabel)
+ l, _, _, err := jsonparser.Get(log, allowedLabel)
if err == nil {
return l
}
}
- return logLevelUnknown
+ return nil
}
func isJSON(line string) bool {
@@ -991,6 +994,10 @@ func isJSON(line string) bool {
}
func detectLevelFromLogLine(log string) string {
+ if strings.Contains(log, "info:") || strings.Contains(log, "INFO:") ||
+ strings.Contains(log, "info") || strings.Contains(log, "INFO") {
+ return logLevelInfo
+ }
if strings.Contains(log, "err:") || strings.Contains(log, "ERR:") ||
strings.Contains(log, "error") || strings.Contains(log, "ERROR") {
return logLevelError
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index dc58f758835c0..19019e62dd4a3 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -1711,6 +1711,20 @@ func Test_detectLogLevelFromLogEntry(t *testing.T) {
},
expectedLogLevel: logLevelError,
},
+ {
+ name: "json log line with an INFO in block case",
+ entry: logproto.Entry{
+ Line: `{"foo":"bar","msg":"message with keyword INFO get picked up"}`,
+ },
+ expectedLogLevel: logLevelInfo,
+ },
+ {
+ name: "logfmt log line with an INFO and not level returns info log level",
+ entry: logproto.Entry{
+ Line: `foo=bar msg="message with info and not level should get picked up"`,
+ },
+ expectedLogLevel: logLevelInfo,
+ },
{
name: "logfmt log line with a warn",
entry: logproto.Entry{
diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go
index 00c3ba53a2806..ec0660b91bc01 100644
--- a/pkg/distributor/http.go
+++ b/pkg/distributor/http.go
@@ -23,7 +23,26 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) {
}
func (d *Distributor) OTLPPushHandler(w http.ResponseWriter, r *http.Request) {
- d.pushHandler(w, r, push.ParseOTLPRequest)
+ interceptor := newOtelErrorHeaderInterceptor(w)
+ d.pushHandler(interceptor, r, push.ParseOTLPRequest)
+}
+
+// otelErrorHeaderInterceptor maps 500 errors to 503.
+// According to the OTLP specification, 500 errors are never retried on the client side, but 503 are.
+type otelErrorHeaderInterceptor struct {
+ http.ResponseWriter
+}
+
+func newOtelErrorHeaderInterceptor(w http.ResponseWriter) *otelErrorHeaderInterceptor {
+ return &otelErrorHeaderInterceptor{ResponseWriter: w}
+}
+
+func (i *otelErrorHeaderInterceptor) WriteHeader(statusCode int) {
+ if statusCode == http.StatusInternalServerError {
+ statusCode = http.StatusServiceUnavailable
+ }
+
+ i.ResponseWriter.WriteHeader(statusCode)
}
func (d *Distributor) pushHandler(w http.ResponseWriter, r *http.Request, pushRequestParser push.RequestParser) {
diff --git a/pkg/distributor/http_test.go b/pkg/distributor/http_test.go
index 0ecf70fa9a498..b6281b81bf3d7 100644
--- a/pkg/distributor/http_test.go
+++ b/pkg/distributor/http_test.go
@@ -82,6 +82,38 @@ func TestRequestParserWrapping(t *testing.T) {
require.True(t, called)
}
+func Test_OtelErrorHeaderInterceptor(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ inputCode int
+ expectedCode int
+ }{
+ {
+ name: "500",
+ inputCode: http.StatusInternalServerError,
+ expectedCode: http.StatusServiceUnavailable,
+ },
+ {
+ name: "400",
+ inputCode: http.StatusBadRequest,
+ expectedCode: http.StatusBadRequest,
+ },
+ {
+ name: "204",
+ inputCode: http.StatusNoContent,
+ expectedCode: http.StatusNoContent,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ r := httptest.NewRecorder()
+ i := newOtelErrorHeaderInterceptor(r)
+
+ http.Error(i, "error", tc.inputCode)
+ require.Equal(t, tc.expectedCode, r.Code)
+ })
+ }
+}
+
func stubParser(_ string, _ *http.Request, _ push.TenantsRetention, _ push.Limits, _ push.UsageTracker) (*logproto.PushRequest, *push.Stats, error) {
return &logproto.PushRequest{}, &push.Stats{}, nil
}
diff --git a/pkg/distributor/writefailures/manager.go b/pkg/distributor/writefailures/manager.go
index f02ab2e57d76f..5a02a7f2c2226 100644
--- a/pkg/distributor/writefailures/manager.go
+++ b/pkg/distributor/writefailures/manager.go
@@ -39,7 +39,8 @@ func (m *Manager) Log(tenantID string, err error) {
return
}
- if !m.tenantCfgs.LimitedLogPushErrors(tenantID) {
+ if !(m.tenantCfgs.LimitedLogPushErrors(tenantID) ||
+ m.tenantCfgs.LogDuplicateStreamInfo(tenantID)) {
return
}
diff --git a/pkg/indexgateway/gateway.go b/pkg/indexgateway/gateway.go
index e2850e8c9317f..7b49490a012ef 100644
--- a/pkg/indexgateway/gateway.go
+++ b/pkg/indexgateway/gateway.go
@@ -465,12 +465,15 @@ func (g *Gateway) boundedShards(
// 2) filter via blooms if enabled
filters := syntax.ExtractLineFilters(p.Plan().AST)
if g.bloomQuerier != nil && len(filters) > 0 {
- filtered, err = g.bloomQuerier.FilterChunkRefs(ctx, instanceID, req.From, req.Through, refs, p.Plan())
+ xs, err := g.bloomQuerier.FilterChunkRefs(ctx, instanceID, req.From, req.Through, refs, p.Plan())
if err != nil {
- return err
+ level.Error(logger).Log("msg", "failed to filter chunk refs", "err", err)
+ } else {
+ filtered = xs
}
sp.LogKV(
"stage", "queried bloom gateway",
+ "err", err,
)
}
diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go
index d530d937d42fe..1b0c76466dc15 100644
--- a/pkg/ingester/checkpoint_test.go
+++ b/pkg/ingester/checkpoint_test.go
@@ -70,7 +70,9 @@ func TestIngesterWAL(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -113,7 +115,7 @@ func TestIngesterWAL(t *testing.T) {
expectCheckpoint(t, walDir, false, time.Second)
// restart the ingester
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -127,7 +129,7 @@ func TestIngesterWAL(t *testing.T) {
require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))
// restart the ingester
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -150,7 +152,9 @@ func TestIngesterWALIgnoresStreamLimits(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -196,7 +200,7 @@ func TestIngesterWALIgnoresStreamLimits(t *testing.T) {
require.NoError(t, err)
// restart the ingester
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -253,7 +257,9 @@ func TestIngesterWALBackpressureSegments(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -274,7 +280,7 @@ func TestIngesterWALBackpressureSegments(t *testing.T) {
expectCheckpoint(t, walDir, false, time.Second)
// restart the ingester, ensuring we replayed from WAL.
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -295,7 +301,9 @@ func TestIngesterWALBackpressureCheckpoint(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -316,7 +324,7 @@ func TestIngesterWALBackpressureCheckpoint(t *testing.T) {
require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))
// restart the ingester, ensuring we can replay from the checkpoint as well.
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -591,7 +599,9 @@ func TestIngesterWALReplaysUnorderedToOrdered(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -663,7 +673,7 @@ func TestIngesterWALReplaysUnorderedToOrdered(t *testing.T) {
require.NoError(t, err)
// restart the ingester
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
diff --git a/pkg/ingester/chunk_test.go b/pkg/ingester/chunk_test.go
index 4523bc8cc1d8b..9ceb3c740926e 100644
--- a/pkg/ingester/chunk_test.go
+++ b/pkg/ingester/chunk_test.go
@@ -55,10 +55,11 @@ func TestIterator(t *testing.T) {
t.Run(chk.name, func(t *testing.T) {
chunk := chk.new()
for i := int64(0); i < entries; i++ {
- err := chunk.Append(&logproto.Entry{
+ dup, err := chunk.Append(&logproto.Entry{
Timestamp: time.Unix(i, 0),
Line: fmt.Sprintf("line %d", i),
})
+ require.False(t, dup)
require.NoError(t, err)
}
diff --git a/pkg/ingester/encoding_test.go b/pkg/ingester/encoding_test.go
index 4bb1aab0b8da6..458da1132c963 100644
--- a/pkg/ingester/encoding_test.go
+++ b/pkg/ingester/encoding_test.go
@@ -22,7 +22,9 @@ func fillChunk(t testing.TB, c chunkenc.Chunk) {
}
for c.SpaceFor(entry) {
- require.NoError(t, c.Append(entry))
+ dup, err := c.Append(entry)
+ require.False(t, dup)
+ require.NoError(t, err)
i++
entry.Timestamp = time.Unix(0, i)
entry.Line = fmt.Sprintf("entry for line %d", i)
@@ -120,10 +122,12 @@ func Test_EncodingChunks(t *testing.T) {
func Test_EncodingCheckpoint(t *testing.T) {
conf := dummyConf()
c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
- require.Nil(t, c.Append(&logproto.Entry{
+ dup, err := c.Append(&logproto.Entry{
Timestamp: time.Unix(1, 0),
Line: "hi there",
- }))
+ })
+ require.False(t, dup)
+ require.Nil(t, err)
data, err := c.Bytes()
require.Nil(t, err)
from, to := c.Bounds()
diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go
index 00aad05475495..e6e22f72f097e 100644
--- a/pkg/ingester/flush.go
+++ b/pkg/ingester/flush.go
@@ -7,15 +7,17 @@ import (
"sync"
"time"
+ "github.com/go-kit/log"
"github.com/go-kit/log/level"
+ "github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/ring"
+ "github.com/grafana/dskit/tenant"
"github.com/grafana/dskit/user"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"golang.org/x/net/context"
-
- "github.com/grafana/dskit/tenant"
+ "golang.org/x/time/rate"
"github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/storage/chunk"
@@ -28,14 +30,18 @@ const (
// position, not wallclock time.
flushBackoff = 1 * time.Second
+ // Lower bound on flushes per check period for rate-limiter
+ minFlushes = 100
+
nameLabel = "__name__"
logsValue = "logs"
- flushReasonIdle = "idle"
- flushReasonMaxAge = "max_age"
- flushReasonForced = "forced"
- flushReasonFull = "full"
- flushReasonSynced = "synced"
+ flushReasonIdle = "idle"
+ flushReasonMaxAge = "max_age"
+ flushReasonForced = "forced"
+ flushReasonNotOwned = "not_owned"
+ flushReasonFull = "full"
+ flushReasonSynced = "synced"
)
// Note: this is called both during the WAL replay (zero or more times)
@@ -96,13 +102,14 @@ func (o *flushOp) Priority() int64 {
return -int64(o.from)
}
-// sweepUsers periodically schedules series for flushing and garbage collects users with no series
+// sweepUsers periodically schedules series for flushing and garbage collects users with no streams
func (i *Ingester) sweepUsers(immediate, mayRemoveStreams bool) {
instances := i.getInstances()
for _, instance := range instances {
i.sweepInstance(instance, immediate, mayRemoveStreams)
}
+ i.setFlushRate()
}
func (i *Ingester) sweepInstance(instance *instance, immediate, mayRemoveStreams bool) {
@@ -122,7 +129,7 @@ func (i *Ingester) sweepStream(instance *instance, stream *stream, immediate boo
lastChunk := stream.chunks[len(stream.chunks)-1]
shouldFlush, _ := i.shouldFlushChunk(&lastChunk)
- if len(stream.chunks) == 1 && !immediate && !shouldFlush {
+ if len(stream.chunks) == 1 && !immediate && !shouldFlush && !instance.ownedStreamsSvc.isStreamNotOwned(stream.fp) {
return
}
@@ -134,9 +141,28 @@ func (i *Ingester) sweepStream(instance *instance, stream *stream, immediate boo
})
}
+// Compute a rate such that calls to the store are spread over nearly all of the flush period,
+// for example if we have 600 items in the queue and period 1 min we will send 10.5 per second.
+// Note if the store can't keep up with this rate then it doesn't make any difference.
+func (i *Ingester) setFlushRate() {
+ totalQueueLength := 0
+ for _, q := range i.flushQueues {
+ totalQueueLength += q.Length()
+ }
+ const jitter = 1.05 // aim to finish a little bit before the end of the period
+ flushesPerSecond := float64(totalQueueLength) / i.cfg.FlushCheckPeriod.Seconds() * jitter
+ // Avoid going very slowly with tiny queues
+ if flushesPerSecond*i.cfg.FlushCheckPeriod.Seconds() < minFlushes {
+ flushesPerSecond = minFlushes / i.cfg.FlushCheckPeriod.Seconds()
+ }
+ level.Debug(util_log.Logger).Log("msg", "computed flush rate", "rate", flushesPerSecond)
+ i.flushRateLimiter.SetLimit(rate.Limit(flushesPerSecond))
+}
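// Editorial worked example (values assumed): with 600 queued flush ops and a
// 1-minute FlushCheckPeriod, the limit becomes 600/60*1.05 = 10.5 flushes/s.
// With only 30 queued ops the computed rate would finish fewer than minFlushes
// (100) flushes per period, so the floor applies and the limit is raised to
// 100/60, about 1.67 flushes/s. flushLoop below waits on flushRateLimiter before
// each non-immediate flush to enforce this rate.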
+
func (i *Ingester) flushLoop(j int) {
+ l := log.With(i.logger, "loop", j)
defer func() {
- level.Debug(i.logger).Log("msg", "Ingester.flushLoop() exited")
+ level.Debug(l).Log("msg", "Ingester.flushLoop() exited")
i.flushQueuesDone.Done()
}()
@@ -147,9 +173,15 @@ func (i *Ingester) flushLoop(j int) {
}
op := o.(*flushOp)
- err := i.flushUserSeries(op.userID, op.fp, op.immediate)
+ if !op.immediate {
+ _ = i.flushRateLimiter.Wait(context.Background())
+ }
+
+ m := util_log.WithUserID(op.userID, l)
+ err := i.flushOp(m, op)
+
if err != nil {
- level.Error(util_log.WithUserID(op.userID, i.logger)).Log("msg", "failed to flush", "err", err)
+ level.Error(m).Log("msg", "failed to flush", "err", err)
}
// If we're exiting & we failed to flush, put the failed operation
@@ -161,7 +193,23 @@ func (i *Ingester) flushLoop(j int) {
}
}
-func (i *Ingester) flushUserSeries(userID string, fp model.Fingerprint, immediate bool) error {
+func (i *Ingester) flushOp(l log.Logger, op *flushOp) error {
+ ctx, cancelFunc := context.WithCancel(context.Background())
+ defer cancelFunc()
+
+ b := backoff.New(ctx, i.cfg.FlushOpBackoff)
+ for b.Ongoing() {
+ err := i.flushUserSeries(ctx, op.userID, op.fp, op.immediate)
+ if err == nil {
+ break
+ }
+ level.Error(l).Log("msg", "failed to flush", "retries", b.NumRetries(), "err", err)
+ b.Wait()
+ }
+ return b.Err()
+}
+
+func (i *Ingester) flushUserSeries(ctx context.Context, userID string, fp model.Fingerprint, immediate bool) error {
instance, ok := i.getInstanceByID(userID)
if !ok {
return nil
@@ -175,9 +223,9 @@ func (i *Ingester) flushUserSeries(userID string, fp model.Fingerprint, immediat
lbs := labels.String()
level.Info(i.logger).Log("msg", "flushing stream", "user", userID, "fp", fp, "immediate", immediate, "num_chunks", len(chunks), "labels", lbs)
- ctx := user.InjectOrgID(context.Background(), userID)
- ctx, cancel := context.WithTimeout(ctx, i.cfg.FlushOpTimeout)
- defer cancel()
+ ctx = user.InjectOrgID(ctx, userID)
+ ctx, cancelFunc := context.WithTimeout(ctx, i.cfg.FlushOpTimeout)
+ defer cancelFunc()
err := i.flushChunks(ctx, fp, labels, chunks, chunkMtx)
if err != nil {
return fmt.Errorf("failed to flush chunks: %w, num_chunks: %d, labels: %s", err, len(chunks), lbs)
@@ -197,10 +245,14 @@ func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint
stream.chunkMtx.Lock()
defer stream.chunkMtx.Unlock()
+ notOwnedStream := instance.ownedStreamsSvc.isStreamNotOwned(fp)
var result []*chunkDesc
for j := range stream.chunks {
shouldFlush, reason := i.shouldFlushChunk(&stream.chunks[j])
+ if !shouldFlush && notOwnedStream {
+ shouldFlush, reason = true, flushReasonNotOwned
+ }
if immediate || shouldFlush {
// Ensure no more writes happen to this chunk.
if !stream.chunks[j].closed {
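
For context on the rate computation added above, here is a minimal, self-contained sketch (not Loki code) of the same arithmetic, using golang.org/x/time/rate the way the flush loop does. The `minFlushes` floor mirrors the diff, but its actual value is not shown here, so `100` and the queue length are illustrative only.

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// computeFlushRate spreads queued flushes over roughly one flush period,
// aiming to finish ~5% early, and never drops below a minimum number of
// flushes per period so tiny queues still drain promptly.
func computeFlushRate(queueLen, minFlushes int, period time.Duration) rate.Limit {
	const jitter = 1.05
	perSecond := float64(queueLen) / period.Seconds() * jitter
	if perSecond*period.Seconds() < float64(minFlushes) {
		perSecond = float64(minFlushes) / period.Seconds()
	}
	return rate.Limit(perSecond)
}

func main() {
	limiter := rate.NewLimiter(rate.Inf, 1) // unlimited until the first sweep

	// 600 queued items over a 1-minute flush period: 600/60*1.05 = 10.5 flushes/s.
	limiter.SetLimit(computeFlushRate(600, 100, time.Minute))
	fmt.Println(limiter.Limit()) // 10.5
}
```

The limiter starts at rate.Inf so nothing is throttled before the first sweep recomputes the rate, matching how the ingester constructs its flushRateLimiter.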
diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go
index 6fd52bafa066f..1287be3d4bfdb 100644
--- a/pkg/ingester/flush_test.go
+++ b/pkg/ingester/flush_test.go
@@ -1,6 +1,7 @@
package ingester
import (
+ "errors"
"fmt"
"os"
"sort"
@@ -102,6 +103,67 @@ func Benchmark_FlushLoop(b *testing.B) {
}
}
+func Test_FlushOp(t *testing.T) {
+ t.Run("no error", func(t *testing.T) {
+ cfg := defaultIngesterTestConfig(t)
+ cfg.FlushOpBackoff.MinBackoff = time.Second
+ cfg.FlushOpBackoff.MaxBackoff = 10 * time.Second
+ cfg.FlushOpBackoff.MaxRetries = 1
+ cfg.FlushCheckPeriod = 100 * time.Millisecond
+
+ _, ing := newTestStore(t, cfg, nil)
+
+ ctx := user.InjectOrgID(context.Background(), "foo")
+ ins, err := ing.GetOrCreateInstance("foo")
+ require.NoError(t, err)
+
+ lbs := makeRandomLabels()
+ req := &logproto.PushRequest{Streams: []logproto.Stream{{
+ Labels: lbs.String(),
+ Entries: entries(5, time.Now()),
+ }}}
+ require.NoError(t, ins.Push(ctx, req))
+
+ time.Sleep(cfg.FlushCheckPeriod)
+ require.NoError(t, ing.flushOp(gokitlog.NewNopLogger(), &flushOp{
+ immediate: true,
+ userID: "foo",
+ fp: ins.getHashForLabels(lbs),
+ }))
+ })
+
+ t.Run("max retries exceeded", func(t *testing.T) {
+ cfg := defaultIngesterTestConfig(t)
+ cfg.FlushOpBackoff.MinBackoff = time.Second
+ cfg.FlushOpBackoff.MaxBackoff = 10 * time.Second
+ cfg.FlushOpBackoff.MaxRetries = 1
+ cfg.FlushCheckPeriod = 100 * time.Millisecond
+
+ store, ing := newTestStore(t, cfg, nil)
+ store.onPut = func(_ context.Context, _ []chunk.Chunk) error {
+ return errors.New("failed to write chunks")
+ }
+
+ ctx := user.InjectOrgID(context.Background(), "foo")
+ ins, err := ing.GetOrCreateInstance("foo")
+ require.NoError(t, err)
+
+ lbs := makeRandomLabels()
+ req := &logproto.PushRequest{Streams: []logproto.Stream{{
+ Labels: lbs.String(),
+ Entries: entries(5, time.Now()),
+ }}}
+ require.NoError(t, ins.Push(ctx, req))
+
+ time.Sleep(cfg.FlushCheckPeriod)
+ require.EqualError(t, ing.flushOp(gokitlog.NewNopLogger(), &flushOp{
+ immediate: true,
+ userID: "foo",
+ fp: ins.getHashForLabels(lbs),
+ }), "terminated after 1 retries")
+ })
+}
+
func Test_Flush(t *testing.T) {
var (
store, ing = newTestStore(t, defaultIngesterTestConfig(t), nil)
@@ -191,6 +253,56 @@ func TestFlushingCollidingLabels(t *testing.T) {
}
}
+func Test_flush_not_owned_stream(t *testing.T) {
+ cfg := defaultIngesterTestConfig(t)
+ cfg.FlushCheckPeriod = time.Millisecond * 100
+ cfg.MaxChunkAge = time.Minute
+ cfg.MaxChunkIdle = time.Hour
+
+ store, ing := newTestStore(t, cfg, nil)
+ defer store.Stop()
+
+ now := time.Unix(0, 0)
+
+ entries := []logproto.Entry{
+ {Timestamp: now.Add(time.Nanosecond), Line: "1"},
+ {Timestamp: now.Add(time.Minute), Line: "2"},
+ }
+
+ labelSet := model.LabelSet{"app": "l"}
+ req := &logproto.PushRequest{Streams: []logproto.Stream{
+ {Labels: labelSet.String(), Entries: entries},
+ }}
+
+ const userID = "testUser"
+ ctx := user.InjectOrgID(context.Background(), userID)
+
+ _, err := ing.Push(ctx, req)
+ require.NoError(t, err)
+
+ time.Sleep(2 * cfg.FlushCheckPeriod)
+
+ // ensure chunk is not flushed after flush period elapses
+ store.checkData(t, map[string][]logproto.Stream{})
+
+ instance, found := ing.getInstanceByID(userID)
+ require.True(t, found)
+ fingerprint := instance.getHashForLabels(labels.FromStrings("app", "l"))
+ require.Equal(t, model.Fingerprint(16794418009594958), fingerprint)
+ instance.ownedStreamsSvc.trackStreamOwnership(fingerprint, false)
+
+ time.Sleep(2 * cfg.FlushCheckPeriod)
+
+ // assert the store now contains both batches of the stream
+ store.checkData(t, map[string][]logproto.Stream{
+ userID: {
+ {Labels: labelSet.String(), Entries: entries},
+ },
+ })
+
+ require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ing))
+}
+
func TestFlushMaxAge(t *testing.T) {
cfg := defaultIngesterTestConfig(t)
cfg.FlushCheckPeriod = time.Millisecond * 100
@@ -275,10 +387,12 @@ func newTestStore(t require.TestingT, cfg Config, walOverride WAL) (*testStore,
chunks: map[string][]chunk.Chunk{},
}
+ readRingMock := mockReadRingWithOneActiveIngester()
+
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
- ing, err := New(cfg, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokitlog.NewNopLogger(), nil)
+ ing, err := New(cfg, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokitlog.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
@@ -297,6 +411,10 @@ func defaultIngesterTestConfig(t testing.TB) Config {
cfg := Config{}
flagext.DefaultValues(&cfg)
+ cfg.FlushOpBackoff.MinBackoff = 100 * time.Millisecond
+ cfg.FlushOpBackoff.MaxBackoff = 10 * time.Second
+ cfg.FlushOpBackoff.MaxRetries = 1
+ cfg.FlushOpTimeout = 15 * time.Second
cfg.FlushCheckPeriod = 99999 * time.Hour
cfg.MaxChunkIdle = 99999 * time.Hour
cfg.ConcurrentFlushes = 1
@@ -310,6 +428,7 @@ func defaultIngesterTestConfig(t testing.TB) Config {
cfg.BlockSize = 256 * 1024
cfg.TargetChunkSize = 1500 * 1024
cfg.WAL.Enabled = false
+ cfg.OwnedStreamsCheckInterval = 1 * time.Second
return cfg
}
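
A short aside on the backoff semantics the new Test_FlushOp cases rely on: with dskit's backoff, MaxRetries bounds the number of loop attempts, and once they are exhausted Err() reports the terminal error. The sketch below is a standalone illustration (assuming only dskit's backoff package and a flush function that always fails) of why a single failing attempt with MaxRetries=1 produces the exact message asserted above.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/grafana/dskit/backoff"
)

func main() {
	cfg := backoff.Config{
		MinBackoff: time.Second,
		MaxBackoff: 10 * time.Second,
		MaxRetries: 1,
	}

	flush := func() error { return errors.New("failed to write chunks") }

	b := backoff.New(context.Background(), cfg)
	for b.Ongoing() {
		if err := flush(); err == nil {
			break
		}
		b.Wait() // records the retry; with MaxRetries=1 the loop now stops
	}

	// Prints "terminated after 1 retries", matching the test expectation above.
	fmt.Println(b.Err())
}
```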
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 41b358906e0a1..af7f1fde288c9 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -21,6 +21,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
+ "github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/concurrency"
"github.com/grafana/dskit/modules"
"github.com/grafana/dskit/multierror"
@@ -32,8 +33,11 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
+ "golang.org/x/time/rate"
"google.golang.org/grpc/health/grpc_health_v1"
+ server_util "github.com/grafana/loki/v3/pkg/util/server"
+
"github.com/grafana/loki/v3/pkg/analytics"
"github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
@@ -82,6 +86,7 @@ type Config struct {
ConcurrentFlushes int `yaml:"concurrent_flushes"`
FlushCheckPeriod time.Duration `yaml:"flush_check_period"`
+ FlushOpBackoff backoff.Config `yaml:"flush_op_backoff"`
FlushOpTimeout time.Duration `yaml:"flush_op_timeout"`
RetainPeriod time.Duration `yaml:"chunk_retain_period"`
MaxChunkIdle time.Duration `yaml:"chunk_idle_period"`
@@ -118,6 +123,8 @@ type Config struct {
MaxDroppedStreams int `yaml:"max_dropped_streams"`
ShutdownMarkerPath string `yaml:"shutdown_marker_path"`
+
+ OwnedStreamsCheckInterval time.Duration `yaml:"owned_streams_check_interval" doc:"description=Interval at which the ingester ownedStreamService checks for changes in the ring to recalculate owned streams."`
}
// RegisterFlags registers the flags.
@@ -127,7 +134,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&cfg.ConcurrentFlushes, "ingester.concurrent-flushes", 32, "How many flushes can happen concurrently from each stream.")
f.DurationVar(&cfg.FlushCheckPeriod, "ingester.flush-check-period", 30*time.Second, "How often should the ingester see if there are any blocks to flush. The first flush check is delayed by a random time up to 0.8x the flush check period. Additionally, there is +/- 1% jitter added to the interval.")
- f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 10*time.Minute, "The timeout before a flush is cancelled.")
+ f.DurationVar(&cfg.FlushOpBackoff.MinBackoff, "ingester.flush-op-backoff-min-period", 10*time.Second, "Minimum backoff period when a flush fails. Each concurrent flush has its own backoff, see `ingester.concurrent-flushes`.")
+ f.DurationVar(&cfg.FlushOpBackoff.MaxBackoff, "ingester.flush-op-backoff-max-period", time.Minute, "Maximum backoff period when a flush fails. Each concurrent flush has its own backoff, see `ingester.concurrent-flushes`.")
+ f.IntVar(&cfg.FlushOpBackoff.MaxRetries, "ingester.flush-op-backoff-retries", 10, "Maximum retries for failed flushes.")
+ f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 10*time.Minute, "The timeout for an individual flush. Will be retried up to `flush-op-backoff-retries` times.")
f.DurationVar(&cfg.RetainPeriod, "ingester.chunks-retain-period", 0, "How long chunks should be retained in-memory after they've been flushed.")
f.DurationVar(&cfg.MaxChunkIdle, "ingester.chunks-idle-period", 30*time.Minute, "How long chunks should sit in-memory with no updates before being flushed if they don't hit the max block size. This means that half-empty chunks will still be flushed after a certain period as long as they receive no further activity.")
f.IntVar(&cfg.BlockSize, "ingester.chunks-block-size", 256*1024, "The targeted _uncompressed_ size in bytes of a chunk block. When this threshold is exceeded the head block will be cut and compressed inside the chunk.")
@@ -142,6 +152,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&cfg.IndexShards, "ingester.index-shards", index.DefaultIndexShards, "Shard factor used in the ingesters for the in process reverse index. This MUST be evenly divisible by ALL schema shard factors or Loki will not start.")
f.IntVar(&cfg.MaxDroppedStreams, "ingester.tailer.max-dropped-streams", 10, "Maximum number of dropped streams to keep in memory during tailing.")
f.StringVar(&cfg.ShutdownMarkerPath, "ingester.shutdown-marker-path", "", "Path where the shutdown marker file is stored. If not set and common.path_prefix is set then common.path_prefix will be used.")
+ f.DurationVar(&cfg.OwnedStreamsCheckInterval, "ingester.owned-streams-check-interval", 30*time.Second, "Interval at which the ingester ownedStreamService checks for changes in the ring to recalculate owned streams.")
}
func (cfg *Config) Validate() error {
@@ -155,6 +166,15 @@ func (cfg *Config) Validate() error {
return err
}
+ if cfg.FlushOpBackoff.MinBackoff > cfg.FlushOpBackoff.MaxBackoff {
+ return errors.New("invalid flush op min backoff: cannot be larger than max backoff")
+ }
+ if cfg.FlushOpBackoff.MaxRetries <= 0 {
+ return fmt.Errorf("invalid flush op max retries: %d", cfg.FlushOpBackoff.MaxRetries)
+ }
+ if cfg.FlushOpTimeout <= 0 {
+ return fmt.Errorf("invalid flush op timeout: %s", cfg.FlushOpTimeout)
+ }
if cfg.IndexShards <= 0 {
return fmt.Errorf("invalid ingester index shard factor: %d", cfg.IndexShards)
}
@@ -220,6 +240,9 @@ type Ingester struct {
flushQueues []*util.PriorityQueue
flushQueuesDone sync.WaitGroup
+ // Spread out calls to the chunk store over the flush period
+ flushRateLimiter *rate.Limiter
+
limiter *Limiter
// Denotes whether the ingester should flush on shutdown.
@@ -246,10 +269,14 @@ type Ingester struct {
writeLogManager *writefailures.Manager
customStreamsTracker push.UsageTracker
+
+ // recalculateOwnedStreams periodically checks the ring for changes and recalculates owned streams for each instance.
+ readRing ring.ReadRing
+ recalculateOwnedStreams *recalculateOwnedStreams
}
// New makes a new Ingester.
-func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger, customStreamsTracker push.UsageTracker) (*Ingester, error) {
+func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger, customStreamsTracker push.UsageTracker, readRing ring.ReadRing) (*Ingester, error) {
if cfg.ingesterClientFactory == nil {
cfg.ingesterClientFactory = client.New
}
@@ -271,6 +298,7 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
periodicConfigs: store.GetSchemaConfigs(),
loopQuit: make(chan struct{}),
flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes),
+ flushRateLimiter: rate.NewLimiter(rate.Inf, 1),
tailersQuit: make(chan struct{}),
metrics: metrics,
flushOnShutdownSwitch: &OnceSwitch{},
@@ -278,6 +306,7 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
streamRateCalculator: NewStreamRateCalculator(),
writeLogManager: writefailures.NewManager(logger, registerer, writeFailuresCfg, configs, "ingester"),
customStreamsTracker: customStreamsTracker,
+ readRing: readRing,
}
i.replayController = newReplayController(metrics, cfg.WAL, &replayFlusher{i})
@@ -327,6 +356,8 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
i.SetExtractorWrapper(i.cfg.SampleExtractorWrapper)
}
+ i.recalculateOwnedStreams = newRecalculateOwnedStreams(i.getInstances, i.lifecycler.ID, i.readRing, cfg.OwnedStreamsCheckInterval, util_log.Logger)
+
return i, nil
}
@@ -520,6 +551,16 @@ func (i *Ingester) starting(ctx context.Context) error {
i.setPrepareShutdown()
}
+ err = i.recalculateOwnedStreams.StartAsync(ctx)
+ if err != nil {
+ return fmt.Errorf("can not start recalculate owned streams service: %w", err)
+ }
+
+ err = i.lifecycler.AwaitRunning(ctx)
+ if err != nil {
+ return fmt.Errorf("can not ensure recalculate owned streams service is running: %w", err)
+ }
+
// start our loop
i.loopDone.Add(1)
go i.loop()
@@ -834,16 +875,16 @@ func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logpro
return nil, ErrReadOnly
}
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "write"))
+ pprof.SetGoroutineLabels(ctx)
+
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return &logproto.PushResponse{}, err
}
-
- pprof.Do(ctx, pprof.Labels("path", "write", "tenant", instanceID), func(c context.Context) {
- err = instance.Push(ctx, req)
- })
-
- return &logproto.PushResponse{}, err
+ return &logproto.PushResponse{}, instance.Push(ctx, req)
}
// GetStreamRates returns a response containing all streams and their current rate
@@ -854,11 +895,12 @@ func (i *Ingester) GetStreamRates(ctx context.Context, _ *logproto.StreamRatesRe
defer sp.LogKV("event", "ingester finished handling GetStreamRates")
}
- var allRates []logproto.StreamRate
- pprof.Do(ctx, pprof.Labels("path", "write"), func(c context.Context) {
- allRates = i.streamRateCalculator.Rates()
- })
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "write"))
+ pprof.SetGoroutineLabels(ctx)
+ allRates := i.streamRateCalculator.Rates()
rates := make([]*logproto.StreamRate, len(allRates))
for idx := range allRates {
rates[idx] = &allRates[idx]
@@ -908,49 +950,48 @@ func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querie
return err
}
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "log"))
+ pprof.SetGoroutineLabels(ctx)
+
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return err
}
+ it, err := instance.Query(ctx, logql.SelectLogParams{QueryRequest: req})
+ if err != nil {
+ return err
+ }
- pprof.Do(ctx, pprof.Labels("path", "read", "type", "log", "tenant", instanceID), func(c context.Context) {
- var it iter.EntryIterator
- it, err = instance.Query(ctx, logql.SelectLogParams{QueryRequest: req})
+ if start, end, ok := buildStoreRequest(i.cfg, req.Start, req.End, time.Now()); ok {
+ storeReq := logql.SelectLogParams{QueryRequest: &logproto.QueryRequest{
+ Selector: req.Selector,
+ Direction: req.Direction,
+ Start: start,
+ End: end,
+ Limit: req.Limit,
+ Shards: req.Shards,
+ Deletes: req.Deletes,
+ Plan: req.Plan,
+ }}
+ storeItr, err := i.store.SelectLogs(ctx, storeReq)
if err != nil {
- return
- }
-
- if start, end, ok := buildStoreRequest(i.cfg, req.Start, req.End, time.Now()); ok {
- storeReq := logql.SelectLogParams{QueryRequest: &logproto.QueryRequest{
- Selector: req.Selector,
- Direction: req.Direction,
- Start: start,
- End: end,
- Limit: req.Limit,
- Shards: req.Shards,
- Deletes: req.Deletes,
- Plan: req.Plan,
- }}
- var storeItr iter.EntryIterator
- storeItr, err = i.store.SelectLogs(ctx, storeReq)
- if err != nil {
- util.LogErrorWithContext(ctx, "closing iterator", it.Close)
- return
- }
- it = iter.NewMergeEntryIterator(ctx, []iter.EntryIterator{it, storeItr}, req.Direction)
+ util.LogErrorWithContext(ctx, "closing iterator", it.Close)
+ return err
}
+ it = iter.NewMergeEntryIterator(ctx, []iter.EntryIterator{it, storeItr}, req.Direction)
+ }
- defer util.LogErrorWithContext(ctx, "closing iterator", it.Close)
+ defer util.LogErrorWithContext(ctx, "closing iterator", it.Close)
- // sendBatches uses -1 to specify no limit.
- batchLimit := int32(req.Limit)
- if batchLimit == 0 {
- batchLimit = -1
- }
- err = sendBatches(ctx, it, queryServer, batchLimit)
- })
+ // sendBatches uses -1 to specify no limit.
+ batchLimit := int32(req.Limit)
+ if batchLimit == 0 {
+ batchLimit = -1
+ }
- return err
+ return sendBatches(ctx, it, queryServer, batchLimit)
}
// QuerySample the ingesters for series from logs matching a set of matchers.
@@ -976,46 +1017,45 @@ func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer log
return err
}
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "metric"))
+ pprof.SetGoroutineLabels(ctx)
+
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return err
}
- pprof.Do(ctx, pprof.Labels("path", "read", "type", "metric", "tenant", instanceID), func(c context.Context) {
- var it iter.SampleIterator
- it, err = instance.QuerySample(ctx, logql.SelectSampleParams{SampleQueryRequest: req})
- if err != nil {
- return
- }
- if sp != nil {
- sp.LogKV("event", "finished instance query sample", "selector", req.Selector, "start", req.Start, "end", req.End)
- }
-
- if start, end, ok := buildStoreRequest(i.cfg, req.Start, req.End, time.Now()); ok {
- storeReq := logql.SelectSampleParams{SampleQueryRequest: &logproto.SampleQueryRequest{
- Start: start,
- End: end,
- Selector: req.Selector,
- Shards: req.Shards,
- Deletes: req.Deletes,
- Plan: req.Plan,
- }}
- var storeItr iter.SampleIterator
- storeItr, err = i.store.SelectSamples(ctx, storeReq)
- if err != nil {
- util.LogErrorWithContext(ctx, "closing iterator", it.Close)
- return
- }
+ it, err := instance.QuerySample(ctx, logql.SelectSampleParams{SampleQueryRequest: req})
+ if err != nil {
+ return err
+ }
+ if sp != nil {
+ sp.LogKV("event", "finished instance query sample", "selector", req.Selector, "start", req.Start, "end", req.End)
+ }
- it = iter.NewMergeSampleIterator(ctx, []iter.SampleIterator{it, storeItr})
+ if start, end, ok := buildStoreRequest(i.cfg, req.Start, req.End, time.Now()); ok {
+ storeReq := logql.SelectSampleParams{SampleQueryRequest: &logproto.SampleQueryRequest{
+ Start: start,
+ End: end,
+ Selector: req.Selector,
+ Shards: req.Shards,
+ Deletes: req.Deletes,
+ Plan: req.Plan,
+ }}
+ storeItr, err := i.store.SelectSamples(ctx, storeReq)
+ if err != nil {
+ util.LogErrorWithContext(ctx, "closing iterator", it.Close)
+ return err
}
- defer util.LogErrorWithContext(ctx, "closing iterator", it.Close)
+ it = iter.NewMergeSampleIterator(ctx, []iter.SampleIterator{it, storeItr})
+ }
- err = sendSampleBatches(ctx, it, queryServer)
- })
+ defer util.LogErrorWithContext(ctx, "closing iterator", it.Close)
- return err
+ return sendSampleBatches(ctx, it, queryServer)
}
// asyncStoreMaxLookBack returns a max look back period only if active index type is one of async index stores like `boltdb-shipper` and `tsdb`.
@@ -1041,11 +1081,23 @@ func (i *Ingester) asyncStoreMaxLookBack() time.Duration {
// GetChunkIDs is meant to be used only when using an async store like boltdb-shipper or tsdb.
func (i *Ingester) GetChunkIDs(ctx context.Context, req *logproto.GetChunkIDsRequest) (*logproto.GetChunkIDsResponse, error) {
+ gcr, err := i.getChunkIDs(ctx, req)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return gcr, err
+}
+
+// getChunkIDs is meant to be used only when using an async store like boltdb-shipper or tsdb.
+func (i *Ingester) getChunkIDs(ctx context.Context, req *logproto.GetChunkIDsRequest) (*logproto.GetChunkIDsResponse, error) {
orgID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
}
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "chunkIDs"))
+ pprof.SetGoroutineLabels(ctx)
+
asyncStoreMaxLookBack := i.asyncStoreMaxLookBack()
if asyncStoreMaxLookBack == 0 {
return &logproto.GetChunkIDsResponse{}, nil
@@ -1061,27 +1113,24 @@ func (i *Ingester) GetChunkIDs(ctx context.Context, req *logproto.GetChunkIDsReq
return nil, err
}
- var resp logproto.GetChunkIDsResponse
- pprof.Do(ctx, pprof.Labels("path", "read", "type", "chunkIDs", "tenant", orgID), func(c context.Context) {
- // get chunk references
- chunksGroups, _, err := i.store.GetChunks(ctx, orgID, start, end, chunk.NewPredicate(matchers, nil), nil)
- if err != nil {
- return
- }
+ // get chunk references
+ chunksGroups, _, err := i.store.GetChunks(ctx, orgID, start, end, chunk.NewPredicate(matchers, nil), nil)
+ if err != nil {
+ return nil, err
+ }
- // todo (Callum) ingester should maybe store the whole schema config?
- s := config.SchemaConfig{
- Configs: i.periodicConfigs,
- }
+ // todo (Callum) ingester should maybe store the whole schema config?
+ s := config.SchemaConfig{
+ Configs: i.periodicConfigs,
+ }
- // build the response
- resp = logproto.GetChunkIDsResponse{ChunkIDs: []string{}}
- for _, chunks := range chunksGroups {
- for _, chk := range chunks {
- resp.ChunkIDs = append(resp.ChunkIDs, s.ExternalKey(chk.ChunkRef))
- }
+ // build the response
+ resp := logproto.GetChunkIDsResponse{ChunkIDs: []string{}}
+ for _, chunks := range chunksGroups {
+ for _, chk := range chunks {
+ resp.ChunkIDs = append(resp.ChunkIDs, s.ExternalKey(chk.ChunkRef))
}
- })
+ }
return &resp, nil
}
@@ -1093,6 +1142,11 @@ func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logp
return nil, err
}
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "labels"))
+ pprof.SetGoroutineLabels(ctx)
+
instance, err := i.GetOrCreateInstance(userID)
if err != nil {
return nil, err
@@ -1106,59 +1160,49 @@ func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logp
}
}
- var resp *logproto.LabelResponse
- var storeValues []string
- pprof.Do(ctx, pprof.Labels("path", "read", "type", "labels", "tenant", userID), func(c context.Context) {
- resp, err = instance.Label(ctx, req, matchers...)
- if err != nil {
- return
- }
- if req.Start == nil {
- return
- }
+ resp, err := instance.Label(ctx, req, matchers...)
+ if err != nil {
+ return nil, err
+ }
- // Only continue if the active index type is one of async index store types or QueryStore flag is true.
- asyncStoreMaxLookBack := i.asyncStoreMaxLookBack()
- if asyncStoreMaxLookBack == 0 && !i.cfg.QueryStore {
- return
- }
+ if req.Start == nil {
+ return resp, nil
+ }
- var cs storage.Store
- var ok bool
- if cs, ok = i.store.(storage.Store); !ok {
- return
- }
+ // Only continue if the active index type is one of async index store types or QueryStore flag is true.
+ asyncStoreMaxLookBack := i.asyncStoreMaxLookBack()
+ if asyncStoreMaxLookBack == 0 && !i.cfg.QueryStore {
+ return resp, nil
+ }
- maxLookBackPeriod := i.cfg.QueryStoreMaxLookBackPeriod
- if asyncStoreMaxLookBack != 0 {
- maxLookBackPeriod = asyncStoreMaxLookBack
- }
- // Adjust the start time based on QueryStoreMaxLookBackPeriod.
- start := adjustQueryStartTime(maxLookBackPeriod, *req.Start, time.Now())
- if start.After(*req.End) {
- // The request is older than we are allowed to query the store, just return what we have.
- return
- }
- from, through := model.TimeFromUnixNano(start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano())
+ var cs storage.Store
+ var ok bool
+ if cs, ok = i.store.(storage.Store); !ok {
+ return resp, nil
+ }
- if req.Values {
- storeValues, err = cs.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name, matchers...)
- if err != nil {
- return
- }
- } else {
- storeValues, err = cs.LabelNamesForMetricName(ctx, userID, from, through, "logs", matchers...)
- if err != nil {
- return
- }
+ maxLookBackPeriod := i.cfg.QueryStoreMaxLookBackPeriod
+ if asyncStoreMaxLookBack != 0 {
+ maxLookBackPeriod = asyncStoreMaxLookBack
+ }
+ // Adjust the start time based on QueryStoreMaxLookBackPeriod.
+ start := adjustQueryStartTime(maxLookBackPeriod, *req.Start, time.Now())
+ if start.After(*req.End) {
+ // The request is older than we are allowed to query the store, just return what we have.
+ return resp, nil
+ }
+ from, through := model.TimeFromUnixNano(start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano())
+ var storeValues []string
+ if req.Values {
+ storeValues, err = cs.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name, matchers...)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ storeValues, err = cs.LabelNamesForMetricName(ctx, userID, from, through, "logs", matchers...)
+ if err != nil {
+ return nil, err
}
- })
-
- // When wrapping the work above in the pprof.Do function we created a possible scenario where resp could
- // be populated with values but an error occurred later on, prior to this profiling wrapper we would have
- // always exited with a nil response and the error message, this is here to keep that behavior.
- if err != nil {
- return nil, err
}
return &logproto.LabelResponse{
@@ -1168,22 +1212,27 @@ func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logp
// Series queries the ingester for log stream identifiers (label sets) matching a set of matchers
func (i *Ingester) Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) {
+ sr, err := i.series(ctx, req)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return sr, err
+}
+
+func (i *Ingester) series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) {
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
}
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "series"))
+ pprof.SetGoroutineLabels(ctx)
+
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return nil, err
}
-
- var series *logproto.SeriesResponse
- pprof.Do(ctx, pprof.Labels("path", "read", "type", "series", "tenant", instanceID), func(c context.Context) {
- series, err = instance.Series(ctx, req)
- })
-
- return series, err
+ return instance.Series(ctx, req)
}
func (i *Ingester) GetStats(ctx context.Context, req *logproto.IndexStatsRequest) (*logproto.IndexStatsResponse, error) {
@@ -1194,6 +1243,11 @@ func (i *Ingester) GetStats(ctx context.Context, req *logproto.IndexStatsRequest
return nil, err
}
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "stats"))
+ pprof.SetGoroutineLabels(ctx)
+
instance, err := i.GetOrCreateInstance(user)
if err != nil {
return nil, err
@@ -1204,47 +1258,43 @@ func (i *Ingester) GetStats(ctx context.Context, req *logproto.IndexStatsRequest
return nil, err
}
- var merged logproto.IndexStatsResponse
- pprof.Do(ctx, pprof.Labels("path", "read", "type", "stats", "tenant", user), func(c context.Context) {
-
- type f func() (*logproto.IndexStatsResponse, error)
- jobs := []f{
- f(func() (*logproto.IndexStatsResponse, error) {
- return instance.GetStats(ctx, req)
- }),
- f(func() (*logproto.IndexStatsResponse, error) {
- return i.store.Stats(ctx, user, req.From, req.Through, matchers...)
- }),
- }
- resps := make([]*logproto.IndexStatsResponse, len(jobs))
-
- if err := concurrency.ForEachJob(
- ctx,
- len(jobs),
- 2,
- func(_ context.Context, idx int) error {
- res, err := jobs[idx]()
- resps[idx] = res
- return err
- },
- ); err != nil {
- return
- }
+ type f func() (*logproto.IndexStatsResponse, error)
+ jobs := []f{
+ f(func() (*logproto.IndexStatsResponse, error) {
+ return instance.GetStats(ctx, req)
+ }),
+ f(func() (*logproto.IndexStatsResponse, error) {
+ return i.store.Stats(ctx, user, req.From, req.Through, matchers...)
+ }),
+ }
+ resps := make([]*logproto.IndexStatsResponse, len(jobs))
+
+ if err := concurrency.ForEachJob(
+ ctx,
+ len(jobs),
+ 2,
+ func(_ context.Context, idx int) error {
+ res, err := jobs[idx]()
+ resps[idx] = res
+ return err
+ },
+ ); err != nil {
+ return nil, err
+ }
- merged = index_stats.MergeStats(resps...)
- if sp != nil {
- sp.LogKV(
- "user", user,
- "from", req.From.Time(),
- "through", req.Through.Time(),
- "matchers", syntax.MatchersString(matchers),
- "streams", merged.Streams,
- "chunks", merged.Chunks,
- "bytes", merged.Bytes,
- "entries", merged.Entries,
- )
- }
- })
+ merged := index_stats.MergeStats(resps...)
+ if sp != nil {
+ sp.LogKV(
+ "user", user,
+ "from", req.From.Time(),
+ "through", req.Through.Time(),
+ "matchers", syntax.MatchersString(matchers),
+ "streams", merged.Streams,
+ "chunks", merged.Chunks,
+ "bytes", merged.Bytes,
+ "entries", merged.Entries,
+ )
+ }
return &merged, nil
}
@@ -1255,6 +1305,11 @@ func (i *Ingester) GetVolume(ctx context.Context, req *logproto.VolumeRequest) (
return nil, err
}
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "volume"))
+ pprof.SetGoroutineLabels(ctx)
+
instance, err := i.GetOrCreateInstance(user)
if err != nil {
return nil, err
@@ -1265,33 +1320,31 @@ func (i *Ingester) GetVolume(ctx context.Context, req *logproto.VolumeRequest) (
return nil, err
}
- var merged *logproto.VolumeResponse
- pprof.Do(ctx, pprof.Labels("path", "read", "type", "volume", "tenant", user), func(c context.Context) {
- type f func() (*logproto.VolumeResponse, error)
- jobs := []f{
- f(func() (*logproto.VolumeResponse, error) {
- return instance.GetVolume(ctx, req)
- }),
- f(func() (*logproto.VolumeResponse, error) {
- return i.store.Volume(ctx, user, req.From, req.Through, req.Limit, req.TargetLabels, req.AggregateBy, matchers...)
- }),
- }
- resps := make([]*logproto.VolumeResponse, len(jobs))
-
- if err := concurrency.ForEachJob(
- ctx,
- len(jobs),
- 2,
- func(_ context.Context, idx int) error {
- res, err := jobs[idx]()
- resps[idx] = res
- return err
- },
- ); err != nil {
- return
- }
- merged = seriesvolume.Merge(resps, req.Limit)
- })
+ type f func() (*logproto.VolumeResponse, error)
+ jobs := []f{
+ f(func() (*logproto.VolumeResponse, error) {
+ return instance.GetVolume(ctx, req)
+ }),
+ f(func() (*logproto.VolumeResponse, error) {
+ return i.store.Volume(ctx, user, req.From, req.Through, req.Limit, req.TargetLabels, req.AggregateBy, matchers...)
+ }),
+ }
+ resps := make([]*logproto.VolumeResponse, len(jobs))
+
+ if err := concurrency.ForEachJob(
+ ctx,
+ len(jobs),
+ 2,
+ func(_ context.Context, idx int) error {
+ res, err := jobs[idx]()
+ resps[idx] = res
+ return err
+ },
+ ); err != nil {
+ return nil, err
+ }
+
+ merged := seriesvolume.Merge(resps, req.Limit)
return merged, nil
}
@@ -1331,6 +1384,11 @@ func (i *Ingester) getInstances() []*instance {
// Tail logs matching given query
func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_TailServer) error {
+ err := i.tail(req, queryServer)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return err
+}
+func (i *Ingester) tail(req *logproto.TailRequest, queryServer logproto.Querier_TailServer) error {
select {
case <-i.tailersQuit:
return errors.New("Ingester is stopping")
@@ -1376,6 +1434,12 @@ func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_
// TailersCount returns count of active tail requests from a user
func (i *Ingester) TailersCount(ctx context.Context, _ *logproto.TailersCountRequest) (*logproto.TailersCountResponse, error) {
+ tcr, err := i.tailersCount(ctx)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return tcr, err
+}
+
+func (i *Ingester) tailersCount(ctx context.Context) (*logproto.TailersCountResponse, error) {
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
@@ -1431,11 +1495,22 @@ func (i *Ingester) GetDetectedFields(_ context.Context, r *logproto.DetectedFiel
// GetDetectedLabels returns map of detected labels and unique values from this ingester
func (i *Ingester) GetDetectedLabels(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.LabelToValuesResponse, error) {
+ lvr, err := i.getDetectedLabels(ctx, req)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return lvr, err
+}
+
+func (i *Ingester) getDetectedLabels(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.LabelToValuesResponse, error) {
userID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
}
+ // Set profiling tags
+ defer pprof.SetGoroutineLabels(ctx)
+ ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "detectedLabels"))
+ pprof.SetGoroutineLabels(ctx)
+
instance, err := i.GetOrCreateInstance(userID)
if err != nil {
return nil, err
@@ -1448,22 +1523,19 @@ func (i *Ingester) GetDetectedLabels(ctx context.Context, req *logproto.Detected
}
}
- var result map[string]*logproto.UniqueLabelValues
- pprof.Do(ctx, pprof.Labels("path", "read", "type", "detectedLabels", "tenant", userID), func(c context.Context) {
- labelMap, err := instance.LabelsWithValues(ctx, req.Start, matchers...)
- if err != nil {
- return
- }
- result = make(map[string]*logproto.UniqueLabelValues)
- for label, values := range labelMap {
- var uniqueValues []string
- for v := range values {
- uniqueValues = append(uniqueValues, v)
- }
+ labelMap, err := instance.LabelsWithValues(ctx, req.Start, matchers...)
- result[label] = &logproto.UniqueLabelValues{Values: uniqueValues}
+ if err != nil {
+ return nil, err
+ }
+ result := make(map[string]*logproto.UniqueLabelValues)
+ for label, values := range labelMap {
+ var uniqueValues []string
+ for v := range values {
+ uniqueValues = append(uniqueValues, v)
}
- })
+ result[label] = &logproto.UniqueLabelValues{Values: uniqueValues}
+ }
return &logproto.LabelToValuesResponse{Labels: result}, nil
}
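
The read and write paths above replace the pprof.Do closures with a flatter set-and-restore idiom, which allows early returns without wrapping the body in a function. A minimal sketch of that idiom in isolation (standard library runtime/pprof only; the handler name and label values are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"runtime/pprof"
)

// handleRead tags the goroutine with profiling labels for the duration of the
// call and restores the caller's labels on return.
func handleRead(ctx context.Context) error {
	// The deferred call captures the *incoming* ctx, so the original labels
	// are restored when this function returns.
	defer pprof.SetGoroutineLabels(ctx)
	ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "example"))
	pprof.SetGoroutineLabels(ctx)

	// ... do the labeled work, returning early on error as needed ...
	fmt.Println("working with labels set")
	return nil
}

func main() {
	_ = handleRead(context.Background())
}
```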
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index 1c438bd6bf2c0..570452af44eb0 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -2,6 +2,7 @@ package ingester
import (
"fmt"
+ math "math"
"net"
"net/http"
"net/http/httptest"
@@ -12,11 +13,14 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
+ "github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/middleware"
+ "github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/user"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
@@ -57,7 +61,9 @@ func TestPrepareShutdownMarkerPathNotSet(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ mockRing := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, mockRing)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -80,7 +86,9 @@ func TestPrepareShutdown(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -141,7 +149,9 @@ func TestIngester_GetStreamRates_Correctness(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -172,8 +182,9 @@ func BenchmarkGetStreamRatesAllocs(b *testing.B) {
store := &mockStore{
chunks: map[string][]chunk.Chunk{},
}
+ readRingMock := mockReadRingWithOneActiveIngester()
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(b, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -197,7 +208,9 @@ func TestIngester(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -382,7 +395,9 @@ func TestIngesterStreamLimitExceeded(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, overrides, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, store, overrides, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -676,57 +691,119 @@ func TestIngester_asyncStoreMaxLookBack(t *testing.T) {
func TestValidate(t *testing.T) {
for i, tc := range []struct {
- in Config
- err bool
- expected Config
+ in Config
+ expected Config
+ expectedErr string
}{
{
in: Config{
- MaxChunkAge: time.Minute,
ChunkEncoding: chunkenc.EncGZIP.String(),
- IndexShards: index.DefaultIndexShards,
+ FlushOpBackoff: backoff.Config{
+ MinBackoff: 100 * time.Millisecond,
+ MaxBackoff: 10 * time.Second,
+ MaxRetries: 1,
+ },
+ FlushOpTimeout: 15 * time.Second,
+ IndexShards: index.DefaultIndexShards,
+ MaxChunkAge: time.Minute,
},
expected: Config{
+ ChunkEncoding: chunkenc.EncGZIP.String(),
+ FlushOpBackoff: backoff.Config{
+ MinBackoff: 100 * time.Millisecond,
+ MaxBackoff: 10 * time.Second,
+ MaxRetries: 1,
+ },
+ FlushOpTimeout: 15 * time.Second,
+ IndexShards: index.DefaultIndexShards,
MaxChunkAge: time.Minute,
- ChunkEncoding: chunkenc.EncGZIP.String(),
parsedEncoding: chunkenc.EncGZIP,
- IndexShards: index.DefaultIndexShards,
},
},
{
in: Config{
ChunkEncoding: chunkenc.EncSnappy.String(),
- IndexShards: index.DefaultIndexShards,
+ FlushOpBackoff: backoff.Config{
+ MinBackoff: 100 * time.Millisecond,
+ MaxBackoff: 10 * time.Second,
+ MaxRetries: 1,
+ },
+ FlushOpTimeout: 15 * time.Second,
+ IndexShards: index.DefaultIndexShards,
},
expected: Config{
- ChunkEncoding: chunkenc.EncSnappy.String(),
- parsedEncoding: chunkenc.EncSnappy,
+ ChunkEncoding: chunkenc.EncSnappy.String(),
+ FlushOpBackoff: backoff.Config{
+ MinBackoff: 100 * time.Millisecond,
+ MaxBackoff: 10 * time.Second,
+ MaxRetries: 1,
+ },
+ FlushOpTimeout: 15 * time.Second,
IndexShards: index.DefaultIndexShards,
+ parsedEncoding: chunkenc.EncSnappy,
},
},
{
in: Config{
- IndexShards: index.DefaultIndexShards,
ChunkEncoding: "bad-enc",
+ FlushOpBackoff: backoff.Config{
+ MinBackoff: 100 * time.Millisecond,
+ MaxBackoff: 10 * time.Second,
+ MaxRetries: 1,
+ },
+ FlushOpTimeout: 15 * time.Second,
+ IndexShards: index.DefaultIndexShards,
},
- err: true,
+ expectedErr: "invalid encoding: bad-enc, supported: none, gzip, lz4-64k, snappy, lz4-256k, lz4-1M, lz4, flate, zstd",
},
{
in: Config{
- MaxChunkAge: time.Minute,
ChunkEncoding: chunkenc.EncGZIP.String(),
+ FlushOpBackoff: backoff.Config{
+ MinBackoff: 100 * time.Millisecond,
+ MaxBackoff: 10 * time.Second,
+ },
+ FlushOpTimeout: 15 * time.Second,
+ IndexShards: index.DefaultIndexShards,
+ MaxChunkAge: time.Minute,
},
- err: true,
+ expectedErr: "invalid flush op max retries: 0",
+ },
+ {
+ in: Config{
+ ChunkEncoding: chunkenc.EncGZIP.String(),
+ FlushOpBackoff: backoff.Config{
+ MinBackoff: 100 * time.Millisecond,
+ MaxBackoff: 10 * time.Second,
+ MaxRetries: 1,
+ },
+ IndexShards: index.DefaultIndexShards,
+ MaxChunkAge: time.Minute,
+ },
+ expectedErr: "invalid flush op timeout: 0s",
+ },
+ {
+ in: Config{
+ ChunkEncoding: chunkenc.EncGZIP.String(),
+ FlushOpBackoff: backoff.Config{
+ MinBackoff: 100 * time.Millisecond,
+ MaxBackoff: 10 * time.Second,
+ MaxRetries: 1,
+ },
+ FlushOpTimeout: 15 * time.Second,
+ MaxChunkAge: time.Minute,
+ },
+ expectedErr: "invalid ingester index shard factor: 0",
},
} {
t.Run(fmt.Sprint(i), func(t *testing.T) {
err := tc.in.Validate()
- if tc.err {
- require.NotNil(t, err)
- return
+ if tc.expectedErr != "" {
+ require.EqualError(t, err, tc.expectedErr)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tc.expected, tc.in)
}
- require.Nil(t, err)
- require.Equal(t, tc.expected, tc.in)
})
}
}
@@ -740,7 +817,9 @@ func Test_InMemoryLabels(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -793,8 +872,9 @@ func TestIngester_GetDetectedLabels(t *testing.T) {
store := &mockStore{
chunks: map[string][]chunk.Chunk{},
}
+ readRingMock := mockReadRingWithOneActiveIngester()
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -856,8 +936,9 @@ func TestIngester_GetDetectedLabelsWithQuery(t *testing.T) {
store := &mockStore{
chunks: map[string][]chunk.Chunk{},
}
+ readRingMock := mockReadRingWithOneActiveIngester()
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -1223,8 +1304,9 @@ func TestStats(t *testing.T) {
ingesterConfig := defaultIngesterTestConfig(t)
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
+ readRingMock := mockReadRingWithOneActiveIngester()
- i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
i.instances["test"] = defaultInstance(t)
@@ -1250,8 +1332,9 @@ func TestVolume(t *testing.T) {
ingesterConfig := defaultIngesterTestConfig(t)
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
+ readRingMock := mockReadRingWithOneActiveIngester()
- i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
i.instances["test"] = defaultInstance(t)
@@ -1329,8 +1412,9 @@ func createIngesterServer(t *testing.T, ingesterConfig Config) (ingesterClient,
t.Helper()
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
+ readRingMock := mockReadRingWithOneActiveIngester()
- ing, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ ing, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
listener := bufconn.Listen(1024 * 1024)
@@ -1409,3 +1493,154 @@ func jsonLine(ts int64, i int) string {
}
return fmt.Sprintf(`{"e":"f", "h":"i", "j":"k", "g":"h", "ts":"%d"}`, ts)
}
+
+type readRingMock struct {
+ replicationSet ring.ReplicationSet
+ getAllHealthyCallsCount int
+ tokenRangesByIngester map[string]ring.TokenRanges
+}
+
+func (r *readRingMock) HealthyInstancesCount() int {
+ return len(r.replicationSet.Instances)
+}
+
+func newReadRingMock(ingesters []ring.InstanceDesc, maxErrors int) *readRingMock {
+ return &readRingMock{
+ replicationSet: ring.ReplicationSet{
+ Instances: ingesters,
+ MaxErrors: maxErrors,
+ },
+ }
+}
+
+func (r *readRingMock) Describe(_ chan<- *prometheus.Desc) {
+}
+
+func (r *readRingMock) Collect(_ chan<- prometheus.Metric) {
+}
+
+func (r *readRingMock) Get(_ uint32, _ ring.Operation, _ []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
+ return r.replicationSet, nil
+}
+
+func (r *readRingMock) ShuffleShard(_ string, size int) ring.ReadRing {
+ // pass by value to copy
+ return func(r readRingMock) *readRingMock {
+ r.replicationSet.Instances = r.replicationSet.Instances[:size]
+ return &r
+ }(*r)
+}
+
+func (r *readRingMock) BatchGet(_ []uint32, _ ring.Operation) ([]ring.ReplicationSet, error) {
+ return []ring.ReplicationSet{r.replicationSet}, nil
+}
+
+func (r *readRingMock) GetAllHealthy(_ ring.Operation) (ring.ReplicationSet, error) {
+ r.getAllHealthyCallsCount++
+ return r.replicationSet, nil
+}
+
+func (r *readRingMock) GetReplicationSetForOperation(_ ring.Operation) (ring.ReplicationSet, error) {
+ return r.replicationSet, nil
+}
+
+func (r *readRingMock) ReplicationFactor() int {
+ return 1
+}
+
+func (r *readRingMock) InstancesCount() int {
+ return len(r.replicationSet.Instances)
+}
+
+func (r *readRingMock) InstancesInZoneCount(_ string) int {
+ return len(r.replicationSet.Instances)
+}
+
+func (r *readRingMock) InstancesWithTokensCount() int {
+ return len(r.replicationSet.Instances)
+}
+
+func (r *readRingMock) InstancesWithTokensInZoneCount(_ string) int {
+ return len(r.replicationSet.Instances)
+}
+
+func (r *readRingMock) ZonesCount() int {
+ return 1
+}
+
+func (r *readRingMock) HealthyInstancesInZoneCount() int {
+ return len(r.replicationSet.Instances)
+}
+
+func (r *readRingMock) Subring(_ uint32, _ int) ring.ReadRing {
+ return r
+}
+
+func (r *readRingMock) HasInstance(instanceID string) bool {
+ for _, ing := range r.replicationSet.Instances {
+ if ing.Addr == instanceID {
+ return true
+ }
+ }
+ return false
+}
+
+func (r *readRingMock) ShuffleShardWithLookback(_ string, _ int, _ time.Duration, _ time.Time) ring.ReadRing {
+ return r
+}
+
+func (r *readRingMock) CleanupShuffleShardCache(_ string) {}
+
+func (r *readRingMock) GetInstanceState(_ string) (ring.InstanceState, error) {
+ return 0, nil
+}
+
+func (r *readRingMock) GetTokenRangesForInstance(instance string) (ring.TokenRanges, error) {
+ if r.tokenRangesByIngester != nil {
+ ranges, exists := r.tokenRangesByIngester[instance]
+ if !exists {
+ return nil, ring.ErrInstanceNotFound
+ }
+ return ranges, nil
+ }
+ tr := ring.TokenRanges{0, math.MaxUint32}
+ return tr, nil
+}
+
+func mockReadRingWithOneActiveIngester() *readRingMock {
+ return newReadRingMock([]ring.InstanceDesc{
+ {Addr: "test", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{1, 2, 3}},
+ }, 0)
+}
+
+func TestUpdateOwnedStreams(t *testing.T) {
+ ingesterConfig := defaultIngesterTestConfig(t)
+ limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
+ require.NoError(t, err)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
+ require.NoError(t, err)
+
+ i.instances["test"] = defaultInstance(t)
+
+ tt := time.Now().Add(-5 * time.Minute)
+ err = i.instances["test"].Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{
+ // both label sets have FastFingerprint=e002a3a451262627
+ {Labels: "{app=\"l\",uniq0=\"0\",uniq1=\"1\"}", Entries: entries(5, tt.Add(time.Minute))},
+ {Labels: "{uniq0=\"1\",app=\"m\",uniq1=\"1\"}", Entries: entries(5, tt)},
+
+ // e002a3a451262247
+ {Labels: "{app=\"l\",uniq0=\"1\",uniq1=\"0\"}", Entries: entries(5, tt.Add(time.Minute))},
+ {Labels: "{uniq1=\"0\",app=\"m\",uniq0=\"0\"}", Entries: entries(5, tt)},
+
+ // e002a2a4512624f4
+ {Labels: "{app=\"l\",uniq0=\"0\",uniq1=\"0\"}", Entries: entries(5, tt.Add(time.Minute))},
+ {Labels: "{uniq0=\"1\",uniq1=\"0\",app=\"m\"}", Entries: entries(5, tt)},
+ }})
+ require.NoError(t, err)
+
+ // streams are pushed, let's check owned stream counts
+ ownedStreams := i.instances["test"].ownedStreamsSvc.getOwnedStreamCount()
+ require.Equal(t, 8, ownedStreams)
+}
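
Before the instance.go changes that follow, a hedged sketch of how a ring-based ownership check of this kind typically looks with dskit. This is not the PR's code: the operation, buffer sizing, and the Addr-versus-ingester-ID comparison are assumptions mirroring the updateOwnedStreams hunk below.

```go
package example

import (
	"fmt"

	"github.com/grafana/dskit/ring"
)

// isOwned reports whether the given ingester appears in the replication set
// that the ring assigns to a stream's token. Comparing InstanceDesc.Addr with
// the ingester ID is an assumption for this sketch.
func isOwned(r ring.ReadRing, streamToken uint32, ingesterID string) (bool, error) {
	// Reusable buffers avoid a per-stream allocation, mirroring the
	// descsBuf/hostsBuf/zoneBuf pattern in the diff below.
	descs := make([]ring.InstanceDesc, r.ReplicationFactor()+1)
	hosts := make([]string, r.ReplicationFactor()+1)
	zones := make([]string, r.ZonesCount()+1)

	rs, err := r.Get(streamToken, ring.WriteNoExtend, descs, hosts, zones)
	if err != nil {
		return false, fmt.Errorf("getting replication set: %w", err)
	}
	for _, inst := range rs.Instances {
		if inst.Addr == ingesterID {
			return true, nil
		}
	}
	return false, nil
}
```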
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index 7f1ec78601fff..7391a8af71916 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -10,12 +10,10 @@ import (
"syscall"
"time"
- "github.com/grafana/loki/v3/pkg/logqlmodel/metadata"
-
- "github.com/grafana/loki/v3/pkg/util/httpreq"
-
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
+ "github.com/grafana/dskit/ring"
+ "github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -26,8 +24,6 @@ import (
tsdb_record "github.com/prometheus/prometheus/tsdb/record"
"go.uber.org/atomic"
- "github.com/grafana/dskit/tenant"
-
"github.com/grafana/loki/v3/pkg/analytics"
"github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
@@ -39,6 +35,7 @@ import (
"github.com/grafana/loki/v3/pkg/logql"
"github.com/grafana/loki/v3/pkg/logql/log"
"github.com/grafana/loki/v3/pkg/logql/syntax"
+ "github.com/grafana/loki/v3/pkg/logqlmodel/metadata"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
"github.com/grafana/loki/v3/pkg/runtime"
"github.com/grafana/loki/v3/pkg/storage/chunk"
@@ -47,8 +44,11 @@ import (
"github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/constants"
"github.com/grafana/loki/v3/pkg/util/deletion"
+ "github.com/grafana/loki/v3/pkg/util/httpreq"
util_log "github.com/grafana/loki/v3/pkg/util/log"
mathutil "github.com/grafana/loki/v3/pkg/util/math"
+ lokiring "github.com/grafana/loki/v3/pkg/util/ring"
+ server_util "github.com/grafana/loki/v3/pkg/util/server"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -185,6 +185,7 @@ func newInstance(
customStreamsTracker: customStreamsTracker,
}
i.mapper = NewFPMapper(i.getLabelsFromFingerprint)
+
return i, err
}
@@ -308,7 +309,7 @@ func (i *instance) createStream(ctx context.Context, pushReqStream logproto.Stre
return nil, fmt.Errorf("failed to create stream: %w", err)
}
- s := newStream(chunkfmt, headfmt, i.cfg, i.limiter, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.streamRateCalculator, i.metrics, i.writeFailures)
+ s := newStream(chunkfmt, headfmt, i.cfg, i.limiter, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.streamRateCalculator, i.metrics, i.writeFailures, i.configs)
// record will be nil when replaying the wal (we don't want to rewrite wal entries as we replay them).
if record != nil {
@@ -354,7 +355,8 @@ func (i *instance) onStreamCreated(s *stream) {
i.streamsCreatedTotal.Inc()
i.addTailersToNewStream(s)
streamsCountStats.Add(1)
- i.ownedStreamsSvc.incOwnedStreamCount()
+ // we count a newly created stream as owned
+ i.ownedStreamsSvc.trackStreamOwnership(s.fp, true)
if i.configs.LogStreamCreation(i.instanceID) {
level.Debug(util_log.Logger).Log(
"msg", "successfully created stream",
@@ -372,12 +374,9 @@ func (i *instance) createStreamByFP(ls labels.Labels, fp model.Fingerprint) (*st
return nil, fmt.Errorf("failed to create stream for fingerprint: %w", err)
}
- s := newStream(chunkfmt, headfmt, i.cfg, i.limiter, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.streamRateCalculator, i.metrics, i.writeFailures)
+ s := newStream(chunkfmt, headfmt, i.cfg, i.limiter, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.streamRateCalculator, i.metrics, i.writeFailures, i.configs)
- i.streamsCreatedTotal.Inc()
- memoryStreams.WithLabelValues(i.instanceID).Inc()
- memoryStreamsLabelsBytes.Add(float64(len(s.labels.String())))
- i.addTailersToNewStream(s)
+ i.onStreamCreated(s)
return s, nil
}
@@ -421,7 +420,7 @@ func (i *instance) removeStream(s *stream) {
memoryStreams.WithLabelValues(i.instanceID).Dec()
memoryStreamsLabelsBytes.Sub(float64(len(s.labels.String())))
streamsCountStats.Add(-1)
- i.ownedStreamsSvc.decOwnedStreamCount()
+ i.ownedStreamsSvc.trackRemovedStream(s.fp)
}
}
@@ -441,6 +440,12 @@ func (i *instance) getLabelsFromFingerprint(fp model.Fingerprint) labels.Labels
}
func (i *instance) Query(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) {
+ it, err := i.query(ctx, req)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return it, err
+}
+
+func (i *instance) query(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) {
expr, err := req.LogSelector()
if err != nil {
return nil, err
@@ -495,6 +500,12 @@ func (i *instance) Query(ctx context.Context, req logql.SelectLogParams) (iter.E
}
func (i *instance) QuerySample(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) {
+ it, err := i.querySample(ctx, req)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return it, err
+}
+
+func (i *instance) querySample(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) {
expr, err := req.Expr()
if err != nil {
return nil, err
@@ -556,6 +567,12 @@ func (i *instance) QuerySample(ctx context.Context, req logql.SelectSampleParams
// If label matchers are given only the matching streams are fetched from the index.
// The label names or values are then retrieved from those matching streams.
func (i *instance) Label(ctx context.Context, req *logproto.LabelRequest, matchers ...*labels.Matcher) (*logproto.LabelResponse, error) {
+ lr, err := i.label(ctx, req, matchers...)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return lr, err
+}
+
+func (i *instance) label(ctx context.Context, req *logproto.LabelRequest, matchers ...*labels.Matcher) (*logproto.LabelResponse, error) {
if len(matchers) == 0 {
var labels []string
if req.Values {
@@ -709,6 +726,12 @@ func (i *instance) Series(ctx context.Context, req *logproto.SeriesRequest) (*lo
}
func (i *instance) GetStats(ctx context.Context, req *logproto.IndexStatsRequest) (*logproto.IndexStatsResponse, error) {
+ isr, err := i.getStats(ctx, req)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return isr, err
+}
+
+func (i *instance) getStats(ctx context.Context, req *logproto.IndexStatsRequest) (*logproto.IndexStatsResponse, error) {
matchers, err := syntax.ParseMatchers(req.Matchers, true)
if err != nil {
return nil, err
@@ -765,6 +788,12 @@ func (i *instance) GetStats(ctx context.Context, req *logproto.IndexStatsRequest
}
func (i *instance) GetVolume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) {
+ vr, err := i.getVolume(ctx, req)
+ err = server_util.ClientGrpcStatusAndError(err)
+ return vr, err
+}
+
+func (i *instance) getVolume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) {
matchers, err := syntax.ParseMatchers(req.Matchers, true)
if err != nil && req.Matchers != seriesvolume.MatchAny {
return nil, err
@@ -1144,3 +1173,37 @@ func minTs(stream *logproto.Stream) model.Time {
}
return model.TimeFromUnixNano(streamMinTs)
}
+
+// updateOwnedStreams re-resolves ownership for every stream in the instance: for each stream we check whether it is owned by this ingester and update the owned / not-owned counts accordingly.
+func (i *instance) updateOwnedStreams(ingesterRing ring.ReadRing, ingesterID string) error {
+ start := time.Now()
+ defer func() {
+ i.metrics.streamsOwnershipCheck.Observe(float64(time.Since(start).Milliseconds()))
+ }()
+ var descsBuf = make([]ring.InstanceDesc, ingesterRing.ReplicationFactor()+1)
+ var hostsBuf = make([]string, ingesterRing.ReplicationFactor()+1)
+ var zoneBuf = make([]string, ingesterRing.ZonesCount()+1)
+ var err error
+ i.streams.WithLock(func() {
+ i.ownedStreamsSvc.resetStreamCounts()
+ err = i.streams.ForEach(func(s *stream) (bool, error) {
+ replicationSet, err := ingesterRing.Get(lokiring.TokenFor(i.instanceID, s.labelsString), ring.WriteNoExtend, descsBuf, hostsBuf, zoneBuf)
+ if err != nil {
+ return false, fmt.Errorf("error getting replication set for stream %s: %v", s.labelsString, err)
+ }
+ ownedStream := i.isOwnedStream(replicationSet, ingesterID)
+ i.ownedStreamsSvc.trackStreamOwnership(s.fp, ownedStream)
+ return true, nil
+ })
+ })
+ return err
+}
+
+func (i *instance) isOwnedStream(replicationSet ring.ReplicationSet, ingesterID string) bool {
+ for _, instanceDesc := range replicationSet.Instances {
+ if instanceDesc.Id == ingesterID {
+ return true
+ }
+ }
+ return false
+}
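
The core of the new ownership recalculation is the per-stream ring lookup above. Below is a minimal standalone sketch of that decision, with the buffer arguments elided for brevity (the real code reuses preallocated buffers sized from the replication factor and zone count); `ownsStream` is an illustrative name, not a function added by this PR.

```go
package ingester

import (
	"github.com/grafana/dskit/ring"

	lokiring "github.com/grafana/loki/v3/pkg/util/ring"
)

// ownsStream mirrors what updateOwnedStreams does for a single stream: derive
// the stream token from tenant + labels, resolve the replica set for that
// token, and report ownership iff this ingester's ID is in the set.
func ownsStream(r ring.ReadRing, tenantID, labelsString, ingesterID string) (bool, error) {
	token := lokiring.TokenFor(tenantID, labelsString)
	rs, err := r.Get(token, ring.WriteNoExtend, nil, nil, nil) // nil buffers allocate on demand
	if err != nil {
		return false, err
	}
	for _, inst := range rs.Instances {
		if inst.Id == ingesterID {
			return true, nil
		}
	}
	return false, nil
}
```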
diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
index 7f7dc30361d6a..80074f6391e90 100644
--- a/pkg/ingester/instance_test.go
+++ b/pkg/ingester/instance_test.go
@@ -18,6 +18,7 @@ import (
"github.com/grafana/loki/v3/pkg/logql/log"
+ "github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/flagext"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
@@ -40,9 +41,16 @@ import (
func defaultConfig() *Config {
cfg := Config{
- BlockSize: 512,
- ChunkEncoding: "gzip",
- IndexShards: 32,
+ BlockSize: 512,
+ ChunkEncoding: "gzip",
+ IndexShards: 32,
+ FlushOpTimeout: 15 * time.Second,
+ FlushOpBackoff: backoff.Config{
+ MinBackoff: 100 * time.Millisecond,
+ MaxBackoff: 10 * time.Second,
+ MaxRetries: 1,
+ },
+ OwnedStreamsCheckInterval: 1 * time.Second,
}
if err := cfg.Validate(); err != nil {
panic(errors.Wrap(err, "error building default test config"))
@@ -308,9 +316,10 @@ func setupTestStreams(t *testing.T) (*instance, time.Time, int) {
require.NoError(t, err)
chunkfmt, headfmt, err := instance.chunkFormatAt(minTs(&testStream))
require.NoError(t, err)
- chunk := newStream(chunkfmt, headfmt, cfg, limiter, "fake", 0, nil, true, NewStreamRateCalculator(), NilMetrics, nil).NewChunk()
+ chunk := newStream(chunkfmt, headfmt, cfg, limiter, "fake", 0, nil, true, NewStreamRateCalculator(), NilMetrics, nil, nil).NewChunk()
for _, entry := range testStream.Entries {
- err = chunk.Append(&entry)
+ dup, err := chunk.Append(&entry)
+ require.False(t, dup)
require.NoError(t, err)
}
stream.chunks = append(stream.chunks, chunkDesc{chunk: chunk})
@@ -567,7 +576,7 @@ func Benchmark_instance_addNewTailer(b *testing.B) {
b.Run("addTailersToNewStream", func(b *testing.B) {
for n := 0; n < b.N; n++ {
- inst.addTailersToNewStream(newStream(chunkfmt, headfmt, nil, limiter, "fake", 0, lbs, true, NewStreamRateCalculator(), NilMetrics, nil))
+ inst.addTailersToNewStream(newStream(chunkfmt, headfmt, nil, limiter, "fake", 0, lbs, true, NewStreamRateCalculator(), NilMetrics, nil, nil))
}
})
}
@@ -1096,7 +1105,8 @@ func TestStreamShardingUsage(t *testing.T) {
t.Run("invalid push returns error", func(t *testing.T) {
tracker := &mockUsageTracker{}
- i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, tracker)
+
+ i, _ := newInstance(&Config{IndexShards: 1, OwnedStreamsCheckInterval: 1 * time.Second}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, tracker)
ctx := context.Background()
err = i.Push(ctx, &logproto.PushRequest{
@@ -1116,7 +1126,7 @@ func TestStreamShardingUsage(t *testing.T) {
})
t.Run("valid push returns no error", func(t *testing.T) {
- i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ i, _ := newInstance(&Config{IndexShards: 1, OwnedStreamsCheckInterval: 1 * time.Second}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
ctx := context.Background()
err = i.Push(ctx, &logproto.PushRequest{
diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go
index daa1fe7aec8da..1ed3a3ea27163 100644
--- a/pkg/ingester/limiter.go
+++ b/pkg/ingester/limiter.go
@@ -20,6 +20,8 @@ const (
// to count members
type RingCount interface {
HealthyInstancesCount() int
+ HealthyInstancesInZoneCount() int
+ ZonesCount() int
}
type Limits interface {
@@ -106,22 +108,31 @@ func (l *Limiter) minNonZero(first, second int) int {
}
func (l *Limiter) convertGlobalToLocalLimit(globalLimit int) int {
- if globalLimit == 0 {
+ if globalLimit == 0 || l.replicationFactor == 0 {
return 0
}
- // todo: change to healthyInstancesInZoneCount() once
- // Given we don't need a super accurate count (ie. when the ingesters
- // topology changes) and we prefer to always be in favor of the tenant,
- // we can use a per-ingester limit equal to:
- // (global limit / number of ingesters) * replication factor
- numIngesters := l.ring.HealthyInstancesCount()
- // May happen because the number of ingesters is asynchronously updated.
- // If happens, we just temporarily ignore the global limit.
+ zonesCount := l.ring.ZonesCount()
+ if zonesCount <= 1 {
+ return calculateLimitForSingleZone(globalLimit, l)
+ }
+
+ return calculateLimitForMultipleZones(globalLimit, zonesCount, l)
+}
+
+func calculateLimitForSingleZone(globalLimit int, l *Limiter) int {
+ numIngesters := l.ring.HealthyInstancesCount()
if numIngesters > 0 {
return int((float64(globalLimit) / float64(numIngesters)) * float64(l.replicationFactor))
}
+ return 0
+}
+func calculateLimitForMultipleZones(globalLimit, zonesCount int, l *Limiter) int {
+ ingestersInZone := l.ring.HealthyInstancesInZoneCount()
+ if ingestersInZone > 0 {
+ return int((float64(globalLimit) * float64(l.replicationFactor)) / float64(zonesCount) / float64(ingestersInZone))
+ }
return 0
}
diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go
index b00bede10417d..0d0055d0a0afb 100644
--- a/pkg/ingester/limiter_test.go
+++ b/pkg/ingester/limiter_test.go
@@ -214,6 +214,14 @@ func (m *ringCountMock) HealthyInstancesCount() int {
return m.count
}
+func (m *ringCountMock) ZonesCount() int {
+ return 1
+}
+
+func (m *ringCountMock) HealthyInstancesInZoneCount() int {
+ return m.count
+}
+
// Assert some of the weirder (bug?) behavior of golang.org/x/time/rate
func TestGoLimiter(t *testing.T) {
for _, tc := range []struct {
@@ -254,3 +262,59 @@ func TestGoLimiter(t *testing.T) {
})
}
}
+
+type MockRing struct {
+ zonesCount int
+ healthyInstancesCount int
+ healthyInstancesInZoneCount int
+}
+
+func (m *MockRing) ZonesCount() int {
+ return m.zonesCount
+}
+
+func (m *MockRing) HealthyInstancesCount() int {
+ return m.healthyInstancesCount
+}
+
+func (m *MockRing) HealthyInstancesInZoneCount() int {
+ return m.healthyInstancesInZoneCount
+}
+
+func TestConvertGlobalToLocalLimit(t *testing.T) {
+ tests := []struct {
+ name string
+ globalLimit int
+ zonesCount int
+ healthyInstancesCount int
+ healthyInstancesInZoneCount int
+ replicationFactor int
+ expectedLocalLimit int
+ }{
+ {"GlobalLimitZero", 0, 1, 1, 1, 3, 0},
+ {"SingleZoneMultipleIngesters", 100, 1, 10, 10, 3, 30},
+ {"MultipleZones", 200, 3, 30, 10, 3, 20},
+ {"MultipleZonesNoHealthyIngesters", 200, 2, 0, 0, 3, 0},
+ {"MultipleZonesNoHealthyIngestersInZone", 200, 3, 10, 0, 3, 0},
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ mockRing := &MockRing{
+ zonesCount: tc.zonesCount,
+ healthyInstancesCount: tc.healthyInstancesCount,
+ healthyInstancesInZoneCount: tc.healthyInstancesInZoneCount,
+ }
+
+ limiter := &Limiter{
+ ring: mockRing,
+ replicationFactor: tc.replicationFactor,
+ }
+
+ localLimit := limiter.convertGlobalToLocalLimit(tc.globalLimit)
+ if localLimit != tc.expectedLocalLimit {
+ t.Errorf("expected %d, got %d", tc.expectedLocalLimit, localLimit)
+ }
+ })
+ }
+}
diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go
index 756eba0ebea74..ad190285ccd08 100644
--- a/pkg/ingester/metrics.go
+++ b/pkg/ingester/metrics.go
@@ -65,7 +65,9 @@ type ingesterMetrics struct {
// Shutdown marker for ingester scale down
shutdownMarker prometheus.Gauge
- flushQueueLength prometheus.Gauge
+ flushQueueLength prometheus.Gauge
+ duplicateLogBytesTotal *prometheus.CounterVec
+ streamsOwnershipCheck prometheus.Histogram
}
// setRecoveryBytesInUse bounds the bytes reports to >= 0.
@@ -293,5 +295,20 @@ func newIngesterMetrics(r prometheus.Registerer, metricsNamespace string) *inges
Name: "flush_queue_length",
Help: "The total number of series pending in the flush queue.",
}),
+
+ streamsOwnershipCheck: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+ Namespace: constants.Loki,
+ Name: "ingester_streams_ownership_check_duration_ms",
+ Help: "Distribution of streams ownership check durations in milliseconds.",
+ // 100ms to 5s.
+ Buckets: []float64{100, 250, 350, 500, 750, 1000, 1500, 2000, 5000},
+ }),
+
+ duplicateLogBytesTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: "ingester",
+ Name: "duplicate_log_bytes_total",
+ Help: "The total number of bytes that were discarded for duplicate log lines.",
+ }, []string{"tenant"}),
}
}
diff --git a/pkg/ingester/owned_streams.go b/pkg/ingester/owned_streams.go
index 3be6fb40fdd86..3bb729815e718 100644
--- a/pkg/ingester/owned_streams.go
+++ b/pkg/ingester/owned_streams.go
@@ -3,26 +3,37 @@ package ingester
import (
"sync"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/common/model"
"go.uber.org/atomic"
+
+ "github.com/grafana/loki/v3/pkg/util/constants"
)
-type ownedStreamService struct {
- tenantID string
- limiter *Limiter
- fixedLimit *atomic.Int32
+var notOwnedStreamsMetric = promauto.NewGauge(prometheus.GaugeOpts{
+ Namespace: constants.Loki,
+ Name: "ingester_not_owned_streams",
+ Help: "The total number of not owned streams in memory.",
+})
- //todo: implement job to recalculate it
- ownedStreamCount int
- notOwnedStreamCount int
- lock sync.RWMutex
+type ownedStreamService struct {
+ tenantID string
+ limiter *Limiter
+ fixedLimit *atomic.Int32
+ ownedStreamCount int
+ lock sync.RWMutex
+ notOwnedStreams map[model.Fingerprint]any
}
func newOwnedStreamService(tenantID string, limiter *Limiter) *ownedStreamService {
svc := &ownedStreamService{
- tenantID: tenantID,
- limiter: limiter,
- fixedLimit: atomic.NewInt32(0),
+ tenantID: tenantID,
+ limiter: limiter,
+ fixedLimit: atomic.NewInt32(0),
+ notOwnedStreams: make(map[model.Fingerprint]any),
}
+
svc.updateFixedLimit()
return svc
}
@@ -33,27 +44,51 @@ func (s *ownedStreamService) getOwnedStreamCount() int {
return s.ownedStreamCount
}
-func (s *ownedStreamService) updateFixedLimit() {
- limit, _, _, _ := s.limiter.GetStreamCountLimit(s.tenantID)
- s.fixedLimit.Store(int32(limit))
+func (s *ownedStreamService) updateFixedLimit() (old, new int32) {
+ newLimit, _, _, _ := s.limiter.GetStreamCountLimit(s.tenantID)
+ return s.fixedLimit.Swap(int32(newLimit)), int32(newLimit)
+
}
func (s *ownedStreamService) getFixedLimit() int {
return int(s.fixedLimit.Load())
}
-func (s *ownedStreamService) incOwnedStreamCount() {
+func (s *ownedStreamService) trackStreamOwnership(fp model.Fingerprint, owned bool) {
s.lock.Lock()
defer s.lock.Unlock()
- s.ownedStreamCount++
+ if owned {
+ s.ownedStreamCount++
+ return
+ }
+ notOwnedStreamsMetric.Inc()
+ s.notOwnedStreams[fp] = nil
}
-func (s *ownedStreamService) decOwnedStreamCount() {
+func (s *ownedStreamService) trackRemovedStream(fp model.Fingerprint) {
s.lock.Lock()
defer s.lock.Unlock()
- if s.notOwnedStreamCount > 0 {
- s.notOwnedStreamCount--
+
+ if _, notOwned := s.notOwnedStreams[fp]; notOwned {
+ notOwnedStreamsMetric.Dec()
+ delete(s.notOwnedStreams, fp)
return
}
s.ownedStreamCount--
}
+
+func (s *ownedStreamService) resetStreamCounts() {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ s.ownedStreamCount = 0
+ notOwnedStreamsMetric.Sub(float64(len(s.notOwnedStreams)))
+ s.notOwnedStreams = make(map[model.Fingerprint]any)
+}
+
+func (s *ownedStreamService) isStreamNotOwned(fp model.Fingerprint) bool {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+
+ _, notOwned := s.notOwnedStreams[fp]
+ return notOwned
+}
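
Replacing the two counters with a counter plus a fingerprint set is the key design change here: owned streams stay a cheap counter, while only not-owned streams (normally few or none) are tracked individually, so trackRemovedStream knows which bucket to shrink. A small sketch of the invariant, assuming a *Limiter the ingester has already built; exampleOwnershipBookkeeping is illustrative, not part of the PR.

```go
package ingester

import "github.com/prometheus/common/model"

func exampleOwnershipBookkeeping(limiter *Limiter) {
	svc := newOwnedStreamService("tenant-a", limiter)

	svc.trackStreamOwnership(model.Fingerprint(1), true)  // ownedStreamCount == 1
	svc.trackStreamOwnership(model.Fingerprint(2), false) // notOwnedStreams == {2}, gauge +1

	svc.trackRemovedStream(model.Fingerprint(2)) // fingerprint found in the map: gauge -1, counter untouched
	svc.trackRemovedStream(model.Fingerprint(1)) // not in the map: ownedStreamCount == 0
}
```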
diff --git a/pkg/ingester/owned_streams_test.go b/pkg/ingester/owned_streams_test.go
index 759927a1d0cfe..7f114922fa447 100644
--- a/pkg/ingester/owned_streams_test.go
+++ b/pkg/ingester/owned_streams_test.go
@@ -4,6 +4,7 @@ import (
"sync"
"testing"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/validation"
@@ -28,39 +29,60 @@ func Test_OwnedStreamService(t *testing.T) {
service.updateFixedLimit()
require.Equal(t, 100, service.getFixedLimit())
- service.incOwnedStreamCount()
- service.incOwnedStreamCount()
- service.incOwnedStreamCount()
+ service.trackStreamOwnership(model.Fingerprint(1), true)
+ service.trackStreamOwnership(model.Fingerprint(2), true)
+ service.trackStreamOwnership(model.Fingerprint(3), true)
require.Equal(t, 3, service.getOwnedStreamCount())
+ require.Len(t, service.notOwnedStreams, 0)
- // simulate the effect from the recalculation job
- service.notOwnedStreamCount = 1
- service.ownedStreamCount = 2
+ service.resetStreamCounts()
+ service.trackStreamOwnership(model.Fingerprint(3), true)
+ service.trackStreamOwnership(model.Fingerprint(3), false)
+ require.Equal(t, 1, service.getOwnedStreamCount(),
+ "owned streams count must not be changed because not owned stream can be reported only by recalculate_owned_streams job that resets the counters before checking all the streams")
+ require.Len(t, service.notOwnedStreams, 1)
+ require.True(t, service.isStreamNotOwned(model.Fingerprint(3)))
+
+ service.resetStreamCounts()
+ service.trackStreamOwnership(model.Fingerprint(1), true)
+ service.trackStreamOwnership(model.Fingerprint(2), true)
+ service.trackStreamOwnership(model.Fingerprint(3), false)
- service.decOwnedStreamCount()
- require.Equal(t, 2, service.getOwnedStreamCount(), "owned stream count must be decremented only when notOwnedStreamCount is set to 0")
- require.Equal(t, 0, service.notOwnedStreamCount)
+ service.trackRemovedStream(model.Fingerprint(3))
+ require.Equal(t, 2, service.getOwnedStreamCount(), "owned stream count must be decremented only when notOwnedStreams does not contain this fingerprint")
+ require.Len(t, service.notOwnedStreams, 0)
- service.decOwnedStreamCount()
+ service.trackRemovedStream(model.Fingerprint(2))
require.Equal(t, 1, service.getOwnedStreamCount())
- require.Equal(t, 0, service.notOwnedStreamCount, "notOwnedStreamCount must not be decremented lower than 0")
+ require.Len(t, service.notOwnedStreams, 0)
group := sync.WaitGroup{}
- group.Add(200)
+ group.Add(100)
for i := 0; i < 100; i++ {
- go func() {
+ go func(i int) {
defer group.Done()
- service.incOwnedStreamCount()
- }()
+ service.trackStreamOwnership(model.Fingerprint(i+1000), true)
+ }(i)
}
+ group.Wait()
+ group.Add(100)
for i := 0; i < 100; i++ {
- go func() {
+ go func(i int) {
defer group.Done()
- service.decOwnedStreamCount()
- }()
+ service.trackRemovedStream(model.Fingerprint(i + 1000))
+ }(i)
}
group.Wait()
require.Equal(t, 1, service.getOwnedStreamCount(), "owned stream count must not be changed")
+
+ // simulate the effect from the recalculation job
+ service.trackStreamOwnership(model.Fingerprint(44), false)
+ service.trackStreamOwnership(model.Fingerprint(45), true)
+
+ service.resetStreamCounts()
+
+ require.Equal(t, 0, service.getOwnedStreamCount())
+ require.Len(t, service.notOwnedStreams, 0)
}
diff --git a/pkg/ingester/recalculate_owned_streams.go b/pkg/ingester/recalculate_owned_streams.go
new file mode 100644
index 0000000000000..d3bf79d29f743
--- /dev/null
+++ b/pkg/ingester/recalculate_owned_streams.go
@@ -0,0 +1,89 @@
+package ingester
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/grafana/dskit/ring"
+ "github.com/grafana/dskit/services"
+)
+
+type recalculateOwnedStreams struct {
+ services.Service
+
+ logger log.Logger
+
+ instancesSupplier func() []*instance
+ ingesterID string
+ previousRing ring.ReplicationSet
+ ingestersRing ring.ReadRing
+ ticker *time.Ticker
+}
+
+func newRecalculateOwnedStreams(instancesSupplier func() []*instance, ingesterID string, ring ring.ReadRing, ringPollInterval time.Duration, logger log.Logger) *recalculateOwnedStreams {
+ svc := &recalculateOwnedStreams{
+ ingestersRing: ring,
+ instancesSupplier: instancesSupplier,
+ ingesterID: ingesterID,
+ logger: logger,
+ }
+ svc.Service = services.NewTimerService(ringPollInterval, nil, svc.iteration, nil)
+ return svc
+}
+
+func (s *recalculateOwnedStreams) iteration(_ context.Context) error {
+ s.recalculate()
+ return nil
+}
+
+func (s *recalculateOwnedStreams) recalculate() {
+ level.Info(s.logger).Log("msg", "starting recalculate owned streams job")
+ defer func() {
+ s.updateFixedLimitForAll()
+ level.Info(s.logger).Log("msg", "completed recalculate owned streams job")
+ }()
+ ringChanged, err := s.checkRingForChanges()
+ if err != nil {
+ level.Error(s.logger).Log("msg", "failed to check ring for changes", "err", err)
+ return
+ }
+ if !ringChanged {
+ level.Debug(s.logger).Log("msg", "ring is not changed, skipping the job")
+ return
+ }
+ level.Info(s.logger).Log("msg", "detected ring changes, re-evaluating streams ownership")
+
+ for _, instance := range s.instancesSupplier() {
+ if !instance.limiter.limits.UseOwnedStreamCount(instance.instanceID) {
+ continue
+ }
+
+ level.Info(s.logger).Log("msg", "updating streams ownership", "tenant", instance.instanceID)
+ err := instance.updateOwnedStreams(s.ingestersRing, s.ingesterID)
+ if err != nil {
+ level.Error(s.logger).Log("msg", "failed to re-evaluate streams ownership", "tenant", instance.instanceID, "err", err)
+ }
+ }
+}
+
+func (s *recalculateOwnedStreams) updateFixedLimitForAll() {
+ for _, instance := range s.instancesSupplier() {
+ oldLimit, newLimit := instance.ownedStreamsSvc.updateFixedLimit()
+ if oldLimit != newLimit {
+ level.Info(s.logger).Log("msg", "fixed limit has been updated", "tenant", instance.instanceID, "old", oldLimit, "new", newLimit)
+ }
+ }
+}
+
+func (s *recalculateOwnedStreams) checkRingForChanges() (bool, error) {
+ rs, err := s.ingestersRing.GetAllHealthy(ring.WriteNoExtend)
+ if err != nil {
+ return false, err
+ }
+
+ ringChanged := ring.HasReplicationSetChangedWithoutStateOrAddr(s.previousRing, rs)
+ s.previousRing = rs
+ return ringChanged, nil
+}
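
Since this is a dskit TimerService, its lifecycle is the standard start/stop dance; in production the poll interval comes from the new OwnedStreamsCheckInterval setting seen in the tests, and the 30s below is only a placeholder. A hedged sketch of how an owner would run it; runRecalculateOwnedStreams is illustrative, not the actual wiring in the ingester.

```go
package ingester

import (
	"context"
	"time"

	"github.com/go-kit/log"
	"github.com/grafana/dskit/ring"
	"github.com/grafana/dskit/services"
)

func runRecalculateOwnedStreams(ctx context.Context, instances func() []*instance, ingesterID string, ingesterRing ring.ReadRing, logger log.Logger) error {
	svc := newRecalculateOwnedStreams(instances, ingesterID, ingesterRing, 30*time.Second, logger)
	if err := services.StartAndAwaitRunning(ctx, svc); err != nil {
		return err
	}
	// Each tick checks the ring for token changes and, only when it changed,
	// re-resolves ownership for tenants with the owned-stream limit enabled.
	<-ctx.Done()
	return services.StopAndAwaitTerminated(context.Background(), svc)
}
```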
diff --git a/pkg/ingester/recalculate_owned_streams_test.go b/pkg/ingester/recalculate_owned_streams_test.go
new file mode 100644
index 0000000000000..91b32baef820d
--- /dev/null
+++ b/pkg/ingester/recalculate_owned_streams_test.go
@@ -0,0 +1,197 @@
+package ingester
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/grafana/dskit/ring"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/runtime"
+ lokiring "github.com/grafana/loki/v3/pkg/util/ring"
+ "github.com/grafana/loki/v3/pkg/validation"
+)
+
+func Test_recalculateOwnedStreams_newRecalculateOwnedStreams(t *testing.T) {
+ mockInstancesSupplier := &mockTenantsSuplier{tenants: []*instance{}}
+ mockRing := newReadRingMock([]ring.InstanceDesc{
+ {Addr: "test", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{1, 2, 3}},
+ }, 0)
+ service := newRecalculateOwnedStreams(mockInstancesSupplier.get, "test", mockRing, 50*time.Millisecond, log.NewNopLogger())
+ require.Equal(t, 0, mockRing.getAllHealthyCallsCount, "ring must be called only after service's start up")
+ ctx := context.Background()
+ require.NoError(t, service.StartAsync(ctx))
+ require.NoError(t, service.AwaitRunning(ctx))
+ require.Eventually(t, func() bool {
+ return mockRing.getAllHealthyCallsCount >= 2
+ }, 1*time.Second, 50*time.Millisecond, "expected at least two runs of the iteration")
+}
+
+func Test_recalculateOwnedStreams_recalculate(t *testing.T) {
+ tests := map[string]struct {
+ featureEnabled bool
+ expectedOwnedStreamCount int
+ expectedNotOwnedStreamCount int
+ }{
+ "expected streams ownership to be recalculated": {
+ featureEnabled: true,
+ expectedOwnedStreamCount: 4,
+ expectedNotOwnedStreamCount: 3,
+ },
+ "expected streams ownership recalculation to be skipped": {
+ featureEnabled: false,
+ expectedOwnedStreamCount: 7,
+ },
+ }
+ for testName, testData := range tests {
+ t.Run(testName, func(t *testing.T) {
+ currentIngesterName := "ingester-0"
+ tenantName := "tenant-a"
+
+ mockRing := &mockStreamsOwnershipRing{
+ currentIngesterName: currentIngesterName,
+ tenantName: tenantName,
+ readRingMock: readRingMock{
+ replicationSet: ring.ReplicationSet{
+ Instances: []ring.InstanceDesc{{Addr: currentIngesterName, Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{100, 200, 300}}},
+ },
+ },
+ }
+
+ limits, err := validation.NewOverrides(validation.Limits{
+ MaxGlobalStreamsPerUser: 100,
+ UseOwnedStreamCount: testData.featureEnabled,
+ }, nil)
+ require.NoError(t, err)
+ limiter := NewLimiter(limits, NilMetrics, mockRing, 1)
+
+ tenant, err := newInstance(
+ defaultConfig(),
+ defaultPeriodConfigs,
+ tenantName,
+ limiter,
+ runtime.DefaultTenantConfigs(),
+ noopWAL{},
+ NilMetrics,
+ nil,
+ nil,
+ nil,
+ nil,
+ NewStreamRateCalculator(),
+ nil,
+ nil,
+ )
+ require.NoError(t, err)
+ require.Equal(t, 100, tenant.ownedStreamsSvc.getFixedLimit(), "MaxGlobalStreamsPerUser is 100 at this moment")
+ // not owned streams
+ mockRing.addMapping(createStream(t, tenant, 49), false)
+ mockRing.addMapping(createStream(t, tenant, 101), false)
+ mockRing.addMapping(createStream(t, tenant, 301), false)
+
+ // owned streams
+ mockRing.addMapping(createStream(t, tenant, 50), true)
+ mockRing.addMapping(createStream(t, tenant, 60), true)
+ mockRing.addMapping(createStream(t, tenant, 100), true)
+ mockRing.addMapping(createStream(t, tenant, 250), true)
+
+ require.Equal(t, 7, tenant.ownedStreamsSvc.ownedStreamCount)
+ require.Len(t, tenant.ownedStreamsSvc.notOwnedStreams, 0)
+
+ mockTenantsSupplier := &mockTenantsSuplier{tenants: []*instance{tenant}}
+
+ service := newRecalculateOwnedStreams(mockTenantsSupplier.get, currentIngesterName, mockRing, 50*time.Millisecond, log.NewNopLogger())
+ // change the limit to assert that the fixed limit is updated after the recalculation
+ limits.DefaultLimits().MaxGlobalStreamsPerUser = 50
+
+ service.recalculate()
+
+ if testData.featureEnabled {
+ require.Equal(t, 50, tenant.ownedStreamsSvc.getFixedLimit(), "fixed limit must be updated after recalculation")
+ }
+ require.Equal(t, testData.expectedOwnedStreamCount, tenant.ownedStreamsSvc.ownedStreamCount)
+ require.Len(t, tenant.ownedStreamsSvc.notOwnedStreams, testData.expectedNotOwnedStreamCount)
+ })
+ }
+
+}
+
+type mockStreamsOwnershipRing struct {
+ readRingMock
+ currentIngesterName string
+ tenantName string
+ streamMapping map[uint32]ring.ReplicationSet
+}
+
+func (r *mockStreamsOwnershipRing) addMapping(stream *stream, owned bool) {
+ instanceDescs := make([]ring.InstanceDesc, 0, 3)
+ instanceDescs = append(instanceDescs, ring.InstanceDesc{Id: "ingester-444"})
+ instanceDescs = append(instanceDescs, ring.InstanceDesc{Id: "ingester-555"})
+ if owned {
+ instanceDescs = append(instanceDescs, ring.InstanceDesc{Id: r.currentIngesterName})
+ } else {
+ instanceDescs = append(instanceDescs, ring.InstanceDesc{Id: "ingester-333"})
+ }
+ if r.streamMapping == nil {
+ r.streamMapping = make(map[uint32]ring.ReplicationSet)
+ }
+ r.streamMapping[lokiring.TokenFor(r.tenantName, stream.labelsString)] = ring.ReplicationSet{
+ Instances: instanceDescs,
+ }
+}
+
+func (r *mockStreamsOwnershipRing) Get(streamToken uint32, _ ring.Operation, _ []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
+ set, found := r.streamMapping[streamToken]
+ if !found {
+ return ring.ReplicationSet{}, fmt.Errorf("replication set mapping is not found for stream hash: %v", streamToken)
+ }
+ return set, nil
+}
+
+func Test_recalculateOwnedStreams_checkRingForChanges(t *testing.T) {
+ mockRing := &readRingMock{
+ replicationSet: ring.ReplicationSet{
+ Instances: []ring.InstanceDesc{{Addr: "ingester-0", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{100, 200, 300}}},
+ },
+ }
+ mockTenantsSupplier := &mockTenantsSuplier{tenants: []*instance{{}}}
+ service := newRecalculateOwnedStreams(mockTenantsSupplier.get, "ingester-0", mockRing, 50*time.Millisecond, log.NewNopLogger())
+
+ ringChanged, err := service.checkRingForChanges()
+ require.NoError(t, err)
+ require.True(t, ringChanged, "expected ring to be changed because it was not initialized yet")
+
+ ringChanged, err = service.checkRingForChanges()
+ require.NoError(t, err)
+ require.False(t, ringChanged, "expected ring not to be changed because token ranges have not changed")
+
+ anotherIngester := ring.InstanceDesc{Addr: "ingester-1", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{150, 250, 350}}
+ mockRing.replicationSet.Instances = append(mockRing.replicationSet.Instances, anotherIngester)
+
+ ringChanged, err = service.checkRingForChanges()
+ require.NoError(t, err)
+ require.True(t, ringChanged)
+}
+
+func createStream(t *testing.T, inst *instance, fingerprint int) *stream {
+ lbls := labels.Labels{labels.Label{Name: "mock", Value: strconv.Itoa(fingerprint)}}
+
+ stream, _, err := inst.streams.LoadOrStoreNew(lbls.String(), func() (*stream, error) {
+ return inst.createStreamByFP(lbls, model.Fingerprint(fingerprint))
+ }, nil)
+ require.NoError(t, err)
+ return stream
+}
+
+type mockTenantsSuplier struct {
+ tenants []*instance
+}
+
+func (m *mockTenantsSuplier) get() []*instance {
+ return m.tenants
+}
diff --git a/pkg/ingester/recovery_test.go b/pkg/ingester/recovery_test.go
index 9176ff3c6ad2f..4c5a4ce815d8d 100644
--- a/pkg/ingester/recovery_test.go
+++ b/pkg/ingester/recovery_test.go
@@ -228,7 +228,9 @@ func TestSeriesRecoveryNoDuplicates(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ readRingMock := mockReadRingWithOneActiveIngester()
+
+ i, err := New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
mkSample := func(i int) *logproto.PushRequest {
@@ -262,7 +264,7 @@ func TestSeriesRecoveryNoDuplicates(t *testing.T) {
require.Equal(t, false, iter.Next())
// create a new ingester now
- i, err = New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
+ i, err = New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil, readRingMock)
require.NoError(t, err)
// recover the checkpointed series
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index 0aa3c41ea619b..7d37859b1541f 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -8,6 +8,8 @@ import (
"sync"
"time"
+ "github.com/grafana/loki/v3/pkg/runtime"
+
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
"github.com/opentracing/opentracing-go"
@@ -78,6 +80,8 @@ type stream struct {
chunkFormat byte
chunkHeadBlockFormat chunkenc.HeadBlockFmt
+
+ configs *runtime.TenantConfigs
}
type chunkDesc struct {
@@ -107,6 +111,7 @@ func newStream(
streamRateCalculator *StreamRateCalculator,
metrics *ingesterMetrics,
writeFailures *writefailures.Manager,
+ configs *runtime.TenantConfigs,
) *stream {
hashNoShard, _ := labels.HashWithoutLabels(make([]byte, 0, 1024), ShardLbName)
return &stream{
@@ -126,6 +131,8 @@ func newStream(
writeFailures: writeFailures,
chunkFormat: chunkFormat,
chunkHeadBlockFormat: headBlockFmt,
+
+ configs: configs,
}
}
@@ -333,7 +340,8 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usa
}
chunk.lastUpdated = time.Now()
- if err := chunk.chunk.Append(&entries[i]); err != nil {
+ dup, err := chunk.chunk.Append(&entries[i])
+ if err != nil {
invalid = append(invalid, entryWithError{&entries[i], err})
if chunkenc.IsOutOfOrderErr(err) {
s.writeFailures.Log(s.tenant, err)
@@ -342,6 +350,9 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usa
}
continue
}
+ if dup {
+ s.handleLoggingOfDuplicateEntry(entries[i])
+ }
s.entryCt++
s.lastLine.ts = entries[i].Timestamp
@@ -357,6 +368,21 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usa
return bytesAdded, storedEntries, invalid
}
+func (s *stream) handleLoggingOfDuplicateEntry(entry logproto.Entry) {
+ if s.configs == nil {
+ return
+ }
+ if s.configs.LogDuplicateMetrics(s.tenant) {
+ s.metrics.duplicateLogBytesTotal.WithLabelValues(s.tenant).Add(float64(len(entry.Line)))
+ }
+ if s.configs.LogDuplicateStreamInfo(s.tenant) {
+ errMsg := fmt.Sprintf("duplicate log entry with size=%d at timestamp %s for stream %s", len(entry.Line), entry.Timestamp.Format(time.RFC3339), s.labelsString)
+ dupErr := errors.New(errMsg)
+ s.writeFailures.Log(s.tenant, dupErr)
+ }
+
+}
+
func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry, isReplay, rateLimitWholeStream bool, usageTracker push.UsageTracker) ([]logproto.Entry, []entryWithError) {
var (
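
Append now distinguishes a true duplicate (same timestamp, identical line) from an error, and the new handleLoggingOfDuplicateEntry only reports it when the tenant's runtime config asks for it. The sketch below shows that per-tenant configuration, using the field names exercised by the test in the next file; with the default "loki" namespace the dropped bytes land in loki_ingester_duplicate_log_bytes_total, and the stream-info variant emits an insight line through the write-failures manager.

```go
package ingester

import "github.com/grafana/loki/v3/pkg/runtime"

// duplicateReportingConfig is illustrative only: it shows the two per-tenant
// knobs that switch the new duplicate reporting on.
func duplicateReportingConfig() *runtime.Config {
	return &runtime.Config{
		LogDuplicateMetrics:    true, // count dropped duplicate bytes per tenant
		LogDuplicateStreamInfo: true, // log an insight line with timestamp, size and stream labels
	}
}
```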
diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go
index e4dd4a37ab355..68974ae016b39 100644
--- a/pkg/ingester/stream_test.go
+++ b/pkg/ingester/stream_test.go
@@ -9,12 +9,20 @@ import (
"testing"
"time"
+ "github.com/prometheus/client_golang/prometheus/testutil"
+
+ gokitlog "github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+
+ "github.com/grafana/loki/v3/pkg/runtime"
+
"github.com/grafana/dskit/httpgrpc"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
@@ -69,6 +77,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
)
_, err := s.Push(context.Background(), []logproto.Entry{
@@ -122,6 +131,7 @@ func TestPushDeduplication(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
)
written, err := s.Push(context.Background(), []logproto.Entry{
@@ -136,6 +146,76 @@ func TestPushDeduplication(t *testing.T) {
require.Equal(t, len("test"+"newer, better test"), written)
}
+func TestPushDeduplicationExtraMetrics(t *testing.T) {
+ limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
+ require.NoError(t, err)
+ limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
+
+ chunkfmt, headfmt := defaultChunkFormat(t)
+
+ buf := bytes.NewBuffer(nil)
+ logger := gokitlog.NewLogfmtLogger(buf)
+
+ provider := &providerMock{
+ tenantConfig: func(tenantID string) *runtime.Config {
+ if tenantID == "fake" {
+ return &runtime.Config{
+ LogDuplicateMetrics: true,
+ LogDuplicateStreamInfo: true,
+ }
+ }
+
+ return &runtime.Config{}
+ },
+ }
+
+ runtimeCfg, err := runtime.NewTenantConfigs(provider)
+
+ registry := prometheus.NewRegistry()
+ manager := writefailures.NewManager(logger, registry, writefailures.Cfg{LogRate: flagext.ByteSize(1000), AddInsightsLabel: true}, runtimeCfg, "ingester")
+
+ require.NoError(t, err)
+ metrics := newIngesterMetrics(registry, "loki")
+
+ s := newStream(
+ chunkfmt,
+ headfmt,
+ defaultConfig(),
+ limiter,
+ "fake",
+ model.Fingerprint(0),
+ labels.Labels{
+ {Name: "foo", Value: "bar"},
+ },
+ true,
+ NewStreamRateCalculator(),
+ metrics,
+ manager,
+ runtimeCfg,
+ )
+
+ _, err = s.Push(context.Background(), []logproto.Entry{
+ {Timestamp: time.Unix(1, 0), Line: "test"},
+ }, recordPool.GetRecord(), 0, true, false, nil)
+ require.NoError(t, err)
+ _, err = s.Push(context.Background(), []logproto.Entry{
+ {Timestamp: time.Unix(1, 0), Line: "not a test"},
+ }, recordPool.GetRecord(), 0, true, false, nil)
+ require.NoError(t, err)
+ _, err = s.Push(context.Background(), []logproto.Entry{
+ {Timestamp: time.Unix(1, 0), Line: "test"},
+ }, recordPool.GetRecord(), 0, true, false, nil)
+ require.NoError(t, err)
+ require.Len(t, s.chunks, 1)
+ require.Equal(t, 2, s.chunks[0].chunk.Size(), "expected exact duplicate to be dropped and newer content with same timestamp to be appended")
+ require.Equal(t, float64(4), testutil.ToFloat64(metrics.duplicateLogBytesTotal.WithLabelValues("fake")))
+
+ content := buf.String()
+ require.NotEmpty(t, content)
+ require.Contains(t, content, "insight")
+ require.Contains(t, content, "duplicate")
+}
+
func TestPushRejectOldCounter(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
@@ -157,6 +237,7 @@ func TestPushRejectOldCounter(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
)
// counter should be 2 now since the first line will be deduped
@@ -204,10 +285,11 @@ func TestStreamIterator(t *testing.T) {
chunk := chk.new()
for j := int64(0); j < entries; j++ {
k := i*entries + j
- err := chunk.Append(&logproto.Entry{
+ dup, err := chunk.Append(&logproto.Entry{
Timestamp: time.Unix(k, 0),
Line: fmt.Sprintf("line %d", k),
})
+ require.False(t, dup)
require.NoError(t, err)
}
s.chunks = append(s.chunks, chunkDesc{chunk: chunk})
@@ -263,6 +345,7 @@ func TestEntryErrorCorrectlyReported(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
)
s.highestTs = time.Now()
@@ -301,6 +384,7 @@ func TestUnorderedPush(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
)
for _, x := range []struct {
@@ -403,6 +487,7 @@ func TestPushRateLimit(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
)
entries := []logproto.Entry{
@@ -443,6 +528,7 @@ func TestPushRateLimitAllOrNothing(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
)
entries := []logproto.Entry{
@@ -482,6 +568,7 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
)
base := time.Now()
@@ -532,7 +619,7 @@ func Benchmark_PushStream(b *testing.B) {
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
chunkfmt, headfmt := defaultChunkFormat(b)
- s := newStream(chunkfmt, headfmt, &Config{MaxChunkAge: 24 * time.Hour}, limiter, "fake", model.Fingerprint(0), ls, true, NewStreamRateCalculator(), NilMetrics, nil)
+ s := newStream(chunkfmt, headfmt, &Config{MaxChunkAge: 24 * time.Hour}, limiter, "fake", model.Fingerprint(0), ls, true, NewStreamRateCalculator(), NilMetrics, nil, nil)
expr, err := syntax.ParseLogSelector(`{namespace="loki-dev"}`, true)
require.NoError(b, err)
t, err := newTailer("foo", expr, &fakeTailServer{}, 10)
@@ -566,3 +653,11 @@ func defaultChunkFormat(t testing.TB) (byte, chunkenc.HeadBlockFmt) {
return chunkfmt, headfmt
}
+
+type providerMock struct {
+ tenantConfig func(string) *runtime.Config
+}
+
+func (m *providerMock) TenantConfig(userID string) *runtime.Config {
+ return m.tenantConfig(userID)
+}
diff --git a/pkg/ingester/streams_map_test.go b/pkg/ingester/streams_map_test.go
index d98369ff152a9..b14b3e07e497f 100644
--- a/pkg/ingester/streams_map_test.go
+++ b/pkg/ingester/streams_map_test.go
@@ -31,6 +31,7 @@ func TestStreamsMap(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
),
newStream(
chunkfmt,
@@ -46,6 +47,7 @@ func TestStreamsMap(t *testing.T) {
NewStreamRateCalculator(),
NilMetrics,
nil,
+ nil,
),
}
var s *stream
diff --git a/pkg/iter/sample_iterator.go b/pkg/iter/sample_iterator.go
index 12947bbdf893b..be55678ce6285 100644
--- a/pkg/iter/sample_iterator.go
+++ b/pkg/iter/sample_iterator.go
@@ -119,25 +119,31 @@ func (it *peekingSampleIterator) Error() error {
return it.iter.Error()
}
-type sampleIteratorHeap struct {
+type SampleIteratorHeap struct {
its []SampleIterator
}
-func (h sampleIteratorHeap) Len() int { return len(h.its) }
-func (h sampleIteratorHeap) Swap(i, j int) { h.its[i], h.its[j] = h.its[j], h.its[i] }
-func (h sampleIteratorHeap) Peek() SampleIterator { return h.its[0] }
-func (h *sampleIteratorHeap) Push(x interface{}) {
+func NewSampleIteratorHeap(its []SampleIterator) SampleIteratorHeap {
+ return SampleIteratorHeap{
+ its: its,
+ }
+}
+
+func (h SampleIteratorHeap) Len() int { return len(h.its) }
+func (h SampleIteratorHeap) Swap(i, j int) { h.its[i], h.its[j] = h.its[j], h.its[i] }
+func (h SampleIteratorHeap) Peek() SampleIterator { return h.its[0] }
+func (h *SampleIteratorHeap) Push(x interface{}) {
h.its = append(h.its, x.(SampleIterator))
}
-func (h *sampleIteratorHeap) Pop() interface{} {
+func (h *SampleIteratorHeap) Pop() interface{} {
n := len(h.its)
x := h.its[n-1]
h.its = h.its[0 : n-1]
return x
}
-func (h sampleIteratorHeap) Less(i, j int) bool {
+func (h SampleIteratorHeap) Less(i, j int) bool {
s1, s2 := h.its[i].Sample(), h.its[j].Sample()
if s1.Timestamp == s2.Timestamp {
if h.its[i].StreamHash() == 0 {
@@ -150,7 +156,7 @@ func (h sampleIteratorHeap) Less(i, j int) bool {
// mergeSampleIterator iterates over a heap of iterators by merging samples.
type mergeSampleIterator struct {
- heap *sampleIteratorHeap
+ heap *SampleIteratorHeap
is []SampleIterator
prefetched bool
stats *stats.Context
@@ -170,7 +176,7 @@ type mergeSampleIterator struct {
// This means using this iterator with a single iterator will result in the same result as the input iterator.
// If you don't need to deduplicate sample, use `NewSortSampleIterator` instead.
func NewMergeSampleIterator(ctx context.Context, is []SampleIterator) SampleIterator {
- h := sampleIteratorHeap{
+ h := SampleIteratorHeap{
its: make([]SampleIterator, 0, len(is)),
}
return &mergeSampleIterator{
@@ -350,7 +356,7 @@ func (i *mergeSampleIterator) Close() error {
// sortSampleIterator iterates over a heap of iterators by sorting samples.
type sortSampleIterator struct {
- heap *sampleIteratorHeap
+ heap *SampleIteratorHeap
is []SampleIterator
prefetched bool
@@ -369,7 +375,7 @@ func NewSortSampleIterator(is []SampleIterator) SampleIterator {
if len(is) == 1 {
return is[0]
}
- h := sampleIteratorHeap{
+ h := SampleIteratorHeap{
its: make([]SampleIterator, 0, len(is)),
}
return &sortSampleIterator{
@@ -378,7 +384,7 @@ func NewSortSampleIterator(is []SampleIterator) SampleIterator {
}
}
-// init initialize the underlaying heap
+// init initializes the underlying heap
func (i *sortSampleIterator) init() {
if i.prefetched {
return
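
Exporting the heap (plus the NewSampleIteratorHeap constructor) lets other packages drive it directly with container/heap instead of going through the merge/sort iterators. A hedged usage sketch; buildHeap is illustrative and assumes each iterator is advanced once before being pushed.

```go
package example

import (
	"container/heap"

	"github.com/grafana/loki/v3/pkg/iter"
)

// buildHeap pushes every iterator that currently has a sample; the iterator
// with the smallest timestamp ends up at Peek().
func buildHeap(its []iter.SampleIterator) *iter.SampleIteratorHeap {
	h := iter.NewSampleIteratorHeap(make([]iter.SampleIterator, 0, len(its)))
	for _, it := range its {
		if it.Next() { // position the iterator on its first sample
			heap.Push(&h, it)
		}
	}
	return &h
}
```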
diff --git a/pkg/logcli/output/loki.go b/pkg/logcli/output/loki.go
new file mode 100644
index 0000000000000..ad89311bbcb34
--- /dev/null
+++ b/pkg/logcli/output/loki.go
@@ -0,0 +1 @@
+package output
diff --git a/pkg/loghttp/samples.go b/pkg/loghttp/samples.go
new file mode 100644
index 0000000000000..9266dc2285014
--- /dev/null
+++ b/pkg/loghttp/samples.go
@@ -0,0 +1,35 @@
+package loghttp
+
+import (
+ "net/http"
+
+ "github.com/grafana/loki/v3/pkg/logproto"
+)
+
+func ParseSamplesQuery(r *http.Request) (*logproto.QuerySamplesRequest, error) {
+ req := &logproto.QuerySamplesRequest{}
+
+ req.Query = query(r)
+ start, end, err := bounds(r)
+ if err != nil {
+ return nil, err
+ }
+ req.Start = start
+ req.End = end
+
+ calculatedStep, err := step(r, start, end)
+ if err != nil {
+ return nil, err
+ }
+ if calculatedStep <= 0 {
+ return nil, errZeroOrNegativeStep
+ }
+ // For safety, limit the number of returned points per timeseries.
+ // This is sufficient for 60s resolution for a week or 1h resolution for a year.
+ if (req.End.Sub(req.Start) / calculatedStep) > 11000 {
+ return nil, errStepTooSmall
+ }
+ req.Step = calculatedStep.Milliseconds()
+
+ return req, nil
+}
diff --git a/pkg/loghttp/samples_test.go b/pkg/loghttp/samples_test.go
new file mode 100644
index 0000000000000..971b009045b1a
--- /dev/null
+++ b/pkg/loghttp/samples_test.go
@@ -0,0 +1,122 @@
+package loghttp
+
+import (
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/logproto"
+)
+
+func TestParseSamplesQuery(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ path string
+ want *logproto.QuerySamplesRequest
+ wantErr bool
+ }{
+ {
+ name: "should correctly parse valid params",
+ path: "/loki/api/v1/patterns?query={}&start=100000000000&end=3600000000000&step=5s",
+ want: &logproto.QuerySamplesRequest{
+ Query: "{}",
+ Start: time.Unix(100, 0),
+ End: time.Unix(3600, 0),
+ Step: (5 * time.Second).Milliseconds(),
+ },
+ },
+ {
+ name: "should default empty step param to sensible step for the range",
+ path: "/loki/api/v1/patterns?query={}&start=100000000000&end=3600000000000",
+ want: &logproto.QuerySamplesRequest{
+ Query: "{}",
+ Start: time.Unix(100, 0),
+ End: time.Unix(3600, 0),
+ Step: (14 * time.Second).Milliseconds(),
+ },
+ },
+ {
+ name: "should default start to zero for empty start param",
+ path: "/loki/api/v1/patterns?query={}&end=3600000000000",
+ want: &logproto.QuerySamplesRequest{
+ Query: "{}",
+ Start: time.Unix(0, 0),
+ End: time.Unix(3600, 0),
+ Step: (14 * time.Second).Milliseconds(),
+ },
+ },
+ {
+ name: "should accept step with no units as seconds",
+ path: "/loki/api/v1/patterns?query={}&start=100000000000&end=3600000000000&step=10",
+ want: &logproto.QuerySamplesRequest{
+ Query: "{}",
+ Start: time.Unix(100, 0),
+ End: time.Unix(3600, 0),
+ Step: (10 * time.Second).Milliseconds(),
+ },
+ },
+ {
+ name: "should accept step as string duration in seconds",
+ path: "/loki/api/v1/patterns?query={}&start=100000000000&end=3600000000000&step=15s",
+ want: &logproto.QuerySamplesRequest{
+ Query: "{}",
+ Start: time.Unix(100, 0),
+ End: time.Unix(3600, 0),
+ Step: (15 * time.Second).Milliseconds(),
+ },
+ },
+ {
+ name: "should correctly parse long duration for step",
+ path: "/loki/api/v1/patterns?query={}&start=100000000000&end=3600000000000&step=10h",
+ want: &logproto.QuerySamplesRequest{
+ Query: "{}",
+ Start: time.Unix(100, 0),
+ End: time.Unix(3600, 0),
+ Step: (10 * time.Hour).Milliseconds(),
+ },
+ },
+ {
+ name: "should reject negative step value",
+ path: "/loki/api/v1/patterns?query={}&start=100000000000&end=3600000000000&step=-5s",
+ want: nil,
+ wantErr: true,
+ },
+ {
+ name: "should reject very small step for big range",
+ path: "/loki/api/v1/patterns?query={}&start=100000000000&end=3600000000000&step=50ms",
+ want: nil,
+ wantErr: true,
+ },
+ {
+ name: "should accept very small step for small range",
+ path: "/loki/api/v1/patterns?query={}&start=100000000000&end=110000000000&step=50ms",
+ want: &logproto.QuerySamplesRequest{
+ Query: "{}",
+ Start: time.Unix(100, 0),
+ End: time.Unix(110, 0),
+ Step: (50 * time.Millisecond).Milliseconds(),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ req, err := http.NewRequest(http.MethodGet, tt.path, nil)
+ require.NoError(t, err)
+ err = req.ParseForm()
+ require.NoError(t, err)
+
+ got, err := ParseSamplesQuery(req)
+ if tt.wantErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ assert.Equalf(t, tt.want, got, "Incorrect response from input path: %s", tt.path)
+ })
+ }
+}
diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go
index a11467584b58f..fd2923b7a06c7 100644
--- a/pkg/logproto/compat.go
+++ b/pkg/logproto/compat.go
@@ -82,6 +82,10 @@ func FromMetricsToLabelAdapters(metric model.Metric) []LabelAdapter {
return result
}
+func FromMetricsToLabels(metric model.Metric) labels.Labels {
+ return FromLabelAdaptersToLabels(FromMetricsToLabelAdapters(metric))
+}
+
type byLabel []LabelAdapter
func (s byLabel) Len() int { return len(s) }
@@ -506,6 +510,33 @@ func (m *ShardsRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(fields...)
}
+func (m *DetectedFieldsRequest) GetCachingOptions() (res definitions.CachingOptions) { return }
+
+func (m *DetectedFieldsRequest) WithStartEnd(start, end time.Time) definitions.Request {
+ clone := *m
+ clone.Start = start
+ clone.End = end
+ return &clone
+}
+
+func (m *DetectedFieldsRequest) WithQuery(query string) definitions.Request {
+ clone := *m
+ clone.Query = query
+ return &clone
+}
+
+func (m *DetectedFieldsRequest) LogToSpan(sp opentracing.Span) {
+ fields := []otlog.Field{
+ otlog.String("query", m.GetQuery()),
+ otlog.String("start", m.Start.String()),
+ otlog.String("end", m.End.String()),
+ otlog.String("step", time.Duration(m.Step).String()),
+ otlog.String("field_limit", fmt.Sprintf("%d", m.FieldLimit)),
+ otlog.String("line_limit", fmt.Sprintf("%d", m.LineLimit)),
+ }
+ sp.LogFields(fields...)
+}
+
func (m *QueryPatternsRequest) GetCachingOptions() (res definitions.CachingOptions) { return }
func (m *QueryPatternsRequest) WithStartEnd(start, end time.Time) definitions.Request {
@@ -534,3 +565,62 @@ func (m *QueryPatternsRequest) LogToSpan(sp opentracing.Span) {
}
sp.LogFields(fields...)
}
+
+func (m *DetectedLabelsRequest) GetStep() int64 { return 0 }
+
+func (m *DetectedLabelsRequest) GetCachingOptions() (res definitions.CachingOptions) { return }
+
+func (m *DetectedLabelsRequest) WithStartEnd(start, end time.Time) definitions.Request {
+ clone := *m
+ clone.Start = start
+ clone.End = end
+ return &clone
+}
+
+func (m *DetectedLabelsRequest) WithQuery(query string) definitions.Request {
+ clone := *m
+ clone.Query = query
+ return &clone
+}
+
+func (m *DetectedLabelsRequest) WithStartEndForCache(start, end time.Time) resultscache.Request {
+ return m.WithStartEnd(start, end).(resultscache.Request)
+}
+
+func (m *DetectedLabelsRequest) LogToSpan(sp opentracing.Span) {
+ fields := []otlog.Field{
+ otlog.String("query", m.GetQuery()),
+ otlog.String("start", m.Start.String()),
+ otlog.String("end", m.End.String()),
+ }
+ sp.LogFields(fields...)
+}
+
+func (m *QuerySamplesRequest) GetCachingOptions() (res definitions.CachingOptions) { return }
+
+func (m *QuerySamplesRequest) WithStartEnd(start, end time.Time) definitions.Request {
+ clone := *m
+ clone.Start = start
+ clone.End = end
+ return &clone
+}
+
+func (m *QuerySamplesRequest) WithStartEndForCache(start, end time.Time) resultscache.Request {
+ return m.WithStartEnd(start, end).(resultscache.Request)
+}
+
+func (m *QuerySamplesRequest) WithQuery(query string) definitions.Request {
+ clone := *m
+ clone.Query = query
+ return &clone
+}
+
+func (m *QuerySamplesRequest) LogToSpan(sp opentracing.Span) {
+ fields := []otlog.Field{
+ otlog.String("query", m.GetQuery()),
+ otlog.String("start", m.Start.String()),
+ otlog.String("end", m.End.String()),
+ otlog.String("step", time.Duration(m.Step).String()),
+ }
+ sp.LogFields(fields...)
+}
diff --git a/pkg/logproto/extensions.go b/pkg/logproto/extensions.go
index 3de5c0fd75801..284ac792ebb95 100644
--- a/pkg/logproto/extensions.go
+++ b/pkg/logproto/extensions.go
@@ -1,10 +1,12 @@
package logproto
import (
+ "encoding/json"
"sort"
"strings"
"sync/atomic" //lint:ignore faillint we can't use go.uber.org/atomic with a protobuf struct without wrapping it.
+ "github.com/buger/jsonparser"
"github.com/cespare/xxhash/v2"
"github.com/dustin/go-humanize"
jsoniter "github.com/json-iterator/go"
@@ -188,3 +190,47 @@ func (m *ShardsResponse) Merge(other *ShardsResponse) {
m.ChunkGroups = append(m.ChunkGroups, other.ChunkGroups...)
m.Statistics.Merge(other.Statistics)
}
+
+func NewPatternSeries(pattern string, samples []*PatternSample) *PatternSeries {
+ return &PatternSeries{Pattern: pattern, Samples: samples}
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// QuerySamplesResponse json representation is different from the proto
+func (r *QuerySamplesResponse) UnmarshalJSON(data []byte) error {
+ return jsonparser.ObjectEach(
+ data,
+ func(key, value []byte, dataType jsonparser.ValueType, offset int) error {
+ if string(key) == "data" {
+ var m []model.SampleStream
+ if err := json.Unmarshal(value, &m); err != nil {
+ return err
+ }
+ series := make([]Series, len(m))
+
+ for i, s := range m {
+ lbls := FromMetricsToLabels(s.Metric)
+
+ newSeries := Series{
+ Labels: s.Metric.String(),
+ StreamHash: lbls.Hash(),
+ Samples: make([]Sample, len(s.Values)),
+ }
+
+ for j, samplePair := range s.Values {
+ newSeries.Samples[j] = Sample{
+ Timestamp: samplePair.Timestamp.UnixNano(),
+ Value: float64(samplePair.Value),
+ }
+ }
+
+ series[i] = newSeries
+ }
+
+ r.Series = series
+ }
+
+ return nil
+ },
+ )
+}
diff --git a/pkg/logproto/extensions_test.go b/pkg/logproto/extensions_test.go
index d1c96c76bbed3..35823d3ca5542 100644
--- a/pkg/logproto/extensions_test.go
+++ b/pkg/logproto/extensions_test.go
@@ -3,7 +3,10 @@ package logproto
import (
"testing"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/logql/syntax"
)
func TestShard_SpaceFor(t *testing.T) {
@@ -40,3 +43,91 @@ func TestShard_SpaceFor(t *testing.T) {
})
}
}
+
+func TestQueryPatternsResponse_UnmarshalJSON(t *testing.T) {
+ mockData := []byte(`{
+ "status": "success",
+ "data": [
+ {
+ "pattern": "foo <*> bar",
+ "samples": [[1609459200, 10], [1609545600, 15]]
+ },
+ {
+ "pattern": "foo <*> buzz",
+ "samples": [[1609459200, 20], [1609545600, 25]]
+ }
+ ]
+ }`)
+
+ expectedSeries := []*PatternSeries{
+ NewPatternSeries("foo <*> bar", []*PatternSample{
+ {Timestamp: model.TimeFromUnix(1609459200), Value: 10},
+ {Timestamp: model.TimeFromUnix(1609545600), Value: 15},
+ }),
+ NewPatternSeries("foo <*> buzz", []*PatternSample{
+ {Timestamp: model.TimeFromUnix(1609459200), Value: 20},
+ {Timestamp: model.TimeFromUnix(1609545600), Value: 25},
+ }),
+ }
+
+ r := &QueryPatternsResponse{}
+ err := r.UnmarshalJSON(mockData)
+
+ require.Nil(t, err)
+ require.Equal(t, expectedSeries, r.Series)
+}
+
+func TestQuerySamplesResponse_UnmarshalJSON(t *testing.T) {
+ mockData := []byte(`{
+ "status": "success",
+ "data": [{
+ "metric": {
+ "foo": "bar"
+ },
+ "values": [
+ [0.001, "1"],
+ [0.002, "2"]
+ ]
+ },
+ {
+ "metric": {
+ "foo": "baz",
+ "bar": "qux"
+ },
+ "values": [
+ [0.003, "3"],
+ [0.004, "4"]
+ ]
+ }]
+ }`)
+
+ lbls1, err := syntax.ParseLabels(`{foo="bar"}`)
+ require.NoError(t, err)
+ lbls2, err := syntax.ParseLabels(`{bar="qux", foo="baz"}`)
+ require.NoError(t, err)
+
+ expectedSamples := []Series{
+ {
+ Labels: lbls1.String(),
+ Samples: []Sample{
+ {Timestamp: 1e6, Value: 1}, // 1ms after epoch in ns
+ {Timestamp: 2e6, Value: 2}, // 2ms after epoch in ns
+ },
+ StreamHash: lbls1.Hash(),
+ },
+ {
+ Labels: lbls2.String(),
+ Samples: []Sample{
+ {Timestamp: 3e6, Value: 3}, // 3ms after epoch in ns
+ {Timestamp: 4e6, Value: 4}, // 4ms after epoch in ns
+ },
+ StreamHash: lbls2.Hash(),
+ },
+ }
+
+ r := &QuerySamplesResponse{}
+ err = r.UnmarshalJSON(mockData)
+
+ require.Nil(t, err)
+ require.Equal(t, expectedSamples, r.Series)
+}
diff --git a/pkg/logproto/pattern.pb.go b/pkg/logproto/pattern.pb.go
index a666a32850127..eb3cfce9e03e8 100644
--- a/pkg/logproto/pattern.pb.go
+++ b/pkg/logproto/pattern.pb.go
@@ -11,6 +11,7 @@ import (
_ "github.com/gogo/protobuf/types"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
push "github.com/grafana/loki/pkg/push"
+ stats "github.com/grafana/loki/v3/pkg/logqlmodel/stats"
github_com_prometheus_common_model "github.com/prometheus/common/model"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
@@ -240,48 +241,176 @@ func (m *PatternSample) GetValue() int64 {
return 0
}
+type QuerySamplesRequest struct {
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"`
+ End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"`
+ Step int64 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"`
+}
+
+func (m *QuerySamplesRequest) Reset() { *m = QuerySamplesRequest{} }
+func (*QuerySamplesRequest) ProtoMessage() {}
+func (*QuerySamplesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_aaf4192acc66a4ea, []int{4}
+}
+func (m *QuerySamplesRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QuerySamplesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QuerySamplesRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QuerySamplesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QuerySamplesRequest.Merge(m, src)
+}
+func (m *QuerySamplesRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QuerySamplesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QuerySamplesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuerySamplesRequest proto.InternalMessageInfo
+
+func (m *QuerySamplesRequest) GetQuery() string {
+ if m != nil {
+ return m.Query
+ }
+ return ""
+}
+
+func (m *QuerySamplesRequest) GetStart() time.Time {
+ if m != nil {
+ return m.Start
+ }
+ return time.Time{}
+}
+
+func (m *QuerySamplesRequest) GetEnd() time.Time {
+ if m != nil {
+ return m.End
+ }
+ return time.Time{}
+}
+
+func (m *QuerySamplesRequest) GetStep() int64 {
+ if m != nil {
+ return m.Step
+ }
+ return 0
+}
+
+type QuerySamplesResponse struct {
+ Series []Series `protobuf:"bytes,1,rep,name=series,proto3,customtype=Series" json:"series,omitempty"`
+ Stats stats.Ingester `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats"`
+ Warnings []string `protobuf:"bytes,3,rep,name=warnings,proto3" json:"warnings,omitempty"`
+}
+
+func (m *QuerySamplesResponse) Reset() { *m = QuerySamplesResponse{} }
+func (*QuerySamplesResponse) ProtoMessage() {}
+func (*QuerySamplesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_aaf4192acc66a4ea, []int{5}
+}
+func (m *QuerySamplesResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QuerySamplesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QuerySamplesResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QuerySamplesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QuerySamplesResponse.Merge(m, src)
+}
+func (m *QuerySamplesResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QuerySamplesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QuerySamplesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuerySamplesResponse proto.InternalMessageInfo
+
+func (m *QuerySamplesResponse) GetStats() stats.Ingester {
+ if m != nil {
+ return m.Stats
+ }
+ return stats.Ingester{}
+}
+
+func (m *QuerySamplesResponse) GetWarnings() []string {
+ if m != nil {
+ return m.Warnings
+ }
+ return nil
+}
+
func init() {
proto.RegisterType((*QueryPatternsRequest)(nil), "logproto.QueryPatternsRequest")
proto.RegisterType((*QueryPatternsResponse)(nil), "logproto.QueryPatternsResponse")
proto.RegisterType((*PatternSeries)(nil), "logproto.PatternSeries")
proto.RegisterType((*PatternSample)(nil), "logproto.PatternSample")
+ proto.RegisterType((*QuerySamplesRequest)(nil), "logproto.QuerySamplesRequest")
+ proto.RegisterType((*QuerySamplesResponse)(nil), "logproto.QuerySamplesResponse")
}
func init() { proto.RegisterFile("pkg/logproto/pattern.proto", fileDescriptor_aaf4192acc66a4ea) }
var fileDescriptor_aaf4192acc66a4ea = []byte{
- // 483 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0xb1, 0x6e, 0xd3, 0x40,
- 0x18, 0xf6, 0xd5, 0x49, 0xd3, 0x5e, 0xc5, 0x72, 0xa4, 0x60, 0x19, 0xe9, 0x1c, 0x79, 0x21, 0x93,
- 0x0f, 0x52, 0x09, 0x24, 0xc6, 0x4c, 0x0c, 0x20, 0x15, 0xc3, 0x84, 0x60, 0x70, 0xda, 0xbf, 0xb6,
- 0x55, 0xdb, 0xe7, 0xfa, 0xee, 0x2a, 0xb1, 0xf1, 0x08, 0x79, 0x0c, 0x1e, 0x80, 0x87, 0xe8, 0x98,
- 0xb1, 0x62, 0x28, 0xc4, 0x59, 0x18, 0xfb, 0x08, 0xc8, 0x77, 0x76, 0x93, 0x56, 0x74, 0xe8, 0x92,
- 0xdc, 0xff, 0x7f, 0xdf, 0xff, 0xf9, 0xbb, 0xff, 0x3b, 0xec, 0x96, 0xa7, 0x31, 0xcb, 0x78, 0x5c,
- 0x56, 0x5c, 0x72, 0x56, 0x46, 0x52, 0x42, 0x55, 0x04, 0xba, 0x22, 0x3b, 0x5d, 0xdf, 0x1d, 0xc6,
- 0x3c, 0xe6, 0x86, 0xd2, 0x9c, 0x0c, 0xee, 0x7a, 0x31, 0xe7, 0x71, 0x06, 0x4c, 0x57, 0x33, 0x75,
- 0xc2, 0x64, 0x9a, 0x83, 0x90, 0x51, 0x5e, 0xb6, 0x84, 0x67, 0xb7, 0xc4, 0xbb, 0x43, 0x0b, 0x3e,
- 0x6e, 0xc0, 0x52, 0x89, 0x44, 0xff, 0x98, 0xa6, 0xff, 0x13, 0xe1, 0xe1, 0x07, 0x05, 0xd5, 0xb7,
- 0x43, 0xe3, 0x44, 0x84, 0x70, 0xa6, 0x40, 0x48, 0x32, 0xc4, 0xfd, 0xb3, 0xa6, 0xef, 0xa0, 0x11,
- 0x1a, 0xef, 0x86, 0xa6, 0x20, 0x6f, 0x70, 0x5f, 0xc8, 0xa8, 0x92, 0xce, 0xd6, 0x08, 0x8d, 0xf7,
- 0x26, 0x6e, 0x60, 0x1c, 0x05, 0x9d, 0xa3, 0xe0, 0x53, 0xe7, 0x68, 0xba, 0x73, 0x71, 0xe5, 0x59,
- 0xf3, 0xdf, 0x1e, 0x0a, 0xcd, 0x08, 0x79, 0x85, 0x6d, 0x28, 0x8e, 0x1d, 0xfb, 0x01, 0x93, 0xcd,
- 0x00, 0x21, 0xb8, 0x27, 0x24, 0x94, 0x4e, 0x6f, 0x84, 0xc6, 0x76, 0xa8, 0xcf, 0xfe, 0x5b, 0xbc,
- 0x7f, 0xc7, 0xb5, 0x28, 0x79, 0x21, 0x80, 0x30, 0xbc, 0x2d, 0xa0, 0x4a, 0x41, 0x38, 0x68, 0x64,
- 0x8f, 0xf7, 0x26, 0x4f, 0x83, 0x9b, 0x2d, 0xb4, 0xdc, 0x8f, 0x1a, 0x0e, 0x5b, 0x9a, 0xff, 0x05,
- 0x3f, 0xba, 0x05, 0x10, 0x07, 0x0f, 0xda, 0x54, 0xda, 0xab, 0x77, 0x25, 0x79, 0x89, 0x07, 0x22,
- 0xca, 0xcb, 0x0c, 0x84, 0xb3, 0x75, 0x9f, 0xb8, 0xc6, 0xc3, 0x8e, 0xe7, 0xcb, 0xb5, 0xba, 0xee,
- 0x90, 0xf7, 0x78, 0xf7, 0x26, 0x34, 0xad, 0x6f, 0x4f, 0x59, 0x73, 0xdd, 0x5f, 0x57, 0xde, 0xf3,
- 0x38, 0x95, 0x89, 0x9a, 0x05, 0x47, 0x3c, 0x6f, 0x12, 0xce, 0x41, 0x26, 0xa0, 0x04, 0x3b, 0xe2,
- 0x79, 0xce, 0x0b, 0x96, 0xf3, 0x63, 0xc8, 0xf4, 0x92, 0xc2, 0xb5, 0x42, 0x93, 0xd2, 0x79, 0x94,
- 0x29, 0xd0, 0x79, 0xd8, 0xa1, 0x29, 0x26, 0x73, 0x84, 0x07, 0xed, 0x67, 0xc9, 0x6b, 0xdc, 0x3b,
- 0x54, 0x22, 0x21, 0xfb, 0x1b, 0x5e, 0x95, 0x48, 0xda, 0x98, 0xdd, 0x27, 0x77, 0xdb, 0x66, 0x8f,
- 0xbe, 0x45, 0xde, 0xe1, 0xbe, 0x5e, 0x31, 0xa1, 0x6b, 0xca, 0xff, 0x5e, 0x8a, 0xeb, 0xdd, 0x8b,
- 0x77, 0x5a, 0x2f, 0xd0, 0xf4, 0xeb, 0x62, 0x49, 0xad, 0xcb, 0x25, 0xb5, 0xae, 0x97, 0x14, 0x7d,
- 0xaf, 0x29, 0xfa, 0x51, 0x53, 0x74, 0x51, 0x53, 0xb4, 0xa8, 0x29, 0xfa, 0x53, 0x53, 0xf4, 0xb7,
- 0xa6, 0xd6, 0x75, 0x4d, 0xd1, 0x7c, 0x45, 0xad, 0xc5, 0x8a, 0x5a, 0x97, 0x2b, 0x6a, 0x7d, 0xde,
- 0x5c, 0x49, 0x5c, 0x45, 0x27, 0x51, 0x11, 0xb1, 0x8c, 0x9f, 0xa6, 0xec, 0xfc, 0x80, 0x6d, 0x3e,
- 0xf5, 0xd9, 0xb6, 0xfe, 0x3b, 0xf8, 0x17, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x4f, 0x5c, 0x50, 0x5e,
- 0x03, 0x00, 0x00,
+ // 597 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x54, 0xbb, 0x8e, 0xd3, 0x40,
+ 0x14, 0xf5, 0xac, 0xb3, 0xaf, 0x59, 0xf1, 0xd0, 0xec, 0x2e, 0x58, 0x46, 0x8c, 0x2d, 0x37, 0x44,
+ 0x42, 0xf2, 0x40, 0x56, 0x02, 0x44, 0x99, 0x0a, 0x24, 0x90, 0x82, 0xa1, 0x42, 0x50, 0x38, 0xbb,
+ 0xb3, 0x4e, 0xb4, 0xb6, 0xc7, 0xf1, 0x8c, 0x17, 0xd1, 0xf1, 0x09, 0xdb, 0xf2, 0x07, 0xf4, 0xf0,
+ 0x11, 0x29, 0x53, 0xae, 0xb6, 0x08, 0xc4, 0x69, 0x28, 0xf7, 0x13, 0x90, 0x67, 0xc6, 0x79, 0x89,
+ 0x14, 0x94, 0x34, 0xce, 0xdc, 0x7b, 0xce, 0xbd, 0x39, 0xf7, 0x9e, 0xb1, 0xa1, 0x9d, 0x9d, 0x45,
+ 0x24, 0x66, 0x51, 0x96, 0x33, 0xc1, 0x48, 0x16, 0x0a, 0x41, 0xf3, 0xd4, 0x97, 0x11, 0xda, 0xa9,
+ 0xf3, 0xf6, 0x41, 0xc4, 0x22, 0xa6, 0x28, 0xd5, 0x49, 0xe1, 0xb6, 0x13, 0x31, 0x16, 0xc5, 0x94,
+ 0xc8, 0xa8, 0x5b, 0x9c, 0x12, 0xd1, 0x4f, 0x28, 0x17, 0x61, 0x92, 0x69, 0xc2, 0xbd, 0xa5, 0xe6,
+ 0xf5, 0x41, 0x83, 0xae, 0x06, 0x07, 0x71, 0xc2, 0x4e, 0x68, 0x4c, 0xb8, 0x08, 0x05, 0x57, 0x4f,
+ 0xcd, 0xd8, 0xaf, 0x18, 0x59, 0xc1, 0x7b, 0xf2, 0xa1, 0x92, 0xde, 0x0f, 0x00, 0x0f, 0xde, 0x14,
+ 0x34, 0xff, 0xdc, 0x51, 0x5a, 0x79, 0x40, 0x07, 0x05, 0xe5, 0x02, 0x1d, 0xc0, 0xcd, 0x41, 0x95,
+ 0xb7, 0x80, 0x0b, 0x9a, 0xbb, 0x81, 0x0a, 0xd0, 0x73, 0xb8, 0xc9, 0x45, 0x98, 0x0b, 0x6b, 0xc3,
+ 0x05, 0xcd, 0xbd, 0x96, 0xed, 0x2b, 0xcd, 0x7e, 0xad, 0xd9, 0x7f, 0x57, 0x6b, 0x6e, 0xef, 0x0c,
+ 0xc7, 0x8e, 0x71, 0xf1, 0xd3, 0x01, 0x81, 0x2a, 0x41, 0x4f, 0xa0, 0x49, 0xd3, 0x13, 0xcb, 0xfc,
+ 0x87, 0xca, 0xaa, 0x00, 0x21, 0xd8, 0xe0, 0x82, 0x66, 0x56, 0xc3, 0x05, 0x4d, 0x33, 0x90, 0x67,
+ 0xef, 0x05, 0x3c, 0x5c, 0x51, 0xcd, 0x33, 0x96, 0x72, 0x8a, 0x08, 0xdc, 0xe2, 0x34, 0xef, 0x53,
+ 0x6e, 0x01, 0xd7, 0x6c, 0xee, 0xb5, 0xee, 0xfa, 0xb3, 0x3d, 0x69, 0xee, 0x5b, 0x09, 0x07, 0x9a,
+ 0xe6, 0x7d, 0x80, 0x37, 0x96, 0x00, 0x64, 0xc1, 0x6d, 0xed, 0x9b, 0x1e, 0xbd, 0x0e, 0xd1, 0x63,
+ 0xb8, 0xcd, 0xc3, 0x24, 0x8b, 0x29, 0xb7, 0x36, 0xd6, 0x35, 0x97, 0x78, 0x50, 0xf3, 0x3c, 0x31,
+ 0xef, 0x2e, 0x33, 0xe8, 0x35, 0xdc, 0x9d, 0xd9, 0x2a, 0xfb, 0x9b, 0x6d, 0x52, 0x8d, 0x7b, 0x35,
+ 0x76, 0x1e, 0x44, 0x7d, 0xd1, 0x2b, 0xba, 0xfe, 0x31, 0x4b, 0xaa, 0x3b, 0x90, 0x50, 0xd1, 0xa3,
+ 0x05, 0x27, 0xc7, 0x2c, 0x49, 0x58, 0x4a, 0xa4, 0xab, 0x72, 0x49, 0xc1, 0xbc, 0x43, 0xe5, 0xd2,
+ 0x79, 0x18, 0x17, 0x54, 0xfa, 0x61, 0x06, 0x2a, 0xf0, 0xbe, 0x03, 0xb8, 0x2f, 0xd7, 0xa3, 0xfe,
+ 0xf4, 0x3f, 0xf1, 0xf4, 0x6b, 0x7d, 0x15, 0x67, 0xaa, 0xb5, 0xa7, 0xcf, 0x56, 0x3c, 0xbd, 0x3d,
+ 0x5f, 0xbb, 0xf2, 0xac, 0x7d, 0x73, 0x38, 0x76, 0xc0, 0xd5, 0xd8, 0xd9, 0x5a, 0x36, 0x17, 0x3d,
+ 0x94, 0xa3, 0x09, 0xae, 0x47, 0xbb, 0xe5, 0xab, 0xf7, 0xe1, 0x65, 0x1a, 0x51, 0x2e, 0x68, 0xde,
+ 0x6e, 0x54, 0xaa, 0x02, 0xc5, 0x41, 0x36, 0xdc, 0xf9, 0x14, 0xe6, 0x69, 0x3f, 0x8d, 0xb8, 0x65,
+ 0xba, 0x66, 0x73, 0x37, 0x98, 0xc5, 0xad, 0x12, 0xc0, 0x6d, 0x6d, 0x24, 0x7a, 0x0a, 0x1b, 0x9d,
+ 0x82, 0xf7, 0xd0, 0xe1, 0x82, 0xfb, 0x05, 0xef, 0xe9, 0x25, 0xdb, 0x77, 0x56, 0xd3, 0x6a, 0x0a,
+ 0xcf, 0x40, 0xaf, 0xe0, 0xa6, 0x9c, 0x0f, 0xe1, 0x39, 0xe5, 0x6f, 0xef, 0x9e, 0xed, 0xac, 0xc5,
+ 0xeb, 0x5e, 0x8f, 0x00, 0xea, 0xc0, 0xbd, 0x85, 0x6d, 0xa1, 0xfb, 0x2b, 0x35, 0xcb, 0xd6, 0xdb,
+ 0x78, 0x1d, 0x3c, 0xef, 0xd8, 0xfe, 0x38, 0x9a, 0x60, 0xe3, 0x72, 0x82, 0x8d, 0xeb, 0x09, 0x06,
+ 0x5f, 0x4a, 0x0c, 0xbe, 0x95, 0x18, 0x0c, 0x4b, 0x0c, 0x46, 0x25, 0x06, 0xbf, 0x4a, 0x0c, 0x7e,
+ 0x97, 0xd8, 0xb8, 0x2e, 0x31, 0xb8, 0x98, 0x62, 0x63, 0x34, 0xc5, 0xc6, 0xe5, 0x14, 0x1b, 0xef,
+ 0x17, 0xaf, 0x6d, 0x94, 0x87, 0xa7, 0x61, 0x1a, 0x92, 0x98, 0x9d, 0xf5, 0xc9, 0xf9, 0x11, 0x59,
+ 0xfc, 0x60, 0x75, 0xb7, 0xe4, 0xcf, 0xd1, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbc, 0xa2, 0x72,
+ 0x12, 0x24, 0x05, 0x00, 0x00,
}
func (this *QueryPatternsRequest) Equal(that interface{}) bool {
@@ -405,6 +534,79 @@ func (this *PatternSample) Equal(that interface{}) bool {
}
return true
}
+func (this *QuerySamplesRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*QuerySamplesRequest)
+ if !ok {
+ that2, ok := that.(QuerySamplesRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Query != that1.Query {
+ return false
+ }
+ if !this.Start.Equal(that1.Start) {
+ return false
+ }
+ if !this.End.Equal(that1.End) {
+ return false
+ }
+ if this.Step != that1.Step {
+ return false
+ }
+ return true
+}
+func (this *QuerySamplesResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*QuerySamplesResponse)
+ if !ok {
+ that2, ok := that.(QuerySamplesResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if len(this.Series) != len(that1.Series) {
+ return false
+ }
+ for i := range this.Series {
+ if !this.Series[i].Equal(that1.Series[i]) {
+ return false
+ }
+ }
+ if !this.Stats.Equal(&that1.Stats) {
+ return false
+ }
+ if len(this.Warnings) != len(that1.Warnings) {
+ return false
+ }
+ for i := range this.Warnings {
+ if this.Warnings[i] != that1.Warnings[i] {
+ return false
+ }
+ }
+ return true
+}
func (this *QueryPatternsRequest) GoString() string {
if this == nil {
return "nil"
@@ -454,6 +656,31 @@ func (this *PatternSample) GoString() string {
s = append(s, "}")
return strings.Join(s, "")
}
+func (this *QuerySamplesRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&logproto.QuerySamplesRequest{")
+ s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n")
+ s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n")
+ s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n")
+ s = append(s, "Step: "+fmt.Sprintf("%#v", this.Step)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *QuerySamplesResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&logproto.QuerySamplesResponse{")
+ s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n")
+ s = append(s, "Stats: "+strings.Replace(this.Stats.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "Warnings: "+fmt.Sprintf("%#v", this.Warnings)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
func valueToGoStringPattern(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -477,6 +704,7 @@ const _ = grpc.SupportPackageIsVersion4
type PatternClient interface {
Push(ctx context.Context, in *push.PushRequest, opts ...grpc.CallOption) (*push.PushResponse, error)
Query(ctx context.Context, in *QueryPatternsRequest, opts ...grpc.CallOption) (Pattern_QueryClient, error)
+ QuerySample(ctx context.Context, in *QuerySamplesRequest, opts ...grpc.CallOption) (Pattern_QuerySampleClient, error)
}
type patternClient struct {
@@ -528,10 +756,43 @@ func (x *patternQueryClient) Recv() (*QueryPatternsResponse, error) {
return m, nil
}
+func (c *patternClient) QuerySample(ctx context.Context, in *QuerySamplesRequest, opts ...grpc.CallOption) (Pattern_QuerySampleClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Pattern_serviceDesc.Streams[1], "/logproto.Pattern/QuerySample", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &patternQuerySampleClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Pattern_QuerySampleClient interface {
+ Recv() (*QuerySamplesResponse, error)
+ grpc.ClientStream
+}
+
+type patternQuerySampleClient struct {
+ grpc.ClientStream
+}
+
+func (x *patternQuerySampleClient) Recv() (*QuerySamplesResponse, error) {
+ m := new(QuerySamplesResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
// PatternServer is the server API for Pattern service.
type PatternServer interface {
Push(context.Context, *push.PushRequest) (*push.PushResponse, error)
Query(*QueryPatternsRequest, Pattern_QueryServer) error
+ QuerySample(*QuerySamplesRequest, Pattern_QuerySampleServer) error
}
// UnimplementedPatternServer can be embedded to have forward compatible implementations.
@@ -544,6 +805,9 @@ func (*UnimplementedPatternServer) Push(ctx context.Context, req *push.PushReque
func (*UnimplementedPatternServer) Query(req *QueryPatternsRequest, srv Pattern_QueryServer) error {
return status.Errorf(codes.Unimplemented, "method Query not implemented")
}
+func (*UnimplementedPatternServer) QuerySample(req *QuerySamplesRequest, srv Pattern_QuerySampleServer) error {
+ return status.Errorf(codes.Unimplemented, "method QuerySample not implemented")
+}
func RegisterPatternServer(s *grpc.Server, srv PatternServer) {
s.RegisterService(&_Pattern_serviceDesc, srv)
@@ -588,6 +852,27 @@ func (x *patternQueryServer) Send(m *QueryPatternsResponse) error {
return x.ServerStream.SendMsg(m)
}
+func _Pattern_QuerySample_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(QuerySamplesRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(PatternServer).QuerySample(m, &patternQuerySampleServer{stream})
+}
+
+type Pattern_QuerySampleServer interface {
+ Send(*QuerySamplesResponse) error
+ grpc.ServerStream
+}
+
+type patternQuerySampleServer struct {
+ grpc.ServerStream
+}
+
+func (x *patternQuerySampleServer) Send(m *QuerySamplesResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
var _Pattern_serviceDesc = grpc.ServiceDesc{
ServiceName: "logproto.Pattern",
HandlerType: (*PatternServer)(nil),
@@ -603,6 +888,11 @@ var _Pattern_serviceDesc = grpc.ServiceDesc{
Handler: _Pattern_Query_Handler,
ServerStreams: true,
},
+ {
+ StreamName: "QuerySample",
+ Handler: _Pattern_QuerySample_Handler,
+ ServerStreams: true,
+ },
},
Metadata: "pkg/logproto/pattern.proto",
}
@@ -772,51 +1062,158 @@ func (m *PatternSample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func encodeVarintPattern(dAtA []byte, offset int, v uint64) int {
- offset -= sovPattern(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
+func (m *QuerySamplesRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- dAtA[offset] = uint8(v)
- return base
+ return dAtA[:n], nil
}
-func (m *QueryPatternsRequest) Size() (n int) {
- if m == nil {
- return 0
- }
+
+func (m *QuerySamplesRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QuerySamplesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Query)
- if l > 0 {
- n += 1 + l + sovPattern(uint64(l))
- }
- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
- n += 1 + l + sovPattern(uint64(l))
- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End)
- n += 1 + l + sovPattern(uint64(l))
if m.Step != 0 {
- n += 1 + sovPattern(uint64(m.Step))
+ i = encodeVarintPattern(dAtA, i, uint64(m.Step))
+ i--
+ dAtA[i] = 0x20
}
- return n
-}
-
-func (m *QueryPatternsResponse) Size() (n int) {
- if m == nil {
- return 0
+ n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):])
+ if err3 != nil {
+ return 0, err3
}
- var l int
- _ = l
- if len(m.Series) > 0 {
- for _, e := range m.Series {
- l = e.Size()
- n += 1 + l + sovPattern(uint64(l))
- }
+ i -= n3
+ i = encodeVarintPattern(dAtA, i, uint64(n3))
+ i--
+ dAtA[i] = 0x1a
+ n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):])
+ if err4 != nil {
+ return 0, err4
}
- return n
-}
+ i -= n4
+ i = encodeVarintPattern(dAtA, i, uint64(n4))
+ i--
+ dAtA[i] = 0x12
+ if len(m.Query) > 0 {
+ i -= len(m.Query)
+ copy(dAtA[i:], m.Query)
+ i = encodeVarintPattern(dAtA, i, uint64(len(m.Query)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QuerySamplesResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QuerySamplesResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QuerySamplesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Warnings) > 0 {
+ for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Warnings[iNdEx])
+ copy(dAtA[i:], m.Warnings[iNdEx])
+ i = encodeVarintPattern(dAtA, i, uint64(len(m.Warnings[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ {
+ size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintPattern(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.Series) > 0 {
+ for iNdEx := len(m.Series) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size := m.Series[iNdEx].Size()
+ i -= size
+ if _, err := m.Series[iNdEx].MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintPattern(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintPattern(dAtA []byte, offset int, v uint64) int {
+ offset -= sovPattern(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *QueryPatternsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Query)
+ if l > 0 {
+ n += 1 + l + sovPattern(uint64(l))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
+ n += 1 + l + sovPattern(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End)
+ n += 1 + l + sovPattern(uint64(l))
+ if m.Step != 0 {
+ n += 1 + sovPattern(uint64(m.Step))
+ }
+ return n
+}
+
+func (m *QueryPatternsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Series) > 0 {
+ for _, e := range m.Series {
+ l = e.Size()
+ n += 1 + l + sovPattern(uint64(l))
+ }
+ }
+ return n
+}
func (m *PatternSeries) Size() (n int) {
if m == nil {
@@ -852,6 +1249,49 @@ func (m *PatternSample) Size() (n int) {
return n
}
+func (m *QuerySamplesRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Query)
+ if l > 0 {
+ n += 1 + l + sovPattern(uint64(l))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start)
+ n += 1 + l + sovPattern(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End)
+ n += 1 + l + sovPattern(uint64(l))
+ if m.Step != 0 {
+ n += 1 + sovPattern(uint64(m.Step))
+ }
+ return n
+}
+
+func (m *QuerySamplesResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Series) > 0 {
+ for _, e := range m.Series {
+ l = e.Size()
+ n += 1 + l + sovPattern(uint64(l))
+ }
+ }
+ l = m.Stats.Size()
+ n += 1 + l + sovPattern(uint64(l))
+ if len(m.Warnings) > 0 {
+ for _, s := range m.Warnings {
+ l = len(s)
+ n += 1 + l + sovPattern(uint64(l))
+ }
+ }
+ return n
+}
+
func sovPattern(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@@ -913,6 +1353,31 @@ func (this *PatternSample) String() string {
}, "")
return s
}
+func (this *QuerySamplesRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&QuerySamplesRequest{`,
+ `Query:` + fmt.Sprintf("%v", this.Query) + `,`,
+ `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+ `Step:` + fmt.Sprintf("%v", this.Step) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *QuerySamplesResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&QuerySamplesResponse{`,
+ `Series:` + fmt.Sprintf("%v", this.Series) + `,`,
+ `Stats:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Stats), "Ingester", "stats.Ingester", 1), `&`, ``, 1) + `,`,
+ `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringPattern(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -1388,6 +1853,328 @@ func (m *PatternSample) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *QuerySamplesRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPattern
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QuerySamplesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QuerySamplesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPattern
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPattern
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Query = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPattern
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPattern
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPattern
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPattern
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType)
+ }
+ m.Step = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPattern
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Step |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPattern(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QuerySamplesResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPattern
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QuerySamplesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QuerySamplesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPattern
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPattern
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Series = append(m.Series, Series{})
+ if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPattern
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPattern
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPattern
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPattern
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPattern(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthPattern
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipPattern(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/pkg/logproto/pattern.proto b/pkg/logproto/pattern.proto
index e92a201b3a8b1..c3e4caaffc5ce 100644
--- a/pkg/logproto/pattern.proto
+++ b/pkg/logproto/pattern.proto
@@ -5,6 +5,7 @@ package logproto;
import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "pkg/logproto/logproto.proto";
+import "pkg/logqlmodel/stats/stats.proto";
import "pkg/push/push.proto";
option go_package = "github.com/grafana/loki/v3/pkg/logproto";
@@ -12,6 +13,7 @@ option go_package = "github.com/grafana/loki/v3/pkg/logproto";
service Pattern {
rpc Push(PushRequest) returns (PushResponse) {}
rpc Query(QueryPatternsRequest) returns (stream QueryPatternsResponse) {}
+ rpc QuerySample(QuerySamplesRequest) returns (stream QuerySamplesResponse) {}
}
message QueryPatternsRequest {
@@ -43,3 +45,25 @@ message PatternSample {
];
int64 value = 2;
}
+
+message QuerySamplesRequest {
+ string query = 1;
+ google.protobuf.Timestamp start = 2 [
+ (gogoproto.stdtime) = true,
+ (gogoproto.nullable) = false
+ ];
+ google.protobuf.Timestamp end = 3 [
+ (gogoproto.stdtime) = true,
+ (gogoproto.nullable) = false
+ ];
+ int64 step = 4;
+}
+
+message QuerySamplesResponse {
+ repeated Series series = 1 [
+ (gogoproto.customtype) = "Series",
+ (gogoproto.nullable) = true
+ ];
+ stats.Ingester stats = 2 [(gogoproto.nullable) = false];
+ repeated string warnings = 3;
+}
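
For context, the generated client exposes QuerySample as a server-streaming call, so callers read QuerySamplesResponse messages until io.EOF. A minimal consumption sketch, assuming a reachable gRPC endpoint and the standard generated NewPatternClient constructor; the address, query, and step unit are illustrative only:

package main

import (
	"context"
	"fmt"
	"io"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/grafana/loki/v3/pkg/logproto"
)

func main() {
	// Illustrative address; in Loki the pattern ingesters are normally reached through the ring client.
	conn, err := grpc.Dial("pattern-ingester:9095", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := logproto.NewPatternClient(conn)
	stream, err := client.QuerySample(context.Background(), &logproto.QuerySamplesRequest{
		Query: `count_over_time({app="foo"}[1m])`,
		Start: time.Now().Add(-time.Hour),
		End:   time.Now(),
		Step:  10000, // step; unit assumed to be milliseconds
	})
	if err != nil {
		panic(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println("series:", len(resp.Series), "warnings:", resp.Warnings)
	}
}
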
diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go
index 948aef03876bc..ae313ea1fc48f 100644
--- a/pkg/logql/downstream_test.go
+++ b/pkg/logql/downstream_test.go
@@ -57,6 +57,9 @@ func TestMappingEquivalence(t *testing.T) {
{`sum(rate({a=~".+"} |= "foo" != "foo"[1s]) or vector(1))`, false, nil},
{`avg_over_time({a=~".+"} | logfmt | unwrap value [1s])`, false, nil},
{`avg_over_time({a=~".+"} | logfmt | unwrap value [1s]) by (a)`, true, nil},
+ {`avg_over_time({a=~".+"} | logfmt | unwrap value [1s]) without (stream)`, true, nil},
+ {`avg_over_time({a=~".+"} | logfmt | drop level | unwrap value [1s])`, true, nil},
+ {`avg_over_time({a=~".+"} | logfmt | drop level | unwrap value [1s]) without (stream)`, true, nil},
{`quantile_over_time(0.99, {a=~".+"} | logfmt | unwrap value [1s])`, true, []string{ShardQuantileOverTime}},
{
`
diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go
index fcbfcb450e683..8b46ed4d833fb 100644
--- a/pkg/logql/engine.go
+++ b/pkg/logql/engine.go
@@ -36,7 +36,6 @@ import (
"github.com/grafana/loki/v3/pkg/util/httpreq"
logutil "github.com/grafana/loki/v3/pkg/util/log"
"github.com/grafana/loki/v3/pkg/util/server"
- "github.com/grafana/loki/v3/pkg/util/spanlogger"
"github.com/grafana/loki/v3/pkg/util/validation"
)
@@ -231,7 +230,6 @@ func (q *query) resultLength(res promql_parser.Value) int {
func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "query.Exec")
defer sp.Finish()
- spLogger := spanlogger.FromContext(ctx)
sp.LogKV(
"type", GetRangeType(q.params),
@@ -265,7 +263,7 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) {
queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration)
statResult := statsCtx.Result(time.Since(start), queueTime, q.resultLength(data))
- statResult.Log(level.Debug(spLogger))
+ sp.LogKV(statResult.KVList()...)
status, _ := server.ClientHTTPStatusAndError(err)
diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go
index e50d8739c30ad..cdf05829c200e 100644
--- a/pkg/logql/evaluator.go
+++ b/pkg/logql/evaluator.go
@@ -410,6 +410,20 @@ type VectorAggEvaluator struct {
lb *labels.Builder
}
+func NewVectorAggEvaluator(
+ nextEvaluator StepEvaluator,
+ expr *syntax.VectorAggregationExpr,
+ buf []byte,
+ lb *labels.Builder,
+) *VectorAggEvaluator {
+ return &VectorAggEvaluator{
+ nextEvaluator: nextEvaluator,
+ expr: expr,
+ buf: buf,
+ lb: lb,
+ }
+}
+
func (e *VectorAggEvaluator) Next() (bool, int64, StepResult) {
next, ts, r := e.nextEvaluator.Next()
@@ -684,9 +698,7 @@ func newRangeAggEvaluator(
return nil, err
}
- return &RangeVectorEvaluator{
- iter: iter,
- }, nil
+ return NewRangeVectorEvaluator(iter), nil
}
}
@@ -696,6 +708,12 @@ type RangeVectorEvaluator struct {
err error
}
+func NewRangeVectorEvaluator(iter RangeVectorIterator) *RangeVectorEvaluator {
+ return &RangeVectorEvaluator{
+ iter: iter,
+ }
+}
+
func (r *RangeVectorEvaluator) Next() (bool, int64, StepResult) {
next := r.iter.Next()
if !next {
@@ -805,7 +823,7 @@ func newBinOpStepEvaluator(
var lse, rse StepEvaluator
- ctx, cancel := context.WithCancel(ctx)
+ ctx, cancel := context.WithCancelCause(ctx)
g := errgroup.Group{}
// We have two non-literal legs,
@@ -814,7 +832,7 @@ func newBinOpStepEvaluator(
var err error
lse, err = evFactory.NewStepEvaluator(ctx, evFactory, expr.SampleExpr, q)
if err != nil {
- cancel()
+ cancel(fmt.Errorf("new step evaluator for left leg errored: %w", err))
}
return err
})
@@ -822,7 +840,7 @@ func newBinOpStepEvaluator(
var err error
rse, err = evFactory.NewStepEvaluator(ctx, evFactory, expr.RHS, q)
if err != nil {
- cancel()
+ cancel(fmt.Errorf("new step evaluator for right leg errored: %w", err))
}
return err
})
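
The switch from context.WithCancel to context.WithCancelCause preserves why a leg of the binary operation was cancelled, so downstream consumers can distinguish it from a plain cancellation. A standalone sketch of the standard-library pattern (names here are illustrative, not Loki code):

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Cancel with an explanatory error instead of a bare cancel().
	cancel(fmt.Errorf("new step evaluator for left leg errored: %w", errors.New("boom")))

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context.Canceled
	fmt.Println(context.Cause(ctx)) // the error passed to cancel, with its cause preserved
}
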
diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go
index 052446c6b5b74..6f35f84b3a3ae 100644
--- a/pkg/logql/metrics.go
+++ b/pkg/logql/metrics.go
@@ -377,7 +377,6 @@ func RecordStatsQueryMetrics(ctx context.Context, log log.Logger, start, end tim
"query", query,
"query_hash", util.HashedQuery(query),
"total_entries", stats.Summary.TotalEntriesReturned)
-
level.Info(logger).Log(logValues...)
execLatency.WithLabelValues(status, queryType, "").Observe(stats.Summary.ExecTime)
diff --git a/pkg/logql/metrics_test.go b/pkg/logql/metrics_test.go
index 44094e27f5d4b..577627a202036 100644
--- a/pkg/logql/metrics_test.go
+++ b/pkg/logql/metrics_test.go
@@ -92,8 +92,8 @@ func TestLogSlowQuery(t *testing.T) {
func TestLogLabelsQuery(t *testing.T) {
buf := bytes.NewBufferString("")
- logger := log.NewLogfmtLogger(buf)
tr, c := jaeger.NewTracer("foo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter())
+ logger := log.NewLogfmtLogger(buf)
defer c.Close()
opentracing.SetGlobalTracer(tr)
sp := opentracing.StartSpan("")
diff --git a/pkg/logql/range_vector.go b/pkg/logql/range_vector.go
index 44a8651577549..180a1bde27cac 100644
--- a/pkg/logql/range_vector.go
+++ b/pkg/logql/range_vector.go
@@ -75,21 +75,18 @@ func newRangeVectorIterator(
if err != nil {
return nil, err
}
- return &batchRangeVectorIterator{
- iter: it,
- step: step,
- end: end,
- selRange: selRange,
- metrics: map[string]labels.Labels{},
- window: map[string]*promql.Series{},
- agg: vectorAggregator,
- current: start - step, // first loop iteration will set it to start
- offset: offset,
- }, nil
-}
-
-//batch
-
+ return NewBatchRangeVectorIterator(
+ it,
+ selRange,
+ step,
+ start,
+ end,
+ offset,
+ vectorAggregator,
+ ), nil
+}
+
+// batch
type batchRangeVectorIterator struct {
iter iter.PeekingSampleIterator
selRange, step, end, current, offset int64
@@ -99,6 +96,24 @@ type batchRangeVectorIterator struct {
agg BatchRangeVectorAggregator
}
+func NewBatchRangeVectorIterator(
+ it iter.PeekingSampleIterator,
+ selRange, step, start, end, offset int64,
+ agg BatchRangeVectorAggregator,
+) RangeVectorIterator {
+ return &batchRangeVectorIterator{
+ iter: it,
+ selRange: selRange,
+ step: step,
+ end: end,
+ current: start - step, // first loop iteration will set it to start
+ offset: offset,
+ metrics: map[string]labels.Labels{},
+ window: map[string]*promql.Series{},
+ agg: agg,
+ }
+}
+
func (r *batchRangeVectorIterator) Next() bool {
// slides the range window to the next position
r.current = r.current + r.step
@@ -178,6 +193,7 @@ func (r *batchRangeVectorIterator) load(start, end int64) {
series.Metric = metric
r.window[lbs] = series
}
+ // TODO(twhitney): Everywhere else, an FPoint.T is in milliseconds, but here it's in nanoseconds.
p := promql.FPoint{
T: sample.Timestamp,
F: sample.Value,
diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go
index e55b01504537f..003362913171d 100644
--- a/pkg/logql/shardmapper.go
+++ b/pkg/logql/shardmapper.go
@@ -397,13 +397,18 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr,
return m.mapSampleExpr(expr, r)
}
+ grouping := expr.Grouping
+ if grouping == nil {
+ grouping = &syntax.Grouping{Without: true}
+ }
+
// avg_over_time() by (foo) -> sum by (foo) (sum_over_time()) / sum by (foo) (count_over_time())
lhs, lhsBytesPerShard, err := m.mapVectorAggregationExpr(&syntax.VectorAggregationExpr{
Left: &syntax.RangeAggregationExpr{
Left: expr.Left,
Operation: syntax.OpRangeTypeSum,
},
- Grouping: expr.Grouping,
+ Grouping: grouping,
Operation: syntax.OpTypeSum,
}, r, false)
if err != nil {
@@ -416,12 +421,21 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr,
return nil, 0, err
}
+	// labelSampleExtractor includes the unwrap identifier in the without() list when no grouping is specified.
+	// A similar change is required for the RHS here so that the resulting label sets match.

+ rhsGrouping := *grouping
+ if rhsGrouping.Without {
+ if expr.Left.Unwrap != nil {
+ rhsGrouping.Groups = append(rhsGrouping.Groups, expr.Left.Unwrap.Identifier)
+ }
+ }
+
rhs, rhsBytesPerShard, err := m.mapVectorAggregationExpr(&syntax.VectorAggregationExpr{
Left: &syntax.RangeAggregationExpr{
Left: countOverTimeSelector,
Operation: syntax.OpRangeTypeCount,
},
- Grouping: expr.Grouping,
+ Grouping: &rhsGrouping,
Operation: syntax.OpTypeSum,
}, r, false)
if err != nil {
diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go
index 784301928583b..9bdd128b6e493 100644
--- a/pkg/logql/shardmapper_test.go
+++ b/pkg/logql/shardmapper_test.go
@@ -392,6 +392,38 @@ func TestMappingStrings(t *testing.T) {
)
)`,
},
+ {
+ in: `avg_over_time({job=~"myapps.*"} |= "stats" | json | keep busy | unwrap busy [5m])`,
+ out: `(
+ sum without() (
+ downstream
+ ++
+ downstream
+ )
+ /
+ sum without(busy) (
+ downstream
+ ++
+ downstream
+ )
+ )`,
+ },
+ {
+ in: `avg_over_time({job=~"myapps.*"} |= "stats" | json | keep busy | unwrap busy [5m]) without (foo)`,
+ out: `(
+ sum without(foo) (
+ downstream
+ ++
+ downstream
+ )
+ /
+ sum without(foo,busy) (
+ downstream
+ ++
+ downstream
+ )
+ )`,
+ },
// should be noop if VectorExpr
{
in: `vector(0)`,
diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go
index 6e3f18b7cc8e6..e5e80b4d0c172 100644
--- a/pkg/logql/syntax/ast.go
+++ b/pkg/logql/syntax/ast.go
@@ -366,14 +366,6 @@ func newLineFilterExpr(ty log.LineMatchType, op, match string) *LineFilterExpr {
func newOrLineFilter(left, right *LineFilterExpr) *LineFilterExpr {
right.Ty = left.Ty
- if left.Ty == log.LineMatchEqual || left.Ty == log.LineMatchRegexp || left.Ty == log.LineMatchPattern {
- left.Or = right
- right.IsOrChild = true
- return left
- }
-
- // !(left or right) == (!left and !right).
-
// NOTE: Consider, we have chain of "or", != "foo" or "bar" or "baz"
// we parse from right to left, so first time left="bar", right="baz", and we don't know the actual `Ty` (equal: |=, notequal: !=, regex: |~, etc). So
// it will have default (0, LineMatchEqual).
@@ -385,6 +377,13 @@ func newOrLineFilter(left, right *LineFilterExpr) *LineFilterExpr {
tmp = tmp.Or
}
+ if left.Ty == log.LineMatchEqual || left.Ty == log.LineMatchRegexp || left.Ty == log.LineMatchPattern {
+ left.Or = right
+ right.IsOrChild = true
+ return left
+ }
+
+ // !(left or right) == (!left and !right).
return newNestedLineFilterExpr(left, right)
}
diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go
index 9090fc98b7558..d75ff2d0261b6 100644
--- a/pkg/logql/syntax/ast_test.go
+++ b/pkg/logql/syntax/ast_test.go
@@ -545,11 +545,18 @@ func Test_FilterMatcher(t *testing.T) {
[]linecheck{{"foo", false}, {"bar", true}, {"127.0.0.2", true}, {"127.0.0.1", false}},
},
{
- `{app="foo"} |> "foo" or "bar"`,
+ `{app="foo"} |> "<_>foo<_>" or "<_>bar<_>"`,
[]*labels.Matcher{
mustNewMatcher(labels.MatchEqual, "app", "foo"),
},
- []linecheck{{"foo", true}, {"bar", true}, {"none", false}},
+ []linecheck{{"test foo test", true}, {"test bar test", true}, {"none", false}},
+ },
+ {
+ `{app="foo"} |> "<_>foo<_>" or "<_>bar<_>" or "<_>baz<_>"`,
+ []*labels.Matcher{
+ mustNewMatcher(labels.MatchEqual, "app", "foo"),
+ },
+ []linecheck{{"test foo test", true}, {"test bar test", true}, {"test baz test", true}, {"none", false}},
},
{
`{app="foo"} !> "foo" or "bar"`,
@@ -618,6 +625,18 @@ func TestOrLineFilterTypes(t *testing.T) {
_ = newOrLineFilter(left, right)
require.Equal(t, tt.ty, right.Ty)
+ require.Equal(t, tt.ty, left.Ty)
+ })
+
+ t.Run("right inherits left's type with multiple or filters", func(t *testing.T) {
+ f1 := &LineFilterExpr{LineFilter: LineFilter{Ty: tt.ty, Match: "something"}}
+ f2 := &LineFilterExpr{LineFilter: LineFilter{Ty: log.LineMatchEqual, Match: "something"}}
+ f3 := &LineFilterExpr{LineFilter: LineFilter{Ty: log.LineMatchEqual, Match: "something"}}
+
+ _ = newOrLineFilter(f1, newOrLineFilter(f2, f3))
+ require.Equal(t, tt.ty, f1.Ty)
+ require.Equal(t, tt.ty, f2.Ty)
+ require.Equal(t, tt.ty, f3.Ty)
})
}
}
diff --git a/pkg/logql/syntax/parser_test.go b/pkg/logql/syntax/parser_test.go
index f12309f2b24a5..4c2a85203938b 100644
--- a/pkg/logql/syntax/parser_test.go
+++ b/pkg/logql/syntax/parser_test.go
@@ -3173,6 +3173,66 @@ var ParseTestCases = []struct {
},
},
},
+ {
+ in: `{app="foo"} |= "foo" or "bar" or "baz"`,
+ exp: &PipelineExpr{
+ Left: newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "app", "foo")}),
+ MultiStages: MultiStageExpr{
+ &LineFilterExpr{
+ LineFilter: LineFilter{
+ Ty: log.LineMatchEqual,
+ Match: "foo",
+ },
+ Or: newOrLineFilter(
+ &LineFilterExpr{
+ LineFilter: LineFilter{
+ Ty: log.LineMatchEqual,
+ Match: "bar",
+ },
+ IsOrChild: true,
+ },
+ &LineFilterExpr{
+ LineFilter: LineFilter{
+ Ty: log.LineMatchEqual,
+ Match: "baz",
+ },
+ IsOrChild: true,
+ }),
+ IsOrChild: false,
+ },
+ },
+ },
+ },
+ {
+ in: `{app="foo"} |> "foo" or "bar" or "baz"`,
+ exp: &PipelineExpr{
+ Left: newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "app", "foo")}),
+ MultiStages: MultiStageExpr{
+ &LineFilterExpr{
+ LineFilter: LineFilter{
+ Ty: log.LineMatchPattern,
+ Match: "foo",
+ },
+ Or: newOrLineFilter(
+ &LineFilterExpr{
+ LineFilter: LineFilter{
+ Ty: log.LineMatchPattern,
+ Match: "bar",
+ },
+ IsOrChild: true,
+ },
+ &LineFilterExpr{
+ LineFilter: LineFilter{
+ Ty: log.LineMatchPattern,
+ Match: "baz",
+ },
+ IsOrChild: true,
+ }),
+ IsOrChild: false,
+ },
+ },
+ },
+ },
}
func TestParse(t *testing.T) {
diff --git a/pkg/logqlmodel/stats/context.go b/pkg/logqlmodel/stats/context.go
index 18794fb137fe8..a0509be31f6d2 100644
--- a/pkg/logqlmodel/stats/context.go
+++ b/pkg/logqlmodel/stats/context.go
@@ -26,6 +26,7 @@ import (
"time"
"github.com/dustin/go-humanize"
+
"github.com/go-kit/log"
)
@@ -518,9 +519,12 @@ func (c *Context) getCacheStatsByType(t CacheType) *Cache {
return stats
}
-// Log logs a query statistics result.
-func (r Result) Log(log log.Logger) {
- _ = log.Log(
+func (r Result) Log(logger log.Logger) {
+ logger.Log(r.KVList()...)
+}
+
+func (r Result) KVList() []any {
+ result := []any{
"Ingester.TotalReached", r.Ingester.TotalReached,
"Ingester.TotalChunksMatched", r.Ingester.TotalChunksMatched,
"Ingester.TotalBatches", r.Ingester.TotalBatches,
@@ -549,13 +553,14 @@ func (r Result) Log(log log.Logger) {
"Querier.CompressedBytes", humanize.Bytes(uint64(r.Querier.Store.Chunk.CompressedBytes)),
"Querier.TotalDuplicates", r.Querier.Store.Chunk.TotalDuplicates,
"Querier.QueryReferencedStructuredMetadata", r.Querier.Store.QueryReferencedStructured,
- )
- r.Caches.Log(log)
- r.Summary.Log(log)
+ }
+
+ result = append(result, r.Caches.kvList()...)
+ return append(result, r.Summary.kvList()...)
}
-func (s Summary) Log(log log.Logger) {
- _ = log.Log(
+func (s Summary) kvList() []any {
+ return []any{
"Summary.BytesProcessedPerSecond", humanize.Bytes(uint64(s.BytesProcessedPerSecond)),
"Summary.LinesProcessedPerSecond", s.LinesProcessedPerSecond,
"Summary.TotalBytesProcessed", humanize.Bytes(uint64(s.TotalBytesProcessed)),
@@ -563,11 +568,11 @@ func (s Summary) Log(log log.Logger) {
"Summary.PostFilterLines", s.TotalPostFilterLines,
"Summary.ExecTime", ConvertSecondsToNanoseconds(s.ExecTime),
"Summary.QueueTime", ConvertSecondsToNanoseconds(s.QueueTime),
- )
+ }
}
-func (c Caches) Log(log log.Logger) {
- _ = log.Log(
+func (c Caches) kvList() []any {
+ return []any{
"Cache.Chunk.Requests", c.Chunk.Requests,
"Cache.Chunk.EntriesRequested", c.Chunk.EntriesRequested,
"Cache.Chunk.EntriesFound", c.Chunk.EntriesFound,
@@ -620,5 +625,5 @@ func (c Caches) Log(log log.Logger) {
"Cache.InstantMetricResult.BytesSent", humanize.Bytes(uint64(c.InstantMetricResult.BytesSent)),
"Cache.InstantMetricResult.BytesReceived", humanize.Bytes(uint64(c.InstantMetricResult.BytesReceived)),
"Cache.InstantMetricResult.DownloadTime", c.InstantMetricResult.CacheDownloadTime(),
- )
+ }
}
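
Splitting Result.Log into Log plus KVList lets the same statistics feed either a go-kit logger or a tracing span, which is what the engine.go change above does with sp.LogKV(statResult.KVList()...). A minimal sketch of the two call sites, assuming a populated stats.Result; the helper name is illustrative:

package example

import (
	"context"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/opentracing/opentracing-go"

	"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
)

// logAndTrace emits the same key/value statistics to a logger and, when a
// span is active on the context, to that span as well.
func logAndTrace(ctx context.Context, logger log.Logger, r stats.Result) {
	r.Log(level.Debug(logger)) // same output as before the refactor

	if sp := opentracing.SpanFromContext(ctx); sp != nil {
		sp.LogKV(r.KVList()...) // mirrors the new call in pkg/logql/engine.go
	}
}
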
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 9446b351aab82..0b2f2a3c91058 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -108,6 +108,7 @@ type Config struct {
OperationalConfig runtime.Config `yaml:"operational_config,omitempty"`
Tracing tracing.Config `yaml:"tracing"`
Analytics analytics.Config `yaml:"analytics"`
+ Profiling ProfilingConfig `yaml:"profiling,omitempty"`
LegacyReadTarget bool `yaml:"legacy_read_target,omitempty" doc:"hidden|deprecated"`
@@ -179,6 +180,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) {
c.QueryScheduler.RegisterFlags(f)
c.Analytics.RegisterFlags(f)
c.OperationalConfig.RegisterFlags(f)
+ c.Profiling.RegisterFlags(f)
}
func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) {
@@ -331,13 +333,13 @@ type Loki struct {
distributor *distributor.Distributor
Ingester ingester.Interface
PatternIngester *pattern.Ingester
- PatternRingClient *pattern.RingClient
+ PatternRingClient pattern.RingClient
Querier querier.Querier
cacheGenerationLoader queryrangebase.CacheGenNumberLoader
querierAPI *querier.QuerierAPI
ingesterQuerier *querier.IngesterQuerier
Store storage.Store
- BloomStore bloomshipper.StoreWithMetrics
+ BloomStore bloomshipper.Store
tableManager *index.TableManager
frontend Frontend
ruler *base_ruler.Ruler
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 22cd46743ea27..204cecd0ce3ad 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -40,6 +40,7 @@ import (
"github.com/grafana/loki/v3/pkg/analytics"
"github.com/grafana/loki/v3/pkg/bloombuild/builder"
"github.com/grafana/loki/v3/pkg/bloombuild/planner"
+ bloomprotos "github.com/grafana/loki/v3/pkg/bloombuild/protos"
"github.com/grafana/loki/v3/pkg/bloomgateway"
"github.com/grafana/loki/v3/pkg/compactor"
compactorclient "github.com/grafana/loki/v3/pkg/compactor/client"
@@ -79,6 +80,7 @@ import (
"github.com/grafana/loki/v3/pkg/util/httpreq"
"github.com/grafana/loki/v3/pkg/util/limiter"
util_log "github.com/grafana/loki/v3/pkg/util/log"
+ "github.com/grafana/loki/v3/pkg/util/mempool"
"github.com/grafana/loki/v3/pkg/util/querylimits"
lokiring "github.com/grafana/loki/v3/pkg/util/ring"
serverutil "github.com/grafana/loki/v3/pkg/util/server"
@@ -521,6 +523,7 @@ func (t *Loki) initQuerier() (services.Service, error) {
router.Path("/loki/api/v1/index/volume").Methods("GET", "POST").Handler(volumeHTTPMiddleware.Wrap(httpHandler))
router.Path("/loki/api/v1/index/volume_range").Methods("GET", "POST").Handler(volumeRangeHTTPMiddleware.Wrap(httpHandler))
router.Path("/loki/api/v1/patterns").Methods("GET", "POST").Handler(httpHandler)
+ router.Path("/loki/api/v1/explore/query_range").Methods("GET", "POST").Handler(httpHandler)
router.Path("/api/prom/query").Methods("GET", "POST").Handler(
middleware.Merge(
@@ -587,7 +590,7 @@ func (t *Loki) initIngester() (_ services.Service, err error) {
level.Warn(util_log.Logger).Log("msg", "The config setting shutdown marker path is not set. The /ingester/prepare_shutdown endpoint won't work")
}
- t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker)
+ t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker, t.ring)
if err != nil {
return
}
@@ -712,9 +715,9 @@ func (t *Loki) initStore() (services.Service, error) {
}
func (t *Loki) initBloomStore() (services.Service, error) {
- // BloomStore is a dependency of IndexGateway, even when the BloomGateway is not enabled.
- // Do not instantiate store and do not create a service.
- if !t.Cfg.BloomGateway.Enabled {
+	// BloomStore is a dependency of the IndexGateway and of the Bloom Planner & Builder.
+	// Do not instantiate the store and do not create a service if neither is enabled.
+ if !t.Cfg.BloomGateway.Enabled && !t.Cfg.BloomBuild.Enabled {
return nil, nil
}
@@ -753,7 +756,24 @@ func (t *Loki) initBloomStore() (services.Service, error) {
level.Warn(logger).Log("msg", "failed to preload blocks cache", "err", err)
}
- t.BloomStore, err = bloomshipper.NewBloomStore(t.Cfg.SchemaConfig.Configs, t.Cfg.StorageConfig, t.ClientMetrics, metasCache, blocksCache, reg, logger)
+ var pageAllocator mempool.Allocator
+
+ // Set global BloomPageAllocator variable
+ switch bsCfg.MemoryManagement.BloomPageAllocationType {
+ case "simple":
+ pageAllocator = &mempool.SimpleHeapAllocator{}
+ case "dynamic":
+ // sync buffer pool for bloom pages
+ // 128KB 256KB 512KB 1MB 2MB 4MB 8MB 16MB 32MB 64MB 128MB
+ pageAllocator = mempool.NewBytePoolAllocator(128<<10, 128<<20, 2)
+ case "fixed":
+ pageAllocator = mempool.New("bloom-page-pool", bsCfg.MemoryManagement.BloomPageMemPoolBuckets, reg)
+ default:
+ // should not happen as the type is validated upfront
+ return nil, fmt.Errorf("failed to create bloom store: invalid allocator type")
+ }
+
+ t.BloomStore, err = bloomshipper.NewBloomStore(t.Cfg.SchemaConfig.Configs, t.Cfg.StorageConfig, t.ClientMetrics, metasCache, blocksCache, pageAllocator, reg, logger)
if err != nil {
return nil, fmt.Errorf("failed to create bloom store: %w", err)
}
@@ -1105,6 +1125,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
t.Server.HTTP.Path("/loki/api/v1/label/{name}/values").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/series").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/patterns").Methods("GET", "POST").Handler(frontendHandler)
+ t.Server.HTTP.Path("/loki/api/v1/explore/query_range").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/detected_labels").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/detected_fields").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/index/stats").Methods("GET", "POST").Handler(frontendHandler)
@@ -1564,7 +1585,7 @@ func (t *Loki) initBloomPlanner() (services.Service, error) {
logger := log.With(util_log.Logger, "component", "bloom-planner")
- return planner.New(
+ p, err := planner.New(
t.Cfg.BloomBuild.Planner,
t.Overrides,
t.Cfg.SchemaConfig,
@@ -1574,6 +1595,12 @@ func (t *Loki) initBloomPlanner() (services.Service, error) {
logger,
prometheus.DefaultRegisterer,
)
+ if err != nil {
+ return nil, err
+ }
+
+ bloomprotos.RegisterPlannerForBuilderServer(t.Server.GRPC, p)
+ return p, nil
}
func (t *Loki) initBloomBuilder() (services.Service, error) {
@@ -1581,7 +1608,7 @@ func (t *Loki) initBloomBuilder() (services.Service, error) {
return nil, nil
}
- logger := log.With(util_log.Logger, "component", "bloom-worker")
+ logger := log.With(util_log.Logger, "component", "bloom-builder")
return builder.New(
t.Cfg.BloomBuild.Builder,
diff --git a/pkg/loki/profiling_config.go b/pkg/loki/profiling_config.go
new file mode 100644
index 0000000000000..30162f2b00bd0
--- /dev/null
+++ b/pkg/loki/profiling_config.go
@@ -0,0 +1,21 @@
+package loki
+
+import "flag"
+
+type ProfilingConfig struct {
+ BlockProfileRate int `yaml:"block_profile_rate"`
+ CPUProfileRate int `yaml:"cpu_profile_rate"`
+ MutexProfileFraction int `yaml:"mutex_profile_fraction"`
+}
+
+// RegisterFlags registers flags with the default "profiling." prefix.
+func (c *ProfilingConfig) RegisterFlags(f *flag.FlagSet) {
+ c.RegisterFlagsWithPrefix("profiling.", f)
+}
+
+// RegisterFlagsWithPrefix registers flags with a common prefix.
+func (c *ProfilingConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+	f.IntVar(&c.BlockProfileRate, prefix+"block-profile-rate", 0, "Sets the value for runtime.SetBlockProfileRate")
+ f.IntVar(&c.CPUProfileRate, prefix+"cpu-profile-rate", 0, "Sets the value for runtime.SetCPUProfileRate")
+ f.IntVar(&c.MutexProfileFraction, prefix+"mutex-profile-fraction", 0, "Sets the value for runtime.SetMutexProfileFraction")
+}
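
These config fields map one-to-one onto the Go runtime's profiling setters; the wiring is outside this hunk, so the sketch below only illustrates where the values would land (the apply helper is hypothetical):

package loki

import "runtime"

// apply is a hypothetical helper showing the intended effect of each field.
func (c *ProfilingConfig) apply() {
	if c.BlockProfileRate > 0 {
		runtime.SetBlockProfileRate(c.BlockProfileRate) // sample one blocking event per rate nanoseconds
	}
	if c.CPUProfileRate > 0 {
		runtime.SetCPUProfileRate(c.CPUProfileRate) // CPU profiling samples per second
	}
	if c.MutexProfileFraction > 0 {
		runtime.SetMutexProfileFraction(c.MutexProfileFraction) // report 1/rate of mutex contention events
	}
}
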
diff --git a/pkg/loki/runtime_config_test.go b/pkg/loki/runtime_config_test.go
index 36126841dc47b..81081a856ca2b 100644
--- a/pkg/loki/runtime_config_test.go
+++ b/pkg/loki/runtime_config_test.go
@@ -91,8 +91,12 @@ configs:
"1":
log_push_request: false
limited_log_push_errors: false
+ log_duplicate_metrics: false
+ log_duplicate_stream_info: false
"2":
log_push_request: true
+ log_duplicate_metrics: true
+ log_duplicate_stream_info: true
`)
tenantConfigs, err := runtime.NewTenantConfigs(runtimeGetter)
@@ -104,6 +108,12 @@ configs:
require.Equal(t, true, tenantConfigs.LogPushRequest("2"))
require.Equal(t, true, tenantConfigs.LimitedLogPushErrors("3"))
require.Equal(t, false, tenantConfigs.LogPushRequest("3"))
+ require.Equal(t, false, tenantConfigs.LogDuplicateMetrics("1"))
+ require.Equal(t, true, tenantConfigs.LogDuplicateMetrics("2"))
+ require.Equal(t, false, tenantConfigs.LogDuplicateMetrics("3"))
+ require.Equal(t, false, tenantConfigs.LogDuplicateStreamInfo("1"))
+ require.Equal(t, true, tenantConfigs.LogDuplicateStreamInfo("2"))
+ require.Equal(t, false, tenantConfigs.LogDuplicateStreamInfo("3"))
}
func newTestRuntimeconfig(t *testing.T, yaml string) runtime.TenantConfigProvider {
diff --git a/pkg/pattern/chunk/util.go b/pkg/pattern/chunk/util.go
new file mode 100644
index 0000000000000..8cbde3fb0474b
--- /dev/null
+++ b/pkg/pattern/chunk/util.go
@@ -0,0 +1,14 @@
+package chunk
+
+import (
+ "time"
+
+ "github.com/prometheus/common/model"
+)
+
+const (
+ TimeResolution = model.Time(int64(time.Second*10) / 1e6)
+ MaxChunkTime = 1 * time.Hour
+)
+
+func TruncateTimestamp(ts, step model.Time) model.Time { return ts - ts%step }
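
TruncateTimestamp snaps a model.Time (milliseconds since the epoch) down to the nearest multiple of the step, and TimeResolution works out to 10,000 ms. A small runnable example:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	"github.com/grafana/loki/v3/pkg/pattern/chunk"
)

func main() {
	ts := model.Time(12_345) // 12.345s after the epoch, in milliseconds
	bucket := chunk.TruncateTimestamp(ts, chunk.TimeResolution)
	fmt.Println(int64(bucket)) // 10000: the start of the 10s bucket containing ts
}
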
diff --git a/pkg/pattern/drain/chunk.go b/pkg/pattern/drain/chunk.go
index 1333299467585..cf5192bf8b63e 100644
--- a/pkg/pattern/drain/chunk.go
+++ b/pkg/pattern/drain/chunk.go
@@ -7,15 +7,12 @@ import (
"github.com/prometheus/common/model"
"github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/pattern/chunk"
"github.com/grafana/loki/v3/pkg/pattern/iter"
)
const (
- TimeResolution = model.Time(int64(time.Second*10) / 1e6)
-
defaultVolumeSize = 500
-
- maxChunkTime = 1 * time.Hour
)
type Chunks []Chunk
@@ -25,7 +22,7 @@ type Chunk struct {
}
func newChunk(ts model.Time) Chunk {
- maxSize := int(maxChunkTime.Nanoseconds()/TimeResolution.UnixNano()) + 1
+ maxSize := int(chunk.MaxChunkTime.Nanoseconds()/chunk.TimeResolution.UnixNano()) + 1
v := Chunk{Samples: make([]logproto.PatternSample, 1, maxSize)}
v.Samples[0] = logproto.PatternSample{
Timestamp: ts,
@@ -39,7 +36,7 @@ func (c Chunk) spaceFor(ts model.Time) bool {
return true
}
- return ts.Sub(c.Samples[0].Timestamp) < maxChunkTime
+ return ts.Sub(c.Samples[0].Timestamp) < chunk.MaxChunkTime
}
// ForRange returns samples with only the values
@@ -71,12 +68,12 @@ func (c Chunk) ForRange(start, end, step model.Time) []logproto.PatternSample {
return nil
}
- if step == TimeResolution {
+ if step == chunk.TimeResolution {
return c.Samples[lo:hi]
}
// Re-scale samples into step-sized buckets
- currentStep := truncateTimestamp(c.Samples[lo].Timestamp, step)
+ currentStep := chunk.TruncateTimestamp(c.Samples[lo].Timestamp, step)
aggregatedSamples := make([]logproto.PatternSample, 0, ((c.Samples[hi-1].Timestamp-currentStep)/step)+1)
aggregatedSamples = append(aggregatedSamples, logproto.PatternSample{
Timestamp: currentStep,
@@ -84,7 +81,7 @@ func (c Chunk) ForRange(start, end, step model.Time) []logproto.PatternSample {
})
for _, sample := range c.Samples[lo:hi] {
if sample.Timestamp >= currentStep+step {
- stepForSample := truncateTimestamp(sample.Timestamp, step)
+ stepForSample := chunk.TruncateTimestamp(sample.Timestamp, step)
for i := currentStep + step; i <= stepForSample; i += step {
aggregatedSamples = append(aggregatedSamples, logproto.PatternSample{
Timestamp: i,
@@ -100,7 +97,7 @@ func (c Chunk) ForRange(start, end, step model.Time) []logproto.PatternSample {
}
func (c *Chunks) Add(ts model.Time) {
- t := truncateTimestamp(ts, TimeResolution)
+ t := chunk.TruncateTimestamp(ts, chunk.TimeResolution)
if len(*c) == 0 {
*c = append(*c, newChunk(t))
@@ -205,5 +202,3 @@ func (c *Chunks) size() int {
}
return size
}
-
-func truncateTimestamp(ts, step model.Time) model.Time { return ts - ts%step }
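Aside (not part of the patch): `Chunk.ForRange` keeps its re-bucketing behaviour, now spelled via `chunk.TruncateTimestamp`, summing 10s-resolution samples into coarser step-sized buckets. The sketch below illustrates only that folding idea under assumed local types (`sample` stands in for `logproto.PatternSample`); unlike the real `ForRange` it does not zero-fill empty buckets between samples:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// sample is a stand-in for logproto.PatternSample in this sketch.
type sample struct {
	Timestamp model.Time
	Value     int64
}

func truncate(ts, step model.Time) model.Time { return ts - ts%step }

// rebucket sums fine-grained samples into step-sized buckets keyed by the
// truncated timestamp, the same folding ForRange applies when the query step
// is coarser than the chunk resolution.
func rebucket(in []sample, step model.Time) []sample {
	var out []sample
	for _, s := range in {
		bucket := truncate(s.Timestamp, step)
		if n := len(out); n > 0 && out[n-1].Timestamp == bucket {
			out[n-1].Value += s.Value
			continue
		}
		out = append(out, sample{Timestamp: bucket, Value: s.Value})
	}
	return out
}

func main() {
	// Millisecond timestamps at 10s resolution, folded into 30s buckets.
	in := []sample{{10000, 1}, {20000, 2}, {30000, 1}, {50000, 4}}
	fmt.Println(rebucket(in, 30000))
}
```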
diff --git a/pkg/pattern/drain/chunk_test.go b/pkg/pattern/drain/chunk_test.go
index 17429da594e19..7f3cb4066b2c5 100644
--- a/pkg/pattern/drain/chunk_test.go
+++ b/pkg/pattern/drain/chunk_test.go
@@ -9,31 +9,32 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/pattern/chunk"
)
func TestAdd(t *testing.T) {
cks := Chunks{}
- cks.Add(TimeResolution + 1)
- cks.Add(TimeResolution + 2)
- cks.Add(2*TimeResolution + 1)
+ cks.Add(chunk.TimeResolution + 1)
+ cks.Add(chunk.TimeResolution + 2)
+ cks.Add(2*chunk.TimeResolution + 1)
require.Equal(t, 1, len(cks))
require.Equal(t, 2, len(cks[0].Samples))
- cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) + TimeResolution + 1)
+ cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) + chunk.TimeResolution + 1)
require.Equal(t, 2, len(cks))
require.Equal(t, 1, len(cks[1].Samples))
- cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) - TimeResolution)
+ cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) - chunk.TimeResolution)
require.Equal(t, 2, len(cks))
require.Equalf(t, 1, len(cks[1].Samples), "Older samples should not be added if they arrive out of order")
}
func TestIterator(t *testing.T) {
cks := Chunks{}
- cks.Add(TimeResolution + 1)
- cks.Add(TimeResolution + 2)
- cks.Add(2*TimeResolution + 1)
- cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) + TimeResolution + 1)
+ cks.Add(chunk.TimeResolution + 1)
+ cks.Add(chunk.TimeResolution + 2)
+ cks.Add(2*chunk.TimeResolution + 1)
+ cks.Add(model.TimeFromUnixNano(time.Hour.Nanoseconds()) + chunk.TimeResolution + 1)
- it := cks.Iterator("test", model.Time(0), model.Time(time.Hour.Nanoseconds()), TimeResolution)
+ it := cks.Iterator("test", model.Time(0), model.Time(time.Hour.Nanoseconds()), chunk.TimeResolution)
require.NotNil(t, it)
var samples []logproto.PatternSample
diff --git a/pkg/pattern/drain/drain.go b/pkg/pattern/drain/drain.go
index 4d7c52bebf0c6..d783e9d1919e3 100644
--- a/pkg/pattern/drain/drain.go
+++ b/pkg/pattern/drain/drain.go
@@ -25,7 +25,9 @@ package drain
import (
"math"
"strconv"
+ "strings"
"unicode"
+ "unsafe"
"github.com/hashicorp/golang-lru/v2/simplelru"
"github.com/prometheus/common/model"
@@ -139,7 +141,7 @@ func DefaultConfig() *Config {
// MaxClusterDepth and SimTh, the less the chance that there will be
// "similar" clusters, but the greater the footprint.
SimTh: 0.3,
- MaxChildren: 100,
+ MaxChildren: 15,
ParamString: `<_>`,
MaxClusters: 300,
}
@@ -156,22 +158,24 @@ func New(config *Config, metrics *Metrics) *Drain {
}
d := &Drain{
- config: config,
- rootNode: createNode(),
- idToCluster: createLogClusterCache(config.MaxClusters, evictFn),
- metrics: metrics,
- tokenizer: splittingTokenizer{}, // Default to this for now
+ config: config,
+ rootNode: createNode(),
+ idToCluster: createLogClusterCache(config.MaxClusters, evictFn),
+ metrics: metrics,
+ tokenizer: newPunctuationTokenizer(),
+ maxAllowedLineLength: 3000,
}
return d
}
type Drain struct {
- config *Config
- rootNode *Node
- idToCluster *LogClusterCache
- clustersCounter int
- metrics *Metrics
- tokenizer LineTokenizer
+ config *Config
+ rootNode *Node
+ idToCluster *LogClusterCache
+ clustersCounter int
+ metrics *Metrics
+ tokenizer LineTokenizer
+ maxAllowedLineLength int
}
func (d *Drain) Clusters() []*LogCluster {
@@ -183,10 +187,14 @@ func (d *Drain) TrainTokens(tokens []string, stringer func([]string) string, ts
}
func (d *Drain) Train(content string, ts int64) *LogCluster {
- return d.train(d.tokenizer.Tokenize(content), d.tokenizer.Join, ts)
+ if len(content) > d.maxAllowedLineLength {
+ return nil
+ }
+ tokens, state := d.tokenizer.Tokenize(content)
+ return d.train(tokens, state, ts)
}
-func (d *Drain) train(tokens []string, stringer func([]string) string, ts int64) *LogCluster {
+func (d *Drain) train(tokens []string, state interface{}, ts int64) *LogCluster {
if len(tokens) < 4 {
return nil
}
@@ -196,11 +204,12 @@ func (d *Drain) train(tokens []string, stringer func([]string) string, ts int64)
d.clustersCounter++
clusterID := d.clustersCounter
matchCluster = &LogCluster{
- Tokens: tokens,
- id: clusterID,
- Size: 1,
- Stringer: stringer,
- Chunks: Chunks{},
+ Tokens: tokens,
+ TokenState: state,
+ id: clusterID,
+ Size: 1,
+ Stringer: d.tokenizer.Join,
+ Chunks: Chunks{},
}
matchCluster.append(model.TimeFromUnixNano(ts))
d.idToCluster.Set(clusterID, matchCluster)
@@ -219,15 +228,16 @@ func (d *Drain) train(tokens []string, stringer func([]string) string, ts int64)
}
func (d *Drain) TrainPattern(content string, samples []*logproto.PatternSample) *LogCluster {
- tokens := deduplicatePlaceholders(d.tokenizer.Tokenize(content), d.config.ParamString)
+ tokens, state := d.tokenizer.Tokenize(content)
matchCluster := d.treeSearch(d.rootNode, tokens, d.config.SimTh, true)
// Match no existing log cluster
if matchCluster == nil {
d.clustersCounter++
clusterID := d.clustersCounter
matchCluster = &LogCluster{
- Tokens: tokens,
- id: clusterID,
+ Tokens: tokens,
+ TokenState: state,
+ id: clusterID,
}
d.idToCluster.Set(clusterID, matchCluster)
d.addSeqToPrefixTree(d.rootNode, matchCluster)
@@ -241,37 +251,67 @@ func (d *Drain) TrainPattern(content string, samples []*logproto.PatternSample)
return matchCluster
}
-func deduplicatePlaceholders(tokens []string, param string) []string {
- if len(tokens) < 2 {
- return tokens
+func deduplicatePlaceholders(line string, placeholder string) string {
+ first := strings.Index(line, "<_><_>")
+ if first == -1 {
+ return line
}
- i := 1
- for k := 1; k < len(tokens); k++ {
- if tokens[k] != param || tokens[k] != tokens[k-1] {
- if i != k {
- tokens[i] = tokens[k]
+ builder := make([]byte, 0, len(line))
+ low := 0
+ for i := first; i < len(line)-5; i++ {
+ if line[i:i+len(placeholder)] == placeholder {
+ high := i + 3
+ for ; high < len(line)-2; high += 3 {
+ if line[high:high+len(placeholder)] != placeholder {
+ break
+ }
}
- i++
+ builder = append(builder, line[low:i+len(placeholder)]...)
+ low = high
+ i = high
}
}
- return tokens[:i]
+ builder = append(builder, line[low:]...)
+
+ return unsafe.String(unsafe.SliceData(builder), len(builder))
}
func (d *Drain) PatternString(c *LogCluster) string {
- s := d.tokenizer.Join(deduplicatePlaceholders(c.Tokens, d.config.ParamString))
+ s := deduplicatePlaceholders(d.tokenizer.Join(c.Tokens, c.TokenState), d.config.ParamString)
if s == d.config.ParamString {
return ""
}
return s
}
+func (d *Drain) Prune() {
+ d.pruneTree(d.rootNode)
+}
+
+func (d *Drain) pruneTree(node *Node) int {
+ for key, child := range node.keyToChildNode {
+ if d.pruneTree(child) == 0 {
+ delete(node.keyToChildNode, key)
+ }
+ }
+
+ validClusterIds := 0
+ for _, clusterID := range node.clusterIDs {
+ cluster := d.idToCluster.Get(clusterID)
+ if cluster != nil {
+ validClusterIds++
+ }
+ }
+ return len(node.keyToChildNode) + validClusterIds
+}
+
func (d *Drain) Delete(cluster *LogCluster) {
d.idToCluster.cache.Remove(cluster.id)
}
// Match against an already existing cluster. Match shall be perfect (sim_th=1.0). New cluster will not be created as a result of this call, nor any cluster modifications.
func (d *Drain) Match(content string) *LogCluster {
- contentTokens := d.tokenizer.Tokenize(content)
+ contentTokens, _ := d.tokenizer.Tokenize(content)
matchCluster := d.treeSearch(d.rootNode, contentTokens, 1.0, true)
return matchCluster
}
@@ -413,6 +453,7 @@ func (d *Drain) addSeqToPrefixTree(rootNode *Node, cluster *LogCluster) {
// if token not matched in this layer of existing tree.
if _, ok = curNode.keyToChildNode[token]; !ok {
if !d.hasNumbers(token) {
+ // Token has no numbers: add it as a literal child while there is room, otherwise fall back to the <_> (param string) child
if _, ok = curNode.keyToChildNode[d.config.ParamString]; ok {
if len(curNode.keyToChildNode) < d.config.MaxChildren {
newNode := createNode()
@@ -435,6 +476,7 @@ func (d *Drain) addSeqToPrefixTree(rootNode *Node, cluster *LogCluster) {
}
}
} else {
+ // Token contains numbers: route it through the <_> (param string) child, creating it if needed
if _, ok = curNode.keyToChildNode[d.config.ParamString]; !ok {
newNode := createNode()
curNode.keyToChildNode[d.config.ParamString] = newNode
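Aside (not part of the patch): the rewritten `deduplicatePlaceholders` now works on the joined line rather than the token slice, collapsing any run of consecutive `<_>` placeholders into one. A simplified, allocation-heavy stand-in for that behaviour (the name `collapsePlaceholders` is illustrative, not the unexported function in the diff, which does the same thing byte-wise in a single pass):

```go
package main

import (
	"fmt"
	"strings"
)

// collapsePlaceholders folds every run of adjacent placeholders down to a
// single placeholder, e.g. "<_><_><_>" -> "<_>".
func collapsePlaceholders(line, placeholder string) string {
	double := placeholder + placeholder
	for strings.Contains(line, double) {
		line = strings.ReplaceAll(line, double, placeholder)
	}
	return line
}

func main() {
	fmt.Println(collapsePlaceholders("ts=<_><_><_> caller=<_> msg=done", "<_>"))
	// ts=<_> caller=<_> msg=done
}
```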
diff --git a/pkg/pattern/drain/drain_benchmark_test.go b/pkg/pattern/drain/drain_benchmark_test.go
index e03770f613c04..35ec024af138e 100644
--- a/pkg/pattern/drain/drain_benchmark_test.go
+++ b/pkg/pattern/drain/drain_benchmark_test.go
@@ -39,8 +39,8 @@ func BenchmarkDrain_TrainExtractsPatterns(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
+ drain := New(DefaultConfig(), nil)
for _, line := range lines {
- drain := New(DefaultConfig(), nil)
drain.Train(line, 0)
}
}
diff --git a/pkg/pattern/drain/drain_test.go b/pkg/pattern/drain/drain_test.go
index cc16f0b7fd64c..690db7da29ee8 100644
--- a/pkg/pattern/drain/drain_test.go
+++ b/pkg/pattern/drain/drain_test.go
@@ -4,7 +4,9 @@ import (
"bufio"
"fmt"
"os"
+ "strings"
"testing"
+ "time"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
@@ -27,34 +29,34 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
drain: New(DefaultConfig(), nil),
inputFile: `testdata/agent-logfmt.txt`,
patterns: []string{
- `ts=2024-04-16T15:10:42.556278698Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/grafana/*.log:{app=\"grafana\", conprof=\"true\", container=\"grafana\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`,
- `ts=2024-04-16T15:10:42.556706613Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/hgrun/*.log:{app=\"grafana\", conprof=\"true\", container=\"hgrun\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`,
- `ts=2024-04-16T15:10:42.556930066Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/hg-plugins/*.log:{app=\"grafana\", conprof=\"true\", container=\"hg-plugins\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`,
- `ts=2024-04-16T15:10:42.557102408Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/hosted-grafana-security/*.log:{app=\"grafana\", conprof=\"true\", container=\"hosted-grafana-security\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`,
+ `ts=2024-04-16T15:10:42.<_> level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=prometheus.scrape.<_> duration=<_>.<_>`,
`ts=2024-04-16T15:10:43.192290389Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*19a1cce8-5f04-46e0-a124-292b0dd9b343/testcoordinator/*.log:{batch_kubernetes_io_controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", batch_kubernetes_io_job_name=\"testcoordinator-job-2665838\", container=\"testcoordinator\", controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", job=\"k6-cloud/testcoordinator\", job_name=\"testcoordinator-job-2665838\", name=\"testcoordinator\", namespace=\"k6-cloud\", pod=\"testcoordinator-job-2665838-9g8ds\"}"`,
- `ts=2024-04-16T15:10:43.551543875Z caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*35649bfd-52ff-4281-9294-5f65fd5a89fc/marketplaces-api/*.log:{container=\"marketplaces-api\", job=\"grafana-com/marketplaces-api\", name=\"marketplaces-api\", namespace=\"grafana-com\", pod=\"marketplaces-api-f67ff7567-gqrvb\", pod_template_hash=\"f67ff7567\"}"`,
- `ts=<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg="filetarget:watcher closed, tailer stopped, positions saved" path=<_>`,
- `ts=<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" directory=<_>`,
- `ts=<_> caller=filetarget.go:326 level=info component=logs logs_config=default msg="removing directory from watcher" directory=<_>`,
- `ts=<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=<_> op=CREATE`,
- `ts=<_> caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key=<_> \"kube-proxy\", container=\"kube-proxy\", job=<_> namespace=\"kube-system\", pod=\"kube-proxy-gke-ops-us-east-0-main-n2s32-1-1dd39c-32ae1dde-hmhw\", tier=\"node\"}"`,
- `ts=<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key=<_> \"grafana\", conprof=\"true\", container=\"grafana\", instanceId=<_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=<_> plan=\"free\", pod=<_> pod_template_hash=<_> resource_version=<_> slug=<_> stackId=<_>`,
- `ts=<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key=<_> \"grafana\", conprof=\"true\", container=\"hg-plugins\", instanceId=<_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=<_> plan=\"free\", pod=<_> pod_template_hash=<_> resource_version=<_> slug=<_> stackId=<_>`,
- `ts=<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key=<_> \"grafana\", conprof=\"true\", container=\"hgrun\", instanceId=<_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=<_> plan=\"free\", pod=<_> pod_template_hash=<_> resource_version=<_> slug=<_> stackId=<_>`,
- `ts=<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key=<_> \"grafana\", conprof=\"true\", container=\"hosted-grafana-security\", instanceId=<_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=<_> plan=\"free\", pod=<_> pod_template_hash=<_> resource_version=<_> slug=<_> stackId=<_>`,
- `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Re-opening moved/deleted file <_> ..."`,
- `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Seeked <_> - &{Offset:0 Whence:0}"`,
- `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully reopened <_>`,
- `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Waiting for <_> to appear..."`,
- `ts=<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="bufio.Scanner:token too long"`,
- `ts=<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="logfmt syntax error at pos <_> on line 1:unexpected '\"'"`,
- `ts=<_> caller=tailer.go:118 level=info component=logs logs_config=default component=tailer msg="position timer:exited" path=<_>`,
- `ts=<_> caller=tailer.go:147 level=info component=logs logs_config=default component=tailer msg="tail routine:started" path=<_>`,
- `ts=<_> caller=tailer.go:155 level=info component=logs logs_config=default component=tailer msg="tail routine:exited" path=<_>`,
- `ts=<_> caller=tailer.go:164 level=info component=logs logs_config=default component=tailer msg="tail routine:tail channel closed, stopping tailer" path=<_> reason=null`,
- `ts=<_> caller=tailer.go:207 level=info component=logs logs_config=default component=tailer msg="skipping update of position for a file which does not currently exist" path=<_>`,
- `ts=<_> caller=tailer.go:245 level=info component=logs logs_config=default component=tailer msg="stopped tailing file" path=<_>`,
- `ts=<_> level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=<_> duration=<_>`,
+ `ts=2024-04-16T15:10:43.551782223Z caller=tailer.go:245 level=info component=logs logs_config=default component=tailer msg="stopped tailing file" path=/var/log/pods/grafana-com_marketplaces-api-f67ff7567-gqrvb_35649bfd-52ff-4281-9294-5f65fd5a89fc/marketplaces-api/0.log`,
+ `ts=2024-04-16T15:10:43.<_> caller=filetargetmanager.go:<_> level=info component=logs logs_config=default msg="<_> target" key="/var/log/pods/*<_>/<_>/*.log:{<_>=\"<_>\", <_>=\"<_><_><_><_><_><_> <_><_><_><_><_>\", namespace=\"<_>\", pod=\"<_>\", <_>=\"<_>\"}"`,
+ `ts=2024-04-16T15:10:43.<_> caller=tailer.go:<_> level=info component=logs logs_config=default component=tailer msg="<_> <_><_> <_> <_> <_><_> <_> <_><_> <_><_><_><_><_><_><_><_><_><_><_><_><_><_><_><_> <_><_><_>`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg="filetarget: watcher closed, tailer stopped, positions saved" path=/var/log/pods/*<_>/<_>/*.log`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" directory=/var/log/pods/<_>/<_>`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" directory=/var/log/pods/hosted-grafana_.<_>/<_>`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=filetarget.go:326 level=info component=logs logs_config=default msg="removing directory from watcher" directory=/var/log/pods/hosted-grafana_.<_>/<_>`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=/var/log/pods/<_>/<_>/<_>.log op=CREATE`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=/var/log/pods/<_><_><_>/<_><_><_>.<_> op=CREATE`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=/var/log/pods/<_><_><_>/<_><_><_>.<_>.<_> op=CREATE`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=/var/log/pods/hosted-grafana_.<_>/<_>/0.log.<_>.<_> op=CREATE`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:<_> level=info component=logs logs_config=default msg="<_> target" key="/var/log/pods/*<_>/<_>/*.log:{app=\"grafana\", conprof=\"true\", container=\"<_>\", instanceId=\"<_>\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"<_>\", plan=\"free\", pod=\"<_>\", pod_template_hash=\"<_>\", resource_version=\"<_>\", slug=\"<_>\", stackId=\"<_>\"}"`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Re-opening moved/deleted file /var/log/pods/<_>/<_>/<_>.log ..."`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Re-opening moved/deleted file /var/log/pods/hosted-grafana_.<_>/<_>/0.log ..."`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Seeked /var/log/pods/<_>/<_>/0.log - &{Offset:0 Whence:0}"`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Seeked /var/log/pods/hosted-grafana_.<_>/<_>/0.log - &{Offset:0 Whence:0}"`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully reopened /var/log/pods/<_>/<_>/<_>.log"`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully reopened /var/log/pods/hosted-grafana_.<_>/<_>/0.log"`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Waiting for /var/log/pods/<_>/<_>/0.log to appear..."`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Waiting for /var/log/pods/hosted-grafana_.<_>/<_>/0.log to appear..."`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="bufio.Scanner: token too long"`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="logfmt syntax error at pos <_> on line 1: unexpected '\"'"`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=tailer.go:245 level=info component=logs logs_config=default component=tailer msg="stopped tailing file" path=/var/log/pods/hosted-grafana_.<_>/<_>/0.log`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=tailer.go:<_> level=info component=logs logs_config=default component=tailer msg="<_> <_>: <_>" path=/var/log/pods/<_>/<_>/0.log`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=tailer.go:<_> level=info component=logs logs_config=default component=tailer msg="<_> <_>: <_>" path=/var/log/pods/hosted-grafana_.<_>/<_>/0.log`,
+ `ts=2024-04-16T15:10:<_>.<_> caller=tailer.go:<_> level=info component=logs logs_config=default component=tailer msg="<_> <_><_> <_> <_> <_><_> <_> <_><_> <_><_><_><_><_><_><_><_><_><_><_><_><_><_><_><_><_><_> <_><_><_>`,
},
},
{
@@ -62,126 +64,103 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
inputFile: `testdata/ingester-logfmt.txt`,
patterns: []string{
`ts=2024-04-17T09:52:46.363974185Z caller=http.go:194 level=debug traceID=1b48f5156a61ca69 msg="GET /debug/pprof/delta_mutex (200) 1.161082ms"`,
- `ts=<_> caller=head.go:216 level=debug tenant=987678 msg="profile is empty after delta computation" metricName=memory`,
- `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /ingester.v1.IngesterService/Push (200) <_>`,
+ `ts=2024-04-17T09:52:46.<_> caller=head.go:216 level=debug tenant=987678 msg="profile is empty after delta computation" metricName=memory`,
+ `ts=2024-04-17T09:52:46.<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /ingester.v1.IngesterService/Push (200) <_>.<_>"`,
},
},
{
drain: New(DefaultConfig(), nil),
inputFile: `testdata/drone-json.txt`,
patterns: []string{
- `{"duration":<_> "debug","method":"GET","msg":"request completed","referer":"","remote":"10.136.105.40:52702","request":"/metrics","status":200,"time":<_> <_> <_> "GrafanaAgent/v0.40.3 (flow; linux; helm)"}`,
- `{"id":<_> "debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":<_> <_> <_>`,
- `{"id":<_> "debug","msg":"calculate server capacity","time":<_> <_> <_>`,
- `{"id":<_> "debug","msg":"calculate unfinished jobs","time":<_> <_> <_>`,
- `{"id":<_> "debug","msg":"check capacity complete","time":<_> <_> <_>`,
- `{"id":<_> "debug","msg":"no capacity changes required","time":<_> <_> <_>`,
+ `{"duration":<_>,"level":"debug","method":"GET","msg":"request completed","referer":"","remote":"10.136.105.40:52702","request":"/metrics","status":200,"time":"<_>:<_>:<_>","user-agent":"GrafanaAgent/v0.40.3 (flow; linux; helm)"}`,
+ `{"id":"<_>","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"<_>:<_>:<_>"}`,
+ `{"id":"<_>","level":"debug","msg":"calculate server capacity","time":"<_>:<_>:<_>"}`,
+ `{"id":"<_>","level":"debug","msg":"calculate unfinished jobs","time":"<_>:<_>:<_>"}`,
+ `{"id":"<_>","level":"debug","msg":"check capacity complete","time":"<_>:<_>:<_>"}`,
+ `{"id":"<_>","level":"debug","msg":"no capacity changes required","time":"<_>:<_>:<_>"}`,
},
},
{
drain: New(DefaultConfig(), nil),
inputFile: "testdata/distributor-logfmt.txt",
patterns: []string{
- `ts=2024-05-02T12:17:22.115385619Z caller=http.go:194 level=debug traceID=7836a12bb7f1964e orgID=75 msg="POST /ingest?aggregationType=sum&from=1714652227107641016&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=100&spyName=gospy&units=samples&until=1714652242109516917 (200) 1.562143ms"`,
- `ts=2024-05-02T12:17:22.242343806Z caller=http.go:194 level=debug traceID=404c6a83a18e66a4 orgID=75 msg="POST /ingest?aggregationType=average&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=0&spyName=gospy&units=goroutines&until=1714652242232506798 (200) 2.902485ms"`,
- `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=1819 msg="POST /pyroscope/ingest?aggregationType=sum&from=1714652230&name=<_> 0&spyName=scrape&units=samples&until=1714652240 (200) <_>`,
- `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=75 msg="POST /ingest?aggregationType=&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=<_> gospy&units=&until=1714652242232506798 (200) <_>`,
- `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /push.v1.PusherService/Push <_> <_>`,
+ `ts=2024-05-02T12:17:22.851228301Z caller=http.go:194 level=debug traceID=1e1fe5ba1756bc38 orgID=1819 msg="POST /pyroscope/ingest?aggregationType=sum&from=1714652230&name=flamegraph.com%7Bapp_kubernetes_io_instance%3Dflamegraph-com%2Capp_kubernetes_io_name%3Dflamegraph-com%2Ccluster%3Dflamegraph.com%2Cinstance%3D10.0.11.146%3A8001%2Cjob%3Dkubernetes-pods%2Cnamespace%3Dflamegraph-com%2Cpod%3Dflamegraph-com-backend-79c858c7bf-jw2hn%2Cpod_template_hash%3D79c858c7bf%2Cpyroscope_tenant%3Dpyroscope%2Ctier%3Dbackend%7D&sampleRate=0&spyName=scrape&units=samples&until=1714652240 (200) 22.345191ms"`,
+ `ts=2024-05-02T12:17:22.<_> caller=http.go:194 level=debug traceID=<_> orgID=75 msg="POST /ingest?aggregationType=&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=<_>&spyName=gospy&units=&until=1714652242232506798 (200) <_>.<_>"`,
+ `ts=2024-05-02T12:17:22.<_> caller=http.go:194 level=debug traceID=<_> orgID=75 msg="POST /ingest?aggregationType=<_>&from=<_>&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=<_>&spyName=gospy&units=<_>&until=<_> (200) <_>.<_>"`,
+ `ts=2024-05-02T12:17:<_>.<_> caller=http.go:194 level=debug traceID=<_> orgID=1819 msg="POST /pyroscope/ingest?aggregationType=sum&from=1714652230&name=flamegraph.com.frontend%7Bapp_kubernetes_io_instance%3Dflamegraph-com%2Capp_kubernetes_io_name%3Dflamegraph-com%2Ccluster%3Dflamegraph.com%2Cinstance%3D10.0.9.115%3A9091%2Cjob%3Dkubernetes-pods%2Cnamespace%3Dflamegraph-com%2Cpod%3Dflamegraph-com-frontend-6fb87f8785-pd87k%2Cpod_template_hash%3D6fb87f8785%2Cpyroscope_tenant%3Dpyroscope%2Ctier%3Dfrontend%7D&sampleRate=0&spyName=scrape&units=samples&until=1714652240 (200) <_>.<_>"`,
+ `ts=2024-05-02T12:17:<_>.<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /push.v1.PusherService/Push (<_>) <_>.<_>"`,
},
},
{
drain: New(DefaultConfig(), nil),
inputFile: "testdata/journald.txt",
patterns: []string{
- ` exec /bin/hgrun -log.level=debug launch -bundledPluginsManifest /proc/$(pidof plugins-pause)/root/manifest.json -bundledPluginsDir /proc/$(pidof plugins-pause)/root/plugins],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:80,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:10000,Protocol:TCP,HostIP:,},ContainerPort{Name:profiling,HostPort:0,ContainerPort:6060,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:HG_API,Value:http://hosted-grafana-api,ValueFrom:nil,},EnvVar{Name:HG_INSTANCE_SLUG,Value:<_> nil,},EnvVar{Name:HG_INSTANCE_SECRET,Value:<_> nil,},EnvVar{Name:EXTRA_OPTIONS,Value:-profile -profile-port=6060 -profile-addr=0.0.0.0,ValueFrom:nil,},EnvVar{Name:HG_CREATE_TIME_MS,Value:<_> nil,},EnvVar{Name:HG_PULL_POLICY,Value:Always,ValueFrom:nil,},EnvVar{Name:HG_START_REASON,Value:active,ValueFrom:nil,},EnvVar{Name:HGRUN_SECURE_PLUGINS,Value:false,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_RUNNER_ROOT_CA,Value:false,ValueFrom:nil,},EnvVar{Name:OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,Value:http://jaeger-agent.jaeger.svc.cluster.local:4317,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_PARAM,Value:1,ValueFrom:nil,},EnvVar{Name:OTEL_RESOURCE_ATTRIBUTES,Value:cluster=dev-us-central-0,namespace=hosted-grafana,ValueFrom:nil,},EnvVar{Name:HG_PROBE_PATH,Value:/api/health,ValueFrom:nil,},EnvVar{Name:HGRUN_EXIT_ON_PLUGIN_FAIL,Value:true,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_INSTALL_RETRIES,Value:2,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_INSTALL_CONCURRENCY,Value:1,ValueFrom:nil,},EnvVar{Name:HGRUN_LAUNCH_TIMEOUT,Value:3m0s,ValueFrom:nil,},EnvVar{Name:GOMEMLIMIT,Value:429496730,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{26 -3} {} 26m DecimalSI},memory: {{293601280 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/api/health,Port:{0 80 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:10,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/bin/hgrun check],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/hgrun drain -timeout 1m0s -waitTime 55s],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[SYS_PTRACE],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImagePull: [rpc error: code =NotFound desc =failed to pull and unpack image "us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference "us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> not found, failed to pull and unpack image 
"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference "us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> unexpected status from HEAD request to https:<_> 403 Forbidden]`,
` ln --force -s /proc/$(pidof hgrun-pause)/root/bin/hgrun /bin/hgrun;`,
` while [ "$(pidof plugins-pause)" = "" ]; do sleep 0.5; done;`,
` ts=2024-05-07T11:59:32.025687537Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request`,
- ` ts=2024-05-07T11:59:<_> level=error caller=http_client.go:56 app=hgrun <_> msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health`,
+ ` ts=2024-05-07T11:59:<_>.<_> level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.<_> msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health`,
`2024-05-07T11:59:43.484606Z INFO ExtHandler ExtHandler Downloading agent manifest`,
- `2024-05-07T11:59:<_> INFO TelemetryEventsCollector ExtHandler Collected 2 events for extension: Microsoft.Azure.Extensions.CustomScript`,
- `<_> Consumed <_> CPU time.`,
- `<_> Deactivated successfully.`,
+ `2024-05-07T11:59:<_>.<_> INFO TelemetryEventsCollector ExtHandler Collected 2 events for extension: Microsoft.Azure.Extensions.CustomScript`,
+ `<_>.scope: Consumed <_>.<_> CPU time.`,
+ `<_>.scope: Deactivated successfully.`,
`AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=<_> comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined"`,
- `E0507 11:59:29.725681 3089 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"azure-resourcemanager-exporter\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=azure-resourcemanager-exporter pod=azure-resourcemanager-exporter-6b5b58c666-rsttd_infra-exporters(5a95f801-309c-4f33-864a-406262c6ece6)\"" pod="infra-exporters/azure-resourcemanager-exporter-6b5b58c666-rsttd" podUID="5a95f801-309c-4f33-864a-406262c6ece6"`,
- `E0507 11:59:31.554203 4531 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"frontend\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=frontend pod=otel-demo-alt-dev-frontend-79ccf98858-mbj4x_otel-demo-alt(d08e620e-00d0-49f1-a195-820a62e8de8f)\"" pod="otel-demo-alt/otel-demo-alt-dev-frontend-79ccf98858-mbj4x" podUID="d08e620e-00d0-49f1-a195-820a62e8de8f"`,
- `E0507 11:59:31.928148 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[terraform-drift-detector-data], unattached volumes=[terraform-drift-detector-data], failed to process volumes=[]:context deadline exceeded" pod="terraform-drift-detector/terraform-drift-detector-d68b4c545-jg2vj" podUID="6c607496-ef26-454e-b2f2-4cb75b233fa3"`,
- `E0507 11:59:34.856101 4727 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana-render-security\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-security:0.1.181\\\"\"" pod="integration/grafana-render-service-cbff479fc-cj9tp" podUID="0e3114d1-2f3a-49d6-a71d-dbc75050d8e0"`,
+ `E0507 11:59:31.928148 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[terraform-drift-detector-data], unattached volumes=[terraform-drift-detector-data], failed to process volumes=[]: context deadline exceeded" pod="terraform-drift-detector/terraform-drift-detector-d68b4c545-jg2vj" podUID="6c607496-ef26-454e-b2f2-4cb75b233fa3"`,
`E0507 11:59:34.923938 3027 kuberuntime_manager.go:1261] container &Container{Name:mysqld-exporter,Image:prom/mysqld-exporter:v0.13.0,Command:[],Args:[--collect.info_schema.innodb_metrics],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:MYSQL_USER,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:username,Optional:nil,},},},EnvVar{Name:MYSQL_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:password,Optional:nil,},},},EnvVar{Name:MYSQL_HOST,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:endpoint,Optional:nil,},},},EnvVar{Name:MYSQL_PORT,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:port,Optional:nil,},},},EnvVar{Name:MYSQL_TLS_MODE,Value:preferred,ValueFrom:nil,},EnvVar{Name:DATA_SOURCE_NAME,Value:$(MYSQL_USER):$(MYSQL_PASSWORD)@tcp($(MYSQL_HOST):$(MYSQL_PORT))/?tls=$(MYSQL_TLS_MODE),ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dzx7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod testcrossplane-exporter-c67cfc58f-vbzl4_crossplane-playground(3d49134d-3378-4ec3-824c-5ff4ea2590a5): CreateContainerConfigError: secret "testcrossplane-user-exporter" not found`,
- `E0507 11:59:34.923984 3027 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysqld-exporter\" with CreateContainerConfigError: \"secret \\\"testcrossplane-user-exporter\\\" not found\"" pod="crossplane-playground/testcrossplane-exporter-c67cfc58f-vbzl4" podUID="3d49134d-3378-4ec3-824c-5ff4ea2590a5"`,
- `E0507 11:59:35.928465 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[custom-grafana-agent], unattached volumes=[], failed to process volumes=[]:context deadline exceeded" pod="loki-dev-010/custom-grafana-agent-856948968f-6jfks" podUID="17b244cc-ecb9-4fbc-beaa-8fa47fafe013"`,
- `E0507 11:59:37.252214 4736 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ksm\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=ksm pod=new-relic-nri-bundle-nrk8s-ksm-6c785668f5-jcxh2_integration(f7cc3cca-2ffb-4fde-a73e-a4ba8b0f6b3c)\"" pod="integration/new-relic-nri-bundle-nrk8s-ksm-6c785668f5-jcxh2" podUID="f7cc3cca-2ffb-4fde-a73e-a4ba8b0f6b3c"`,
- `E0507 11:59:39.149450 4729 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-agent\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=cluster-agent pod=appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv_integration(69bc5e6c-0451-443e-af8a-c831871afbb8)\"" pod="integration/appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv" podUID="69bc5e6c-0451-443e-af8a-c831871afbb8"`,
- `E0507 11:59:41.375655 4736 kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.12.0,Command:[],Args:[-target=ruler -config.expand-env=true -config.file=/etc/mimir/mimir.yaml -distributor.remote-timeout=10s],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:alloy-otlp.alloy-otlp.svc.cluster.local.,ValueFrom:nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http://alloy-otlp.alloy-otlp.svc.cluster.local.:5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},EnvVar{Name:JAEGER_REPORTER_MAX_QUEUE_SIZE,Value:1000,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-jtnbs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod gem-mimir-ruler-5f56f7846b-fgxdm_ge-metrics-federation(07c06e21-137b-4fdd-b7d3-703f0a567720): CreateContainerConfigError: secret "ruler-alertmanager-token" not found`,
- `E0507 11:59:<_> 4731 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"overrides-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/enterprise-logs:callum-shard-firstlast-08\\\"\"" pod="loki-dev-010/overrides-exporter-98c77fd66-6zj6m" podUID="1ff5bf3e-5856-4f6f-ae04-273f2dee170b"`,
- `E0507 11:59:<_> <_> kuberuntime_manager.go:1256] container &Container{Name:grafana,Image:us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> [/bin/sh],Args:[-c set -e; while [ "$(pidof hgrun-pause)" ="" ]; do sleep 0.5; done;`,
- `E0507 11:59:<_> <_> kuberuntime_manager.go:1256] container &Container{Name:pdc,Image:us.gcr.io/hosted-grafana/pdc:0.1.415,Command:[],Args:[-proxy.auth.ca-keys-dir=/var/run/secrets/pdc-certs -proxy.socks-server.addr=:10443 -proxy.ssh-server.addr=:2222 -proxy.use-socks-username-for-routing -proxy.api.http-address=:9182 -proxy.check-connpool-address-in-ring -memberlist.join=dns+gossip-ring.pdc.svc.cluster.local:7946 -api.http-address=:11443 -distributor.enabled=true -distributor.addr=:10444 -distributor.use-socks-username-for-routing -gateway.enabled=true -gateway.addr=:2244 -log.level=debug -certs.ca-private-key-file=/var/run/secrets/pdc-certs/ca.key -certs.ca-cert-file=/var/run/secrets/pdc-certs/ca.crt -certs.ca-pub-file=/var/run/secrets/pdc-certs/ca.pub -certs.cluster=local-k8s -shard-size=3 -graceful-shutdown-period=30s -enable-multiple-networks],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:socks,HostPort:0,ContainerPort:10443,Protocol:TCP,HostIP:,},ContainerPort{Name:ssh,HostPort:0,ContainerPort:2222,Protocol:TCP,HostIP:,},ContainerPort{Name:distributor,HostPort:0,ContainerPort:10444,Protocol:TCP,HostIP:,},ContainerPort{Name:gateway,HostPort:0,ContainerPort:2244,Protocol:TCP,HostIP:,},ContainerPort{Name:api,HostPort:0,ContainerPort:11443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{250 -3} {} 250m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:pdc-certs,ReadOnly:true,MountPath:/var/run/secrets/pdc-certs,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:<_> true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{0 11443 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:40,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sleep 5],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Never,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImageNeverPull: Container image "us.gcr.io/hosted-grafana/pdc:0.1.415" is not present with pull policy of Never`,
- `E0507 11:59:<_> <_> kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.11.1,Command:[],Args:[-target=ruler -config.expand-env=true -config.file=/etc/mimir/mimir.yaml],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:<_> nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http:<_> 5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:<_> true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> CreateContainerConfigError: secret "ruler-alertmanager-token" not found`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"gcom-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/frontend-monitoring:6a8eb5a\\\"\"" <_> <_>`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ErrImagePull: \"[rpc error: code =NotFound desc =failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> not found, failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> unexpected status from HEAD request to https:<_> 403 Forbidden]\"" <_> <_>`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> <_> <_>`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" <_> <_>`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ruler\" with CreateContainerConfigError: \"secret \\\"ruler-alertmanager-token\\\" not found\"" <_> <_>`,
- `E0507 11:59:<_> <_> prober.go:104] "Probe errored" err="rpc error: code =NotFound desc =failed to exec in container: failed to load task: no running task found: task <_> not found: not found" probeType="Readiness" <_> <_> containerName="grafana"`,
- `E0507 11:59:<_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code =NotFound desc =failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> not found" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>`,
- `E0507 11:59:<_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code =Unknown desc =failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> unexpected status from HEAD request to https:<_> 403 Forbidden" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>`,
- `E0507 11:59:<_> <_> remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code =NotFound desc =an error occurred when try to find container <_> not found" <_>`,
- `E0507 11:59:<_> <_> remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code =NotFound desc =failed to exec in container: failed to load task: no running task found: task <_> not found: not found" <_> cmd=["/bin/hgrun","check"]`,
- `E0507 <_> 4733 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=prometheus pod=bryan-prometheus-0_bryan-prometheus(6dadfe71-eb19-4231-a96e-c64bb5499a1e)\"" pod="bryan-prometheus/bryan-prometheus-0" podUID="6dadfe71-eb19-4231-a96e-c64bb5499a1e"`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"agent\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=agent pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cortex-gw\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=cortex-gw pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"goldpinger\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=goldpinger pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff:\"back-off <_> restarting failed container=grafana pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"support-agent\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=support-agent pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> prober.go:239] "Unable to write all bytes from execInContainer" err="short write" expectedBytes=<_> actualBytes=10240`,
- `I0507 11:59:29.320184 1537502 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="logs-endpoint-dev-005/kafka-controller-0" secret="" err="secret \"not-needed\" not found"`,
+ `E0507 11:59:35.928465 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[custom-grafana-agent], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="loki-dev-010/custom-grafana-agent-856948968f-6jfks" podUID="17b244cc-ecb9-4fbc-beaa-8fa47fafe013"`,
+ `E0507 11:59:<_>.<_> <_> kuberuntime_manager.go:1256] container &Container{Name:grafana,Image:us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>,Command:[/bin/sh],Args:[-c set -e; while [ "$(pidof hgrun-pause)" = "" ]; do sleep 0.5; done;`,
+ `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"<_>\" with CrashLoopBackOff: \"back-off <_> restarting failed container=<_> pod=<_>(<_>)\"" pod="<_>/<_>" podUID="<_>"`,
+ `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"<_>\" with CreateContainerConfigError: \"secret \\\"<_>\\\" not found\"" pod="<_>/<_>" podUID="<_>"`,
+ `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"<_>\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/<_>:<_>.<_>.<_>\\\"\"" pod="<_>/<_>" podUID="<_>"`,
+ `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"<_>\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/<_>:<_>\\\"\"" pod="<_>/<_>" podUID="<_>"`,
+ `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ErrImagePull: \"[rpc error: code = NotFound desc = failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\\\": us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>: not found, failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\\\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/<_>.1.<_>: 403 Forbidden]\"" pod="hosted-grafana/<_>" podUID="<_>"`,
+ `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" pod="pdc/<_>" podUID="<_>"`,
+ `E0507 11:59:<_>.<_> <_> prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found" probeType="Readiness" pod="hosted-grafana/<_>" podUID="<_>" containerName="grafana"`,
+ `E0507 11:59:<_>.<_> <_> prober.go:239] "Unable to write all bytes from execInContainer" err="short write" expectedBytes=<_> actualBytes=10240`,
+ `E0507 11:59:<_>.<_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>: not found" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>"`,
+ `E0507 11:59:<_>.<_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/<_>.1.<_>: 403 Forbidden" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>"`,
+ `E0507 11:59:<_>.<_> <_> remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"<_>\": not found" containerID="<_>"`,
+ `E0507 11:59:<_>.<_> <_> remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found" containerID="<_>" cmd=["/bin/hgrun","check"]`,
`I0507 11:59:31.815514 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hosted-grafana-pro) is not from ACR, return empty authentication`,
- `I0507 11:59:32.409568 581823 cache.go:40] re-using cached key and certificate`,
- `I0507 11:59:33.422254 1537502 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x28r" status="Running"`,
`I0507 11:59:34.518822 3224 kuberuntime_container.go:745] "Killing container with a grace period" pod="hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4" podUID="25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" containerName="hgapi" containerID="containerd://c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e" gracePeriod=30`,
`I0507 11:59:34.834734 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95j2t\" (UniqueName: \"kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`,
`I0507 11:59:34.834794 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`,
`I0507 11:59:34.834835 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"gcs-serviceaccount\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`,
- `I0507 11:59:34.836955 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs" (OuterVolumeSpecName: "pdc-certs") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "pdc-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""`,
`I0507 11:59:34.841404 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t" (OuterVolumeSpecName: "kube-api-access-95j2t") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "kube-api-access-95j2t". PluginName "kubernetes.io/projected", VolumeGidValue ""`,
- `I0507 11:59:34.841447 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount" (OuterVolumeSpecName: "gcs-serviceaccount") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "gcs-serviceaccount". PluginName "kubernetes.io/secret", VolumeGidValue ""`,
- `I0507 11:59:34.854084 4727 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="integration/grafana-render-service-cbff479fc-cj9tp" secret="" err="secret \"us-gcr-io-hosted-grafana\" not found"`,
`I0507 11:59:34.936025 3224 reconciler_common.go:300] "Volume detached for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`,
- `I0507 11:59:37.133005 3782 prober.go:107] "Probe failed" probeType="Readiness" pod="loki-dev-014/loki-dev-014-rollout-operator-58fc68b876-2qhmp" podUID="e6504036-2514-4ecc-b78c-c47061f60c9f" containerName="rollout-operator" probeResult="failure" output="HTTP probe failed with statuscode:500"`,
- `I0507 11:59:37.915108 4726 prober.go:107] "Probe failed" probeType="Readiness" pod="agent-management-dev-002/agent-management-api-7ff7b9b9-k9nft" podUID="9893f9ac-f3e4-41fb-8da7-592061d2386c" containerName="agent-management-api" probeResult="failure" output="HTTP probe failed with statuscode:400"`,
+ `I0507 11:59:34.<_> 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/<_>" (OuterVolumeSpecName: "<_>") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "<_>". PluginName "kubernetes.io/secret", VolumeGidValue ""`,
+ `I0507 11:59:34.<_> 3224 reconciler_common.go:300] "Volume detached for volume \"<_>\" (UniqueName: \"kubernetes.io/<_>/<_>\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`,
+ `I0507 11:59:37.<_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod="<_>/<_>" podUID="<_>" containerName="<_>" probeResult="failure" output="HTTP probe failed with statuscode: <_>"`,
`I0507 11:59:38.116658 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hg-plugins) is not from ACR, return empty authentication`,
`I0507 11:59:39.168633 2776 kubelet.go:2493] "SyncLoop (probe)" probe="readiness" status="" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q"`,
- `I0507 11:59:39.560605 4739 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="logs-endpoint-dev-005/kafka-exporter-766c6757b5-bggf6" secret="" err="secret \"not-needed\" not found"`,
- `I0507 11:59:<_> 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hgrun) is not from ACR, return empty authentication`,
- `I0507 11:59:<_> 3224 reconciler_common.go:300] "Volume detached for volume <_> (UniqueName: <_> on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`,
- `I0507 11:59:<_> 6247 prober.go:107] "Probe failed" probeType="Readiness" pod="grafana-agent/grafana-agent-helm-4" podUID="c36c5200-1cd6-4093-893c-c022f91af996" containerName="grafana-agent" probeResult="failure" output="Get \"http://10.0.99.125:3090/-/ready\": dial tcp 10.0.99.125:3090: connect: connection refused"`,
- `I0507 11:59:<_> <_> generic.go:334] "Generic (PLEG): container finished" <_> <_> exitCode=1`,
- `I0507 11:59:<_> <_> kubelet.go:<_> "SyncLoop (PLEG): event for pod" <_> event={"ID":<_> "ContainerDied","Data":<_>`,
- `I0507 11:59:<_> <_> kubelet.go:<_> "SyncLoop (PLEG): event for pod" <_> event={"ID":<_> "ContainerStarted","Data":<_>`,
- `I0507 11:59:<_> <_> kubelet.go:<_> "SyncLoop DELETE" source="api" <_>`,
- `I0507 11:59:<_> <_> kubelet.go:<_> "SyncLoop REMOVE" source="api" <_>`,
- `I0507 11:59:<_> <_> kubelet_getters.go:187] "Pod status updated" <_> status="Running"`,
- `I0507 11:59:<_> <_> kubelet_volumes.go:<_> "Cleaned up orphaned pod volumes dir" <_> <_>`,
- `I0507 11:59:<_> <_> pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":<_> err="failed to get container status <_> rpc error: code =NotFound desc =an error occurred when try to find container <_> not found"`,
- `I0507 11:59:<_> <_> scope.go:117] "RemoveContainer" <_>`,
- `I0507 11:59:<_> <_> cache.go:40] re-using cached key and certificate`,
- `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod=<_>`,
- `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod=<_>`,
- `I0507 <_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod=<_> secret="" err="secret \"dockerhub\" not found"`,
- `I0507 <_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod=<_> secret="" err="secret \"gcr\" not found"`,
- `I0507 <_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod=<_> podUID=<_> containerName="grafana" probeResult="failure" output=<`,
- `IPv4: martian source <_> from <_> on dev eth0`,
+ `I0507 11:59:<_>.<_> 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hgrun) is not from ACR, return empty authentication`,
+ `I0507 11:59:<_>.<_> 6247 prober.go:107] "Probe failed" probeType="Readiness" pod="grafana-agent/grafana-agent-helm-4" podUID="c36c5200-1cd6-4093-893c-c022f91af996" containerName="grafana-agent" probeResult="failure" output="Get \"http://10.0.99.125:3090/-/ready\": dial tcp 10.0.99.125:3090: connect: connection refused"`,
+ `I0507 11:59:<_>.<_> <_> generic.go:334] "Generic (PLEG): container finished" podID="<_>" containerID="<_>" exitCode=1`,
+ `I0507 11:59:<_>.<_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="hosted-grafana/<_>"`,
+ `I0507 11:59:<_>.<_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod="hosted-grafana/<_>"`,
+ `I0507 11:59:<_>.<_> <_> kubelet.go:<_>] "SyncLoop (PLEG): event for pod" pod="<_>/<_>" event={"ID":"<_>","Type":"<_>","Data":"<_>"}`,
+ `I0507 11:59:<_>.<_> <_> kubelet.go:<_>] "SyncLoop DELETE" source="api" pods=["hosted-grafana/<_>"]`,
+ `I0507 11:59:<_>.<_> <_> kubelet.go:<_>] "SyncLoop REMOVE" source="api" pods=["hosted-grafana/<_>"]`,
+ `I0507 11:59:<_>.<_> <_> kubelet_getters.go:187] "Pod status updated" pod="kube-system/<_>" status="Running"`,
+ `I0507 11:59:<_>.<_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="<_>/<_>" secret="" err="secret \"<_>\" not found"`,
+ `I0507 11:59:<_>.<_> <_> kubelet_volumes.go:<_>] "Cleaned up orphaned pod volumes dir" podUID="<_>" path="/var/lib/kubelet/pods/<_>/volumes"`,
+ `I0507 11:59:<_>.<_> <_> pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"<_>"} err="failed to get container status \"<_>\": rpc error: code = NotFound desc = an error occurred when try to find container \"<_>\": not found"`,
+ `I0507 11:59:<_>.<_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod="hosted-grafana/<_>" podUID="<_>" containerName="grafana" probeResult="failure" output=<`,
+ `I0507 11:59:<_>.<_> <_> scope.go:117] "RemoveContainer" containerID="<_>"`,
+ `I0507 11:59:<_>.<_> <_> cache.go:40] re-using cached key and certificate`,
+ `IPv4: martian source 10.132.<_>.<_> from 10.132.<_>.<_>, on dev eth0`,
`PRC: Renewing lease on eth0.`,
`RCV: Reply message on eth0 from fe80::e9:7eff:fedf:3d37.`,
`Removed slice libcontainer container kubepods-burstable-pod25cb986c_3d6c_4ed0_abf3_ee59ed6175f9.slice.`,
- `Started libcontainer container <_>`,
+ `Started cri-containerd-95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9.scope.`,
+ `Started libcontainer container <_>.`,
`XMT: Renew on eth0, interval 9700ms.`,
- `XMT: Solicit on eth0, interval <_>`,
- `audit:type=1400 <_> apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=<_> comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined"`,
+ `XMT: Solicit on eth0, interval <_>.`,
+ `audit: type=1400 audit(<_>.<_>:<_>): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=<_> comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined"`,
`kauditd_printk_skb: <_> callbacks suppressed`,
`ll header: 00000000: 42 01 0a 80 00 <_> 42 01 0a 80 00 01 08 00`,
`net_ratelimit: 2 callbacks suppressed`,
- `time="2024-05-07T11:59:32.755926053Z" level=info msg="CreateContainer within sandbox \"81e019a0248a0300a328fd59f9939c3eaa1b98aa7f325a7f6e00592633275ef6\" for container &ContainerMetadata{Name:checkoutservice,Attempt:3417,}"`,
+ `run-containerd-io.containerd.runtime.v2.task-k8s.<_>.mount: Deactivated successfully.`,
+ `run-containerd-runc-k8s.io-e5f17d69eee483ec8d43b26d5d628246984ba92f794ee5f3748935f5b6448b9b-runc.6eAyHn.mount: Deactivated successfully.`,
`time="2024-05-07T11:59:34.519591759Z" level=info msg="StopContainer for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" with timeout 30 (s)"`,
`time="2024-05-07T11:59:34.520032214Z" level=info msg="Stop container \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" with signal terminated"`,
`time="2024-05-07T11:59:34.591282703Z" level=info msg="StopContainer for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" returns successfully"`,
@@ -189,34 +168,33 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
`time="2024-05-07T11:59:34.592084495Z" level=info msg="Container to stop \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" must be in running or unknown state, current state \"CONTAINER_EXITED\""`,
`time="2024-05-07T11:59:34.706960850Z" level=info msg="TearDown network for sandbox \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\" successfully"`,
`time="2024-05-07T11:59:34.707025668Z" level=info msg="StopPodSandbox for \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\" returns successfully"`,
- `time="2024-05-07T11:59:36.177858616Z" level=info msg="CreateContainer within sandbox \"81e019a0248a0300a328fd59f9939c3eaa1b98aa7f325a7f6e00592633275ef6\" for &ContainerMetadata{Name:checkoutservice,Attempt:3417,} returns container id \"95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9\""`,
- `time="2024-05-07T11:59:38.484586527Z" level=error msg="Failed to delete exec process \"d9e0a1867ce73695ad859f2b0a76fe8f5053db8a5e49142d747e53a445729bd4\" for container \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\"" error="ttrpc:closed:unknown"`,
- `time="2024-05-07T11:59:43.941729092Z" level=info msg="CreateContainer within sandbox \"ee9dc07bca79ef7dffe2a6eb326e27236e9e97c35913c7aae16ee0a62632fc25\" for container &ContainerMetadata{Name:cortex-gw,Attempt:1660,}"`,
- `time="2024-05-07T11:59:43.954289531Z" level=info msg="CreateContainer within sandbox \"ee9dc07bca79ef7dffe2a6eb326e27236e9e97c35913c7aae16ee0a62632fc25\" for &ContainerMetadata{Name:cortex-gw,Attempt:1660,} returns container id \"93fa5decd62691912f90c9b27526f5e00183239bfa4d3f4ea8578a7873b9c2b4\""`,
- `time="2024-05-07T11:59:<_> level=error msg="ExecSync for <_> failed" error="rpc error: code =NotFound desc =failed to exec in container: failed to load task: no running task found: task <_> not found: not found"`,
- `time="2024-05-07T11:59:<_> level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed" error="failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> unexpected status from HEAD request to https:<_> 403 Forbidden"`,
- `time="2024-05-07T11:59:<_> level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed" error="rpc error: code =NotFound desc =failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> not found"`,
- `time="2024-05-07T11:59:<_> level=info msg="CreateContainer within sandbox <_> for &ContainerMetadata{Name:grafana,Attempt:<_> returns container id <_>`,
- `time="2024-05-07T11:59:<_> level=info msg="CreateContainer within sandbox <_> for &ContainerMetadata{Name:hgrun,Attempt:0,} returns container id <_>`,
- `time="2024-05-07T11:59:<_> level=info msg="CreateContainer within sandbox <_> for container &ContainerMetadata{Name:grafana,Attempt:<_>`,
- `time="2024-05-07T11:59:<_> level=info msg="CreateContainer within sandbox <_> for container &ContainerMetadata{Name:hgrun,Attempt:0,}"`,
- `time="2024-05-07T11:59:<_> level=info msg="ImageCreate event name:<_> <_> labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
- `time="2024-05-07T11:59:<_> level=info msg="ImageUpdate event name:<_> <_> labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
- `time="2024-05-07T11:59:<_> level=info msg="PullImage \"us.gcr.io/hosted-grafana/hgrun:0.1.452\" returns image reference \"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\""`,
- `time="2024-05-07T11:59:<_> level=info msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" returns image reference \"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\""`,
- `time="2024-05-07T11:59:<_> level=info msg="Pulled image \"us.gcr.io/hosted-grafana/hgrun:0.1.452\" with image id \"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\", repo tag \"us.gcr.io/hosted-grafana/hgrun:0.1.452\", repo digest \"us.gcr.io/hosted-grafana/hgrun@sha256:b492dbbbee9faf9dba63c9fd89e6f9e148239765454c6a54c4284a2828dec153\", size \"19109699\" in <_>`,
- `time="2024-05-07T11:59:<_> level=info msg="Pulled image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" with image id \"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\", repo tag \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\", repo digest \"us.gcr.io/hosted-grafana/hosted-grafana-pro@sha256:0853965a142fb95648de3281a7c71de0d05fb51616bc32b523dc2f1da6ca06dc\", size \"173405048\" in <_>`,
- `time=<_> level=error msg="ContainerStatus for <_> failed" error="rpc error:code = NotFound desc = an error occurred when try to find container <_> not found"`,
- `time=<_> level=info msg="PullImage <_>`,
- `time=<_> level=info msg="RemoveContainer for <_>`,
- `time=<_> level=info msg="RemoveContainer for <_> returns successfully"`,
- `time=<_> level=info msg="StartContainer for <_>`,
- `time=<_> level=info msg="StartContainer for <_> returns successfully"`,
- `time=<_> level=info msg="cleaning up dead shim" namespace=k8s.io`,
- `time=<_> level=info msg="shim disconnected" id=<_> namespace=k8s.io`,
- `time=<_> level=info msg="stop pulling image <_> active requests=0, bytes read=<_>`,
- `time=<_> level=info msg="trying next host - response was http.StatusNotFound" host=us.gcr.io`,
- `time=<_> level=warning msg="cleaning up after shim disconnected" id=<_> namespace=k8s.io`,
+ `time="2024-05-07T11:59:38.117772842Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hg-plugins:2024-05-07-v545244-f51851984\""`,
+ `time="2024-05-07T11:59:38.484586527Z" level=error msg="Failed to delete exec process \"d9e0a1867ce73695ad859f2b0a76fe8f5053db8a5e49142d747e53a445729bd4\" for container \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\"" error="ttrpc: closed: unknown"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=error msg="ContainerStatus for \"<_>\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"<_>\": not found"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=error msg="ExecSync for \"<_>\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\" failed" error="failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/<_>.1.<_>: 403 Forbidden"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\" failed" error="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>: not found"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="CreateContainer within sandbox \"<_>\" for &ContainerMetadata{Name:<_>,Attempt:<_>,} returns container id \"<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="CreateContainer within sandbox \"<_>\" for container &ContainerMetadata{Name:<_>,Attempt:<_>,}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageCreate event name:\"sha256:<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageCreate event name:\"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageCreate event name:\"us.gcr.io/hosted-grafana/<_>@sha256:<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageUpdate event name:\"sha256:<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/<_>@sha256:<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="PullImage \"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\" returns image reference \"sha256:<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="PullImage \"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="Pulled image \"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\" with image id \"sha256:<_>\", repo tag \"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\", repo digest \"us.gcr.io/hosted-grafana/<_>@sha256:<_>\", size \"<_>\" in <_>.<_>"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="RemoveContainer for \"<_>\" returns successfully"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="RemoveContainer for \"<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="StartContainer for \"<_>\" returns successfully"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="StartContainer for \"<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="cleaning up dead shim" namespace=k8s.io`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="shim disconnected" id=<_> namespace=k8s.io`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="stop pulling image us.gcr.io/hosted-grafana/<_>:<_>.1.<_>: active requests=0, bytes read=<_>"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="trying next host - response was http.StatusNotFound" host=us.gcr.io`,
+ `time="2024-05-07T11:59:<_>.<_>" level=warning msg="cleaning up after shim disconnected" id=<_> namespace=k8s.io`,
+ `var-lib-containerd-tmpmounts-containerd\<_>.mount: Deactivated successfully.`,
},
},
{
@@ -224,21 +202,19 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
inputFile: "testdata/kafka.txt",
patterns: []string{
`[2024-05-07 10:55:40,626] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180391157, size=16991045, lastModifiedTime=1715075754780, largestRecordTimestamp=Some(1715075754774)),LogSegment(baseOffset=180393429, size=16997692, lastModifiedTime=1715075760206, largestRecordTimestamp=Some(1715075760186)),LogSegment(baseOffset=180395889, size=16998200, lastModifiedTime=1715075765542, largestRecordTimestamp=Some(1715075765526)),LogSegment(baseOffset=180398373, size=16977347, lastModifiedTime=1715075770515, largestRecordTimestamp=Some(1715075770504)) (kafka.log.LocalLog$)`,
- `[2024-05-07 10:55:40,638] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180400817, size=16997594, lastModifiedTime=1715075775780, largestRecordTimestamp=Some(1715075775771)),LogSegment(baseOffset=180403261, size=16992344, lastModifiedTime=1715075781053, largestRecordTimestamp=Some(1715075781021)),LogSegment(baseOffset=180405723, size=16989895, lastModifiedTime=1715075786205, largestRecordTimestamp=Some(1715075786174)),LogSegment(baseOffset=180408118, size=16998698, lastModifiedTime=1715075791681, largestRecordTimestamp=Some(1715075791673)),LogSegment(baseOffset=180410608, size=16995676, lastModifiedTime=1715075796438, largestRecordTimestamp=Some(1715075796430)),LogSegment(baseOffset=180412733, size=16963278, lastModifiedTime=1715075800534, largestRecordTimestamp=Some(1715075800511)),LogSegment(baseOffset=180414883, size=16984328, lastModifiedTime=1715075805272, largestRecordTimestamp=Some(1715075805230)),LogSegment(baseOffset=180417063, size=16989109, lastModifiedTime=1715075810381, largestRecordTimestamp=Some(1715075810372)),LogSegment(baseOffset=180419267, size=16996871, lastModifiedTime=1715075815153, largestRecordTimestamp=Some(1715075815125)),LogSegment(baseOffset=180421560, size=16988558, lastModifiedTime=1715075819785, largestRecordTimestamp=Some(1715075819763)),LogSegment(baseOffset=180424008, size=16999292, lastModifiedTime=1715075825336, largestRecordTimestamp=Some(1715075825303)),LogSegment(baseOffset=180426459, size=16990595, lastModifiedTime=1715075830839, largestRecordTimestamp=Some(1715075830827)),LogSegment(baseOffset=180428944, size=16995859, lastModifiedTime=1715075835942, largestRecordTimestamp=Some(1715075835904)),LogSegment(baseOffset=180431327, size=16992294, lastModifiedTime=1715075841219, largestRecordTimestamp=Some(1715075841214)),LogSegment(baseOffset=180433867, size=16966736, lastModifiedTime=1715075846443, largestRecordTimestamp=Some(1715075846401)),LogSegment(baseOffset=180436204, size=16894731, lastModifiedTime=1715075853273, largestRecordTimestamp=Some(1715075853244)),LogSegment(baseOffset=180438984, size=16983529, lastModifiedTime=1715075858911, largestRecordTimestamp=Some(1715075858891)),LogSegment(baseOffset=180441466, size=16996933, lastModifiedTime=1715075863566, largestRecordTimestamp=Some(1715075863554)),LogSegment(baseOffset=180443778, size=16999841, lastModifiedTime=1715075866199, largestRecordTimestamp=Some(1715075866185)),LogSegment(baseOffset=180445367, size=16992471, lastModifiedTime=1715075870385, largestRecordTimestamp=Some(1715075870347)),LogSegment(baseOffset=180447366, size=16999996, lastModifiedTime=1715075875102, largestRecordTimestamp=Some(1715075875091)),LogSegment(baseOffset=180449601, size=16994426, lastModifiedTime=1715075879927, largestRecordTimestamp=Some(1715075879926)),LogSegment(baseOffset=180452079, size=16998020, lastModifiedTime=1715075885293, largestRecordTimestamp=Some(1715075885263)),LogSegment(baseOffset=180454546, size=16992231, lastModifiedTime=1715075890424, largestRecordTimestamp=Some(1715075890409)),LogSegment(baseOffset=180456986, size=16970315, lastModifiedTime=1715075895719, largestRecordTimestamp=Some(1715075895690)),LogSegment(baseOffset=180459366, size=16990785, lastModifiedTime=1715075900996, largestRecordTimestamp=Some(1715075900985)),LogSegment(baseOffset=180461885, size=16996655, lastModifiedTime=1715075905847, largestRecordTimestamp=Some(1715075905841)),LogSegment(baseOffset=180464299, size=16982181, lastModifiedTime=1715075911052, largestRecordTimestamp=Some(1715075911028)),LogSegment(baseOffset=180466821, size=16997630, lastModifiedTime=1715075915962, largestRecordTimestamp=Some(1715075915953)),LogSegment(baseOffset=180468968, size=16995723, lastModifiedTime=1715075920325, largestRecordTimestamp=Some(1715075920308)),LogSegment(baseOffset=180471046, size=16979316, lastModifiedTime=1715075924724, largestRecordTimestamp=Some(1715075924697)),LogSegment(baseOffset=180473259, size=16995238, lastModifiedTime=1715075929645, largestRecordTimestamp=Some(1715075929624)),LogSegment(baseOffset=180475486, size=16988461, lastModifiedTime=1715075934288, largestRecordTimestamp=Some(1715075934283)),LogSegment(baseOffset=180477735, size=16993767, lastModifiedTime=1715075939277, largestRecordTimestamp=Some(1715075939270)),LogSegment(baseOffset=180480095, size=16995409, lastModifiedTime=1715075944639, largestRecordTimestamp=Some(1715075944635)),LogSegment(baseOffset=180482560, size=16992784, lastModifiedTime=1715075949760, largestRecordTimestamp=Some(1715075949760)),LogSegment(baseOffset=180484967, size=16990838, lastModifiedTime=1715075954937, largestRecordTimestamp=Some(1715075954929)),LogSegment(baseOffset=180487377, size=16976794, lastModifiedTime=1715075960151, largestRecordTimestamp=Some(1715075960119)),LogSegment(baseOffset=180489919, size=16997379, lastModifiedTime=1715075965116, largestRecordTimestamp=Some(1715075965085)),LogSegment(baseOffset=180492304, size=16956613, lastModifiedTime=1715075970448, largestRecordTimestamp=Some(1715075970424)),LogSegment(baseOffset=180494832, size=16895640, lastModifiedTime=1715075975354, largestRecordTimestamp=Some(1715075975341)),LogSegment(baseOffset=180496930, size=16998328, lastModifiedTime=1715075979813, largestRecordTimestamp=Some(1715075979796)),LogSegment(baseOffset=180499079, size=16995699, lastModifiedTime=1715075984309, largestRecordTimestamp=Some(1715075984285)),LogSegment(baseOffset=180501183, size=16993785, lastModifiedTime=1715075989086, largestRecordTimestamp=Some(1715075989064)),LogSegment(baseOffset=180503431, size=16989600, lastModifiedTime=1715075993713, largestRecordTimestamp=Some(1715075993683)),LogSegment(baseOffset=180505674, size=16984790, lastModifiedTime=1715075998337, largestRecordTimestamp=Some(1715075998318)),LogSegment(baseOffset=180508022, size=16982630, lastModifiedTime=1715076003671, largestRecordTimestamp=Some(1715076003660)),LogSegment(baseOffset=180510439, size=16999488, lastModifiedTime=1715076009000, largestRecordTimestamp=Some(1715076008996)),LogSegment(baseOffset=180512848, size=16997845, lastModifiedTime=1715076014033, largestRecordTimestamp=Some(1715076014032)),LogSegment(baseOffset=180515281, size=16990661, lastModifiedTime=1715076019245, largestRecordTimestamp=Some(1715076019216)),LogSegment(baseOffset=180517815, size=16996244, lastModifiedTime=1715076023989, largestRecordTimestamp=Some(1715076023963)),LogSegment(baseOffset=180520112, size=16992012, lastModifiedTime=1715076029243, largestRecordTimestamp=Some(1715076029231)) (kafka.log.LocalLog$)`,
`[2024-05-07 10:55:53,038] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=447957, size=948, lastModifiedTime=1715059232052, largestRecordTimestamp=Some(1715059232002)),LogSegment(baseOffset=447969, size=948, lastModifiedTime=1715059424352, largestRecordTimestamp=Some(1715059424301)) (kafka.log.LocalLog$)`,
- `[2024-05-07 10:55:<_> INFO Deleted log <_> (kafka.log.LogSegment)`,
- `[2024-05-07 10:55:<_> INFO Deleted offset index <_> (kafka.log.LogSegment)`,
- `[2024-05-07 10:55:<_> INFO Deleted producer state snapshot <_> (kafka.log.SnapshotFile)`,
- `[2024-05-07 10:55:<_> INFO Deleted time index <_> (kafka.log.LogSegment)`,
- `[2024-05-07 10:55:<_> INFO [ProducerStateManager <_> Wrote producer snapshot at offset <_> with 0 producer ids in <_> ms. (kafka.log.ProducerStateManager)`,
- `[2024-05-07 <_> INFO [LocalLog partition=<_> dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=<_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> 
lastModifiedTime=<_> largestRecordTimestamp=<_> (kafka.log.LocalLog$)`,
- `[2024-05-07 <_> INFO [LocalLog partition=<_> dir=/bitnami/kafka/data] Rolled new log segment at offset <_> in <_> ms. (kafka.log.LocalLog)`,
- `[2024-05-07 <_> INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=<_> size=948, lastModifiedTime=<_> largestRecordTimestamp=<_> (kafka.log.LocalLog$)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=<_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> due to retention size <_> breach. Log size after deletion will be <_> (kafka.log.UnifiedLog)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach:LogSegment(baseOffset=<_> size=948, lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=948, lastModifiedTime=<_> largestRecordTimestamp=<_> (kafka.log.UnifiedLog)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach:LogSegment(baseOffset=<_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> (kafka.log.UnifiedLog)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to leader offset increment (kafka.log.UnifiedLog)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to segment deletion (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:53,<_>] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=<_>, size=948, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) (kafka.log.LocalLog$)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO Deleted log /bitnami/kafka/data/<_>/<_>.log.deleted. (kafka.log.LogSegment)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO Deleted offset index /bitnami/kafka/data/<_>/<_>.index.deleted. (kafka.log.LogSegment)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO Deleted producer state snapshot /bitnami/kafka/data/<_>/<_>.snapshot.deleted (kafka.log.SnapshotFile)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO Deleted time index /bitnami/kafka/data/<_>/<_>.timeindex.deleted. (kafka.log.LogSegment)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [LocalLog partition=<_>, dir=/bitnami/kafka/data] Rolled new log segment at offset <_> in <_> ms. (kafka.log.LocalLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [ProducerStateManager partition=<_>] Wrote producer snapshot at offset <_> with 0 producer ids in <_> ms. (kafka.log.ProducerStateManager)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=<_>, size=<_>, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) due to retention size <_> breach. Log size after deletion will be <_>. (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: LogSegment(baseOffset=<_>, size=948, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)),LogSegment(baseOffset=<_>, size=948, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: LogSegment(baseOffset=<_>, size=<_>, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Incremented log start offset to <_> due to leader offset increment (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Incremented log start offset to <_> due to segment deletion (kafka.log.UnifiedLog)`,
},
},
{
@@ -246,20 +222,22 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
inputFile: "testdata/kubernetes.txt",
patterns: []string{
`I0507 12:02:27.947830 1 nodeutilization.go:274] "Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers"`,
- `I0507 12:02:<_> 1 defaultevictor.go:163] "pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable" <_>`,
- `I0507 12:02:<_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:02:<_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="pod has local storage and descheduler is not configured with evictLocalStoragePods"`,
- `I0507 12:02:<_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="pod is a DaemonSet pod"`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, insufficient <_> insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, insufficient <_> insufficient <_> insufficient pods]"`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_> insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]"`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="insufficient cpu"`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:<_> error:="[insufficient <_> insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:<_> error:="pod node selector does not match the node label"`,
- `I0507 12:02:<_> 1 node.go:339] "no Pod antiaffinity rule found" <_>`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:163] "pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable" pod="<_>/<_>"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>/<_>" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="ge-logs/<_>" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="insight-logs/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="loki-dev-ssd/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/<_>" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="pyroscope-ebpf/<_>" checks="pod is a DaemonSet pod"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, <_> <_><_> <_> <_><_> <_> <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="insufficient cpu"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="<_>" error:="[insufficient <_>, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="<_>" error:="pod node selector does not match the node label"`,
+ `I0507 12:02:27.<_> 1 node.go:339] "no Pod antiaffinity rule found" pod="<_>/<_>"`,
`I0507 12:04:17.595169 1 descheduler.go:155] Building a pod evictor`,
`I0507 12:04:17.596431 1 nodeutilization.go:204] "Node is underutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-4z2l" usage={"cpu":"984m","memory":"611Mi","pods":"16"} usagePercentage={"cpu":12.44,"memory":2.15,"pods":25}`,
`I0507 12:04:17.596484 1 highnodeutilization.go:107] "Criteria for a node below target utilization" CPU=50 Mem=50 Pods=100`,
@@ -267,26 +245,33 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
`I0507 12:04:17.596528 1 nodeutilization.go:260] "Total capacity to be moved" CPU=5060 Mem=112216292800 Pods=163`,
`I0507 12:04:17.596651 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/metrics-server-v0.6.3-68f5b7c4d5-t5mz8" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
`I0507 12:04:17.596803 1 defaultevictor.go:202] "Pod fails the following checks" pod="gadget/gadget-zjjts" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:04:<_> 1 nodeutilization.go:207] "Node is overutilized" <_> usage={"cpu":<_> <_> <_> usagePercentage={"cpu":<_> <_> <_>`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:<_> <_> 1 descheduler.go:<_> "Number of evicted pods" <_>`,
- `I0507 12:<_> <_> 1 nodeutilization.go:<_> "Evicting pods from node" <_> usage={"cpu":<_> <_> <_>`,
- `I0507 12:<_> <_> 1 nodeutilization.go:<_> "No removable pods on node, try next node" <_>`,
- `I0507 12:<_> <_> 1 profile.go:<_> "Total number of pods evicted" extension point="Balance" <_>`,
- `I0507 12:<_> <_> 1 reflector.go:<_> k8s.io/client-go/informers/factory.go:<_> Watch close - <_> total <_> items received`,
- `I0507 <_> 1 <_> "Pods on node" node=<_> allPods=<_> nonRemovablePods=<_> removablePods=<_>`,
+ `I0507 12:04:17.<_> 1 nodeutilization.go:207] "Node is overutilized" node="<_>" usage={"cpu":"<_>","memory":"<_>","pods":"<_>"} usagePercentage={"cpu":<_>.<_>,"memory":<_>.<_>,"pods":<_>.<_>}`,
+ `I0507 12:04:17.<_> 1 nodeutilization.go:207] "Node is overutilized" node="<_>" usage={"cpu":"<_>","memory":"<_>","pods":"<_>"} usagePercentage={"cpu":<_>.<_>,"memory":<_>.<_>,"pods":<_>}`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="agent-logs/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="conntrack-exporter/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="goldpinger/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="netfilter-exporter/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="node-exporter/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="startup/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 descheduler.go:<_>] "Number of evicted pods" totalEvicted=<_>`,
+ `I0507 12:<_>:<_>.<_> 1 nodeutilization.go:<_>] "Evicting pods from node" node="<_>" usage={"cpu":"<_>","memory":"<_>","pods":"<_>"}`,
+ `I0507 12:<_>:<_>.<_> 1 nodeutilization.go:<_>] "No removable pods on node, try next node" node="<_>"`,
+ `I0507 12:<_>:<_>.<_> 1 nodeutilization.go:<_>] "Pods on node" node="<_>" allPods=<_> nonRemovablePods=<_> removablePods=<_>`,
+ `I0507 12:<_>:<_>.<_> 1 profile.go:<_>] "Total number of pods evicted" extension point="Balance" evictedPods=<_>`,
+ `I0507 12:<_>:<_>.<_> 1 reflector.go:<_>] k8s.io/client-go/informers/factory.go:<_>: Watch close - *v1.<_> total <_> items received`,
},
},
{
drain: New(DefaultConfig(), nil),
inputFile: "testdata/vault.txt",
patterns: []string{
- `2024-05-07T10:<_> <_> [INFO] expiration: revoked lease: <_>`,
+ `2024-05-07T10:56:38.667Z [INFO] expiration: revoked lease: lease_id=auth/gcp/login/h4c031a99aa555040a0dd99864d828e946c6d4e31f4f5178757183def61f9d104`,
+ `2024-05-07T10:<_>:<_>.<_> [INFO] expiration: revoked lease: lease_id=auth/kubernetes/<_>/login/<_>`,
},
},
{
@@ -294,86 +279,129 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
inputFile: "testdata/calico.txt",
patterns: []string{
`2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 699: Finished loading iptables state ipVersion=0x4 table="filter"`,
+ `2024-05-08 15:23:56.403 [INFO][615489] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 600ms: avg=119ms longest=119ms (resync-filter-v4)`,
`2024-05-08 15:23:56.614 [DEBUG][76] felix/int_dataplane.go 1777: Refreshing routes`,
`2024-05-08 15:23:56.615 [DEBUG][76] felix/route_rule.go 179: Queueing a resync of routing rules. ipVersion=4`,
- `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480:Queueing a resync of routing table. ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
- `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480:Queueing a resync of routing table. ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
+ `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480: Queueing a resync of routing table. ifaceRegex="<_>.<_>" ipVersion=0x4 tableIndex=<_>`,
`2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 533: Check interfaces matching regex`,
`2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 605: Queueing a resync of wireguard configuration ipVersion=0x4`,
`2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 654: Wireguard is not in-sync - verifying wireguard configuration is removed ipVersion=0x4`,
`2024-05-08 15:23:56.617 [DEBUG][76] felix/wireguard.go 1503: Wireguard is disabled and does not exist ifaceName="wireguard.cali" ipVersion=0x4`,
`2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 584: Flag no OIF for full re-sync`,
- `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 614:Synchronised routes on interface ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
- `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 661:Syncing interface routes ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
- `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 686:Reconcile against kernel programming ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
- `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654:Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "", "", "", "", "", "", "", "", "tVnHkvAo15HuiPy0", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="raw"`,
- `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654:Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "6gwbT8clXdHdC1b1"} chainName="PREROUTING" expectedRuleIDs=[]string{"6gwbT8clXdHdC1b1", "", "", "", ""} ipVersion=0x4 table="raw"`,
- `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654:Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", ""} chainName="INPUT" expectedRuleIDs=[]string{"Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`,
- `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654:Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "tVnHkvAo15HuiPy0", "", "", "", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`,
+ `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
+ `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
+ `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
+ `2024-05-08 15:23:56.624 [INFO][76] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 200ms: avg=10ms longest=10ms (resync-routes-v4,resync-routes-v4,resync-rules-v4,resync-wg)`,
+ `2024-05-08 15:23:56.<_> [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="<_>" ipVersion=0x4 table="filter"`,
+ `2024-05-08 15:23:56.<_> [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="<_>.<_>" ipVersion=0x4 table="filter"`,
+ `2024-05-08 15:23:56.<_> [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-ksa.<_>.<_>" ipVersion=0x4 table="filter"`,
+ `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
+ `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
+ `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
+ `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
+ `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 880: Processing route: 254 <_> 10.68.10.<_>/32 ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
+ `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.<_>/32 ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
+ `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "", "", "", "", "", "", "", "", "tVnHkvAo15HuiPy0", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="raw"`,
+ `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "6gwbT8clXdHdC1b1"} chainName="PREROUTING" expectedRuleIDs=[]string{"6gwbT8clXdHdC1b1", "", "", "", ""} ipVersion=0x4 table="raw"`,
+ `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", ""} chainName="INPUT" expectedRuleIDs=[]string{"Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`,
+ `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "tVnHkvAo15HuiPy0", "", "", "", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`,
+ `2024-05-08 15:23:58.169 [INFO][2333] felix/summary.go 100: Summarising 35 dataplane reconciliation loops over 1m2s: avg=12ms longest=46ms (resync-filter-v4,resync-filter-v6,resync-mangle-v4,resync-mangle-v6,update-filter-v4,update-filter-v6)`,
`2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 957: Examining link for MTU calculation mtu=1500 name="eth0"`,
`2024-05-08 15:23:58.680 [DEBUG][216945] felix/int_dataplane.go 1785: Reschedule kick received`,
`2024-05-08 15:23:58.681 [DEBUG][216945] felix/feature_detect.go 112: Refreshing detected iptables features`,
- `2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 944:Invalidating dataplane cache ipVersion=0x4 reason="refresh timer" table="nat"`,
+ `2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 944: Invalidating dataplane cache ipVersion=0x4 reason="refresh timer" table="nat"`,
`2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 242: Ran iptables --version rawVersion="iptables v1.8.4 (legacy)\n"`,
`2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 255: Parsed iptables version version=1.8.4`,
`2024-05-08 15:23:58.684 [DEBUG][216945] felix/table.go 604: Loading current iptables state and checking it is correct. ipVersion=0x4 table="nat"`,
`2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 110: Raw kernel version rawVersion="Linux version 5.15.0-1057-azure (buildd@lcy02-amd64-033) (gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0, GNU ld (GNU Binutils for Ubuntu) 2.38) #65-Ubuntu SMP Fri Feb 9 18:39:24 UTC 2024\n"`,
`2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 118: Parsed kernel version version=5.15.0-1057`,
`2024-05-08 15:23:58.715 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`,
- `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 851:Parsing line ipVersion=0x4 line="*nat" table="nat"`,
+ `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="*nat" table="nat"`,
`2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`,
- `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881:Not an append, skipping ipVersion=0x4 line="*nat" table="nat"`,
- `2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":POSTROUTING ACCEPT [0:0]" table="nat"`,
- `2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="POSTROUTING" ipVersion=0x4 line=":POSTROUTING ACCEPT [0:0]" table="nat"`,
- `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":OUTPUT ACCEPT [0:0]" table="nat"`,
- `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":PREROUTING ACCEPT [0:0]" table="nat"`,
- `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="OUTPUT" ipVersion=0x4 line=":OUTPUT ACCEPT [0:0]" table="nat"`,
- `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="PREROUTING" ipVersion=0x4 line=":PREROUTING ACCEPT [0:0]" table="nat"`,
- `2024-05-08 15:23:<_> <_> felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{}`,
- `2024-05-08 15:23:<_> <_> felix/health.go 167: Health: <_>`,
- `2024-05-08 15:23:<_> <_> felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:<_> ext:<_> loc:(*time.Location)(0x4ce3aa0)}}`,
- `2024-05-08 15:23:<_> <_> felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:<_> ext:<_> loc:(*time.Location)(0x4ce3aa0)}}`,
- `2024-05-08 15:23:<_> <_> felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:<_> ext:<_> loc:(*time.Location)(0x4ce3aa0)}}`,
- `2024-05-08 15:23:<_> <_> felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"}`,
- `2024-05-08 15:23:<_> <_> felix/health.go <_> GET <_>`,
- `2024-05-08 15:23:<_> <_> felix/int_dataplane.go 1773: Refreshing IP sets state`,
- `2024-05-08 15:23:<_> <_> felix/int_dataplane.go 1807: Applying dataplane updates`,
- `2024-05-08 15:23:<_> <_> felix/int_dataplane.go 2080: Asked to reschedule. <_>`,
- `2024-05-08 15:23:<_> <_> felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet"`,
- `2024-05-08 15:23:<_> <_> felix/ipsets.go 314: Resyncing ipsets with dataplane. family="inet"`,
- `2024-05-08 15:23:<_> <_> felix/ipsets.go 426: Parsing IP set. family="inet" <_>`,
- `2024-05-08 15:23:<_> <_> felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" <_>`,
- `2024-05-08 15:23:<_> <_> felix/ipsets.go 643: No dirty IP sets. family="inet"`,
- `2024-05-08 15:23:<_> <_> felix/summary.go 100: Summarising <_> dataplane reconciliation loops over <_> <_> <_> <_>`,
- `2024-05-08 15:23:<_> <_> felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`,
- `2024-05-08 15:23:<_> <_> felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`,
- `2024-05-08 15:23:<_> <_> felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, <_> <_> time.Local)}} type=""`,
- `2024-05-08 15:23:<_> <_> felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 <_>`,
- `2024-05-08 15:23:<_> <_> felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 <_>`,
- `2024-05-08 15:23:<_> <_> felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4`,
- `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4`,
- `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4`,
- `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}}`,
- `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{}`,
- `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1798: Processing BPF actions. family="ipv4"`,
- `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4"`,
- `2024-05-08 15:23:<_> <_> felix/xdp_state.go 968: Processing member updates. family=4`,
- `2024-05-08 15:23:<_> [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":<_> - [0:0]" table="nat"`,
- `2024-05-08 15:23:<_> [DEBUG][216945] felix/table.go 870: Found forward-reference <_> ipVersion=0x4 line=":<_> - [0:0]" table="nat"`,
- `2024-05-08 15:23:<_> [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection <_> <_>`,
- `2024-05-08 <_> <_> felix/ipsets.go 366:Finished IPSets resync family="inet" numInconsistenciesFound=0 resyncDuration=<_>`,
- `2024-05-08 <_> <_> felix/ipsets.go 467:Found member in dataplane canon=<_> family="inet" member=<_> setID="this-host"`,
- `2024-05-08 <_> <_> felix/ipsets.go 589:Whitelisting IP sets. ID="all-ipam-pools" family="inet" mainName="cali40all-ipam-pools"`,
- `2024-05-08 <_> <_> felix/ipsets.go 589:Whitelisting IP sets. ID="masq-ipam-pools" family="inet" mainName="cali40masq-ipam-pools"`,
- `2024-05-08 <_> <_> felix/ipsets.go 589:Whitelisting IP sets. ID="this-host" family="inet" mainName="cali40this-host"`,
- `2024-05-08 <_> [DEBUG][615489] felix/table.go 677:Skipping expected chain chainName=<_> ipVersion=0x4 table="filter"`,
- `2024-05-08 <_> [DEBUG][76] felix/route_table.go 557:Resync:found calico-owned interface ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
- `2024-05-08 <_> [DEBUG][76] felix/route_table.go 614:Synchronised routes on interface ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
- `2024-05-08 <_> [DEBUG][76] felix/route_table.go 661:Syncing interface routes ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
- `2024-05-08 <_> [DEBUG][76] felix/route_table.go 686:Reconcile against kernel programming ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
- `2024-05-08 <_> [DEBUG][76] felix/route_table.go 880:Processing route:254 <_> <_> ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
- `2024-05-08 <_> [DEBUG][76] felix/route_table.go 915:Route is correct dest=<_> ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
- `bird: Netlink: No route to host`,
+ `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="*nat" table="nat"`,
+ `2024-05-08 15:23:58.<_> [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":<_> <_> [0:0]" table="nat"`,
+ `2024-05-08 15:23:58.<_> [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="<_>" ipVersion=0x4 line=":<_> <_> [0:0]" table="nat"`,
+ `2024-05-08 15:23:58.<_> [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=<_> name="<_>"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{}`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/health.go 167: Health: <_>`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"<_>", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:<_>, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:<_>, ext:<_>, loc:(*time.Location)(0x4ce3aa0)}}`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"}`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/health.go <_>: GET /<_>`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/int_dataplane.go 1773: Refreshing IP sets state`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/int_dataplane.go 1807: Applying dataplane updates`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/int_dataplane.go 2080: Asked to reschedule. delay=<_>.<_>`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 314: Resyncing ipsets with dataplane. family="inet"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 resyncDuration=<_>.<_>`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 426: Parsing IP set. family="inet" setName="<_>"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 467: Found member in dataplane canon=<_>.<_>.<_>.<_> family="inet" member="<_>.<_>.<_>.<_>" setID="this-host"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 589: Whitelisting IP sets. ID="<_>" family="inet" mainName="<_>"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="<_>"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 643: No dirty IP sets. family="inet"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, <_>, <_>, time.Local)}} type=""`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="<_>"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="<_>"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}}`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{}`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1798: Processing BPF actions. family="ipv4"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4"`,
+ `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 968: Processing member updates. family=4`,
+ `2024-05-08 15:23:<_>.<_> [INFO][<_>] felix/summary.go 100: Summarising <_> dataplane reconciliation loops over <_>.<_>: avg=<_> longest=<_> (<_>)`,
+ "bird: Netlink: No route to host",
+ },
+ },
+ {
+ drain: New(DefaultConfig(), nil),
+ inputFile: "testdata/grafana-ruler.txt",
+ patterns: []string{
+ `level=debug ts=2024-05-29T13:44:15.804597912Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"`,
+ `level=debug ts=2024-05-29T13:44:15.<_> caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"`,
+ `level=debug ts=2024-05-29T13:44:15.<_> caller=remote_instance_store.go:51 user=<_> slug=<_> msg="calling SaveAlertInstance"`,
+ `logger=ngalert.scheduler user=102553 slug=flownative version=1 fingerprint=4ad9e35be0f80ca3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.79499903Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.794695854s EvaluationString:}]" duration=116.038803ms`,
+ `logger=ngalert.scheduler user=473762 slug=intentiq version=35 fingerprint=0bc4b6f46a852420 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.788200731Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.787878355s EvaluationString:}]" duration=15.345212ms`,
+ `logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=65a68c433031b4e0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.790598463Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.78875161s EvaluationString:}]" duration=1.693079007s`,
+ `logger=ngalert.state.manager user=102553 slug=flownative instance= t=2024-05-29T13:44:15.795103234Z level=debug msg="Setting next state" handler=resultNormal`,
+ `logger=ngalert.state.manager user=15338 slug=rstsoftwarerc instance= t=2024-05-29T13:44:15.790951656Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z`,
+ `logger=ngalert.state.manager user=172772 slug=ppbtradingtribe instance="datasource_uid=p06gSxS7k, ref_id=A" t=2024-05-29T13:44:15.793080651Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:15.79304032Z level=debug msg="State manager processing evaluation results" resultCount=1`,
+ `logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.796750449Z level=debug msg="Setting next state" handler=resultNoData`,
+ `logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dish" t=2024-05-29T13:44:15.788780219Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed" t=2024-05-29T13:44:15.788904162Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=rcn" t=2024-05-29T13:44:15.789011178Z level=debug msg="Setting next state" handler=resultNormal`,
+ `logger=ngalert.state.manager user=412141 slug=sharethrough instance="datasource_uid=pFBylkiVz, ref_id=Swap Usage for Alert" t=2024-05-29T13:44:15.792756002Z level=debug msg="Setting next state" handler=resultNoData`,
+ `logger=ngalert.state.manager user=412141 slug=sharethrough instance="datasource_uid=pFBylkiVz, ref_id=Swap Usage for Alert" t=2024-05-29T13:44:15.792775073Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.799932951Z level=debug msg="Setting next state" handler=resultNormal`,
+ `logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.799945019Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.<_> level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData`,
+ `logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.<_> level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.<_> level=debug msg="Setting next state" handler=resultNoData`,
+ `logger=ngalert.state.manager user=473762 slug=intentiq t=2024-05-29T13:44:15.788261794Z level=debug msg="State manager processing evaluation results" resultCount=1`,
+ `logger=ngalert.state.manager user=630397 slug=tatin instance= t=2024-05-29T13:44:15.795542988Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.800327814Z level=debug msg="Setting next state" handler=resultNoData`,
+ `logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791100679Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData`,
+ `logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791114955Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791129917Z level=debug msg="Setting next state" handler=resultNoData`,
+ `logger=ngalert.state.manager user=84535 slug=arweave instance= t=2024-05-29T13:44:15.796640981Z level=debug msg="Setting next state" handler=resultNormal`,
+ `logger=ngalert.state.manager user=84535 slug=arweave t=2024-05-29T13:44:15.796542294Z level=debug msg="State manager processing evaluation results" resultCount=1`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=<_>, instance=172.30.<_>.<_>:8080, job=integrations/kubernetes/kube-state-metrics, namespace=<_>, pod=<_>, uid=<_>" t=2024-05-29T13:44:15.<_> level=debug msg="Setting next state" handler=resultNormal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d" t=2024-05-29T13:44:15.78870732Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.<_>.<_>:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=<_>, uid=<_>" t=2024-05-29T13:44:15.<_> level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c" t=2024-05-29T13:44:15.790564871Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.791738618Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a" t=2024-05-29T13:44:15.79227249Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.<_>.<_>:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=<_>, uid=<_>" t=2024-05-29T13:44:15.<_> level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsdevauthts-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthts-utils-7f54f8d7b4-njddr, uid=352d7df2-7832-41f3-ad3e-cbe1a060c968" t=2024-05-29T13:44:15.793846886Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsqalivets-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivets-utils-75b748978f-r2vkj, uid=1d39d0d7-d483-427b-ba91-45d897674698" t=2024-05-29T13:44:15.794284465Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.<_>.<_>:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=<_>, uid=<_>" t=2024-05-29T13:44:15.<_> level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-web, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthts-web-57f5b6f56b-bdmh9, uid=8f6b5224-94ce-4f5d-ba08-03f9fc2f572f" t=2024-05-29T13:44:15.795397351Z level=debug msg="Keeping state" state=Normal`,
+ `logger=ngalert.state.manager.persist user=14927 slug=rstsoftware t=2024-05-29T13:44:15.798496844Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.340653ms`,
+ `logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:15.806655602Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1`,
+ `logger=ngalert.state.manager.persist user=<_> slug=<_> t=2024-05-29T13:44:15.<_> level=debug msg="Saving alert states" count=<_> max_state_save_concurrency=1`,
},
},
}
@@ -454,7 +482,6 @@ func TestDrain_TrainGeneratesMatchablePatterns(t *testing.T) {
}
})
}
-
}
func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) {
@@ -509,16 +536,17 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T)
},
},
{
- name: "Unicode characters are matchable",
+ name: "Scheduler patterns are matchable",
drain: New(DefaultConfig(), nil),
inputLines: []string{
- `13:25:18.033470 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_999 0.00 1717075518`,
- `13:25:18.033422 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_99 0.00 1717075518`,
- `13:25:18.033394 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_95 0.00 1717075518`,
- `13:25:18.033364 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_75 0.00 1717075518`,
- `13:25:18.033335 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_50 0.00 1717075518`,
- `13:25:18.033304 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_std 0.00 1717075518`,
- `13:25:18.033281 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_mean 0.00 1717075518`,
+ `ts=2024-05-30T12:50:36.648377186Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`,
+ `ts=2024-05-30T12:50:36.350575929Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`,
+ `ts=2024-05-30T12:50:36.335784477Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`,
+ `ts=2024-05-30T12:50:36.250406732Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`,
+ `ts=2024-05-30T12:50:36.248030329Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.45.239:9095`,
+ `ts=2024-05-30T12:50:36.176344754Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`,
+ `ts=2024-05-30T12:50:36.174730772Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`,
+ `ts=2024-05-30T12:50:36.076517207Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.45.239:9095`,
},
},
}
@@ -541,5 +569,125 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T)
}
})
}
+}
+
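+// TestDeduplicatePlaceholders checks that runs of adjacent <_> placeholders collapse into a
+// single <_>, while placeholders separated by any other character (including spaces) are preserved.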
+func TestDeduplicatePlaceholders(t *testing.T) {
+ type dedupCase struct {
+ line string
+ want string
+ }
+ cases := []dedupCase{
+ {
+ line: "abcd",
+ want: "abcd",
+ },
+ {
+ line: "<_><_>abcd",
+ want: "<_>abcd",
+ },
+ {
+ line: strings.Repeat("<_>", 100),
+ want: "<_>",
+ },
+ {
+ line: "<_> <_>",
+ want: "<_> <_>",
+ },
+ {
+ line: strings.Repeat("<_> ", 100),
+ want: strings.Repeat("<_> ", 100),
+ },
+ {
+ line: "<_><<_>",
+ want: "<_><<_>",
+ },
+ {
+ line: "<_><->",
+ want: "<_><->",
+ },
+ {
+ line: strings.Repeat(strings.Repeat("<_>", 100)+" ", 100),
+ want: strings.Repeat("<_> ", 100),
+ },
+ {
+ line: "<<<<<<<_><_>>>>>>>>",
+ want: "<<<<<<<_>>>>>>>>",
+ },
+ {
+ line: strings.Repeat("A", 100) + "<_><_>",
+ want: strings.Repeat("A", 100) + "<_>",
+ },
+ }
+ for i, tc := range cases {
+ t.Run(fmt.Sprintf("Dedup %d", i), func(t *testing.T) {
+ got := deduplicatePlaceholders(tc.line, `<_>`)
+ require.Equal(t, tc.want, got)
+ })
+ }
+}
+
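+// TestDrain_PruneTreeClearsOldBranches trains two clusters (one from old timestamps, one recent),
+// ages out and deletes the old cluster, then verifies that Prune shrinks the prefix tree accordingly.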
+func TestDrain_PruneTreeClearsOldBranches(t *testing.T) {
+ t.Parallel()
+ tests := []struct {
+ name string
+ drain *Drain
+ inputLines []string
+ }{
+ {
+ name: "should prune old branches",
+ drain: New(DefaultConfig(), nil),
+ inputLines: []string{
+ "test test test A",
+ "test test test B",
+ "test test test C",
+ "test test test D",
+ "test test test E",
+ "test test test F",
+ "test test test G",
+ "my name is W",
+ "my name is X",
+ "my name is Y",
+ "my name is Z",
+ },
+ },
+ }
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ now := time.Now()
+ for i, line := range tt.inputLines {
+ ts := now.Add(time.Millisecond * time.Duration(i))
+ if i < 7 {
+ ts = ts.Add(-time.Duration(7-i) * time.Minute)
+ }
+ tt.drain.Train(line, ts.UnixNano())
+ }
+
+ require.Len(t, tt.drain.Clusters(), 2)
+ require.Equal(t, 8, countNodes(tt.drain.rootNode))
+
+ clusters := tt.drain.Clusters()
+ for _, cluster := range clusters {
+ cluster.Prune(time.Second * 10)
+ if cluster.Size == 0 {
+ tt.drain.Delete(cluster)
+ }
+ }
+ require.Len(t, tt.drain.Clusters(), 1)
+ require.Equal(t, 8, countNodes(tt.drain.rootNode), "expected same number of nodes before pruning")
+
+ tt.drain.Prune()
+ require.Len(t, tt.drain.Clusters(), 1)
+ require.Equal(t, 5, countNodes(tt.drain.rootNode), "expected fewer nodes after pruning")
+ })
+ }
+}
+
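+// countNodes returns the number of nodes in the subtree rooted at node, including node itself.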
+func countNodes(node *Node) int {
+ total := 1
+ for _, child := range node.keyToChildNode {
+ total += countNodes(child)
+ }
+ return total
}
diff --git a/pkg/pattern/drain/line_tokenizer.go b/pkg/pattern/drain/line_tokenizer.go
index 1317fbe3fca88..89bf34a5569b5 100644
--- a/pkg/pattern/drain/line_tokenizer.go
+++ b/pkg/pattern/drain/line_tokenizer.go
@@ -1,25 +1,96 @@
package drain
-import "strings"
+import (
+ "strings"
+ "unicode"
+)
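+// LineTokenizer splits a log line into tokens. Tokenize also returns an opaque state value, and
+// Join consumes that state, which lets tokenizers that track spacing rebuild the original line exactly.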
type LineTokenizer interface {
- Tokenize(line string) []string
- Join(tokens []string) string
+ Tokenize(line string) ([]string, interface{})
+ Join(tokens []string, state interface{}) string
}
type spacesTokenizer struct{}
-func (spacesTokenizer) Tokenize(line string) []string {
- return strings.Split(line, " ")
+func (spacesTokenizer) Tokenize(line string) ([]string, interface{}) {
+ return strings.Split(line, " "), nil
}
-func (spacesTokenizer) Join(tokens []string) string {
+func (spacesTokenizer) Join(tokens []string, _ interface{}) string {
return strings.Join(tokens, " ")
}
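+// punctuationTokenizer splits a line on spaces and punctuation, emitting each punctuation
+// character as its own token. The two [128]rune arrays are ASCII lookup tables:
+// includeDelimiters forces a split on characters that unicode.IsPunct misses (such as '='),
+// while excludeDelimiters keeps characters such as '_' and '-' inside the surrounding token.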
+type punctuationTokenizer struct {
+ includeDelimiters [128]rune
+ excludeDelimiters [128]rune
+}
+
+func newPunctuationTokenizer() *punctuationTokenizer {
+ var included [128]rune
+ var excluded [128]rune
+ included['='] = 1
+ excluded['_'] = 1
+ excluded['-'] = 1
+ return &punctuationTokenizer{
+ includeDelimiters: included,
+ excludeDelimiters: excluded,
+ }
+}
+
+func (p *punctuationTokenizer) Tokenize(line string) ([]string, interface{}) {
+ tokens := make([]string, len(line)) // Worst case: every character is punctuation
+ spacesAfter := make([]int, strings.Count(line, " ")) // Could be a bitmap, but it's not worth it for a few bytes.
+
+ start := 0
+ nextTokenIdx := 0
+ nextSpaceIdx := 0
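+ // Single pass over the line: letters, digits, and excluded delimiters extend the current token;
+ // spaces and included/punctuation characters terminate it. Each punctuation character becomes its
+ // own token, and each space records the index of the preceding token in spacesAfter so Join can
+ // restore the original spacing.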
+ for i, char := range line {
+ if unicode.IsLetter(char) || unicode.IsNumber(char) || char < 128 && p.excludeDelimiters[char] != 0 {
+ continue
+ }
+ included := char < 128 && p.includeDelimiters[char] != 0
+ if char == ' ' || included || unicode.IsPunct(char) {
+ if i > start {
+ tokens[nextTokenIdx] = line[start:i]
+ nextTokenIdx++
+ }
+ if char == ' ' {
+ spacesAfter[nextSpaceIdx] = nextTokenIdx - 1
+ nextSpaceIdx++
+ } else {
+ tokens[nextTokenIdx] = line[i : i+1]
+ nextTokenIdx++
+ }
+ start = i + 1
+ }
+ }
+
+ if start < len(line) {
+ tokens[nextTokenIdx] = line[start:]
+ nextTokenIdx++
+ }
+
+ return tokens[:nextTokenIdx], spacesAfter[:nextSpaceIdx]
+}
+
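+// Join reverses Tokenize: tokens are concatenated in order, and one space is written after
+// token i for every occurrence of i in the spacesAfter state produced by Tokenize.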
+func (p *punctuationTokenizer) Join(tokens []string, state interface{}) string {
+ spacesAfter := state.([]int)
+ strBuilder := strings.Builder{}
+ spacesIdx := 0
+ for i, token := range tokens {
+ strBuilder.WriteString(token)
+ for spacesIdx < len(spacesAfter) && i == spacesAfter[spacesIdx] {
+ // One entry for each space following the token
+ strBuilder.WriteRune(' ')
+ spacesIdx++
+ }
+ }
+ return strBuilder.String()
+}
+
type splittingTokenizer struct{}
-func (splittingTokenizer) Tokenize(line string) []string {
+func (splittingTokenizer) Tokenize(line string) ([]string, interface{}) {
numEquals := strings.Count(line, "=")
numColons := strings.Count(line, ":")
numSpaces := strings.Count(line, " ")
@@ -32,24 +103,31 @@ func (splittingTokenizer) Tokenize(line string) []string {
}
tokens := make([]string, 0, expectedTokens)
+ spacesAfter := make([]int, 0, strings.Count(line, " "))
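+ // Record the index of every token that was followed by a space so Join can restore exactly those spaces.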
for _, token := range strings.SplitAfter(line, keyvalSeparator) {
- tokens = append(tokens, strings.Split(token, " ")...)
+ words := strings.Split(token, " ")
+ for i, entry := range words {
+ tokens = append(tokens, entry)
+ if i == len(words)-1 {
+ continue
+ }
+ spacesAfter = append(spacesAfter, len(tokens)-1)
+ }
}
- return tokens
+ return tokens, spacesAfter
}
-func (splittingTokenizer) Join(tokens []string) string {
- var builder strings.Builder
- for _, token := range tokens {
- if strings.HasSuffix(token, "=") || strings.HasSuffix(token, ":") {
- builder.WriteString(token)
- } else {
- builder.WriteString(token + " ")
+func (splittingTokenizer) Join(tokens []string, state interface{}) string {
+ spacesAfter := state.([]int)
+ strBuilder := strings.Builder{}
+ spacesIdx := 0
+ for i, token := range tokens {
+ strBuilder.WriteString(token)
+ for spacesIdx < len(spacesAfter) && i == spacesAfter[spacesIdx] {
+ // One entry for each space following the token
+ strBuilder.WriteRune(' ')
+ spacesIdx++
}
}
- output := builder.String()
- if output[len(output)-1] == ' ' {
- return output[:len(output)-1]
- }
- return output
+ return strBuilder.String()
}
diff --git a/pkg/pattern/drain/line_tokenizer_test.go b/pkg/pattern/drain/line_tokenizer_test.go
index 8cb541a61b629..1eda1b51068a3 100644
--- a/pkg/pattern/drain/line_tokenizer_test.go
+++ b/pkg/pattern/drain/line_tokenizer_test.go
@@ -1,59 +1,163 @@
package drain
import (
- "reflect"
"testing"
+
+ "github.com/stretchr/testify/require"
)
-func TestSplittingTokenizer_Tokenize(t *testing.T) {
- tokenizer := splittingTokenizer{}
+type TestCase struct {
+ name string
+ line string
+ want map[string][]string
+}
- tests := []struct {
- name string
- line string
- want []string
- }{
- {
- name: "Test with equals sign",
- line: "key1=value1 key2=value2",
- want: []string{"key1=", "value1", "key2=", "value2"},
+const typePunctuation = "punctuation"
+const typeSplitting = "splitting"
+
+var testCases = []TestCase{
+ {
+ name: "Test with equals sign",
+ line: "key1=value1 key2=value2",
+ want: map[string][]string{
+ typePunctuation: {"key1", "=", "value1", "key2", "=", "value2"},
+ typeSplitting: {"key1=", "value1", "key2=", "value2"},
},
- {
- name: "Test with colon",
- line: "key1:value1 key2:value2",
- want: []string{"key1:", "value1", "key2:", "value2"},
+ },
+ {
+ name: "Test with colon",
+ line: "key1:value1 key2:value2",
+ want: map[string][]string{
+ typePunctuation: {"key1", ":", "value1", "key2", ":", "value2"},
+ typeSplitting: {"key1:", "value1", "key2:", "value2"},
},
- {
- name: "Test with mixed delimiters, more = than :",
- line: "key1=value1 key2:value2 key3=value3",
- want: []string{"key1=", "value1", "key2:value2", "key3=", "value3"},
+ },
+ {
+ name: "Test with mixed delimiters, more = than :",
+ line: "key1=value1 key2:value2 key3=value3",
+ want: map[string][]string{
+ typePunctuation: {"key1", "=", "value1", "key2", ":", "value2", "key3", "=", "value3"},
+ typeSplitting: {"key1=", "value1", "key2:value2", "key3=", "value3"},
},
+ },
+ {
+ name: "Test with mixed delimiters, more : than =",
+ line: "key1:value1 key2:value2 key3=value3",
+ want: map[string][]string{
+ typePunctuation: {"key1", ":", "value1", "key2", ":", "value2", "key3", "=", "value3"},
+ typeSplitting: {"key1:", "value1", "key2:", "value2", "key3=value3"},
+ },
+ },
+ {
+ name: "Dense json",
+ line: `{"key1":"value1","key2":"value2","key3":"value3"}`,
+ want: map[string][]string{
+ typePunctuation: {`{`, `"`, `key1`, `"`, `:`, `"`, `value1`, `"`, `,`, `"`, `key2`, `"`, `:`, `"`, `value2`, `"`, `,`, `"`, `key3`, `"`, `:`, `"`, `value3`, `"`, `}`},
+ typeSplitting: {`{"key1":`, `"value1","key2":`, `"value2","key3":`, `"value3"}`},
+ },
+ },
+ {
+ name: "json with spaces",
+ line: `{"key1":"value1", "key2":"value2", "key3":"value3"}`,
+ want: map[string][]string{
+ typePunctuation: {`{`, `"`, `key1`, `"`, `:`, `"`, `value1`, `"`, `,`, `"`, `key2`, `"`, `:`, `"`, `value2`, `"`, `,`, `"`, `key3`, `"`, `:`, `"`, `value3`, `"`, `}`},
+ typeSplitting: {`{"key1":`, `"value1",`, `"key2":`, `"value2",`, `"key3":`, `"value3"}`},
+ },
+ },
+ {
+ name: "logfmt multiword values",
+ line: `key1=value1 key2=value2 msg="this is a message"`,
+ want: map[string][]string{
+ typePunctuation: {"key1", "=", "value1", "key2", "=", "value2", "msg", "=", `"`, `this`, "is", "a", `message`, `"`},
+ typeSplitting: {"key1=", "value1", "key2=", "value2", "msg=", `"this`, "is", "a", `message"`},
+ },
+ },
+ {
+ name: "longer line",
+ line: "09:17:38.033366 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_counter.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_Metric.action_is_drop.reason_is_queue_full 0 1717060658",
+ want: map[string][]string{
+ typePunctuation: {`09`, `:`, `17`, `:`, `38`, `.`, `033366`, `▶`, `INFO`, `route`, `ops`, `sending`, `to`, `dest`, `https`, `:`, `/`, `/`, `graphite-cortex-ops-blocks-us-east4`, `.`, `grafana`, `.`, `net`, `/`, `graphite`, `/`, `metrics`, `:`, `service_is_carbon-relay-ng`, `.`, `instance_is_carbon-relay-ng-c665b7b-j2trk`, `.`, `mtype_is_counter`, `.`, `dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics`, `.`, `unit_is_Metric`, `.`, `action_is_drop`, `.`, `reason_is_queue_full`, `0`, `1717060658`},
+ typeSplitting: {`09:`, `17:`, `38.033366`, `▶`, `INFO`, ``, `route`, `ops`, `sending`, `to`, `dest`, `https:`, `//graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics:`, ``, `service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_counter.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_Metric.action_is_drop.reason_is_queue_full`, `0`, `1717060658`},
+ },
+ },
+ {
+ name: "Consecutive split points: equals followed by space",
+ line: `ts=2024-05-30T12:50:36.648377186Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`,
+ want: map[string][]string{
+ typePunctuation: {`ts`, `=`, `2024-05-30T12`, `:`, `50`, `:`, `36`, `.`, `648377186Z`, `caller`, `=`, `scheduler_processor`, `.`, `go`, `:`, `143`, `level`, `=`, `warn`, `msg`, `=`, `"`, `error`, `contacting`, `scheduler`, `"`, `err`, `=`, `"`, `rpc`, `error`, `:`, `code`, `=`, `Unavailable`, `desc`, `=`, `connection`, `error`, `:`, `desc`, `=`, `\`, `"`, `error`, `reading`, `server`, `preface`, `:`, `EOF`, `\`, `"`, `"`, `addr`, `=`, `10`, `.`, `0`, `.`, `151`, `.`, `101`, `:`, `9095`},
+ typeSplitting: {"ts=", "2024-05-30T12:50:36.648377186Z", "caller=", "scheduler_processor.go:143", "level=", "warn", "msg=", "\"error", "contacting", "scheduler\"", "err=", "\"rpc", "error:", "code", "=", ``, "Unavailable", "desc", "=", ``, "connection", "error:", "desc", "=", ``, `\"error`, "reading", "server", "preface:", `EOF\""`, "addr=", "10.0.151.101:9095"},
+ },
+ },
+ {
+ name: "Only punctuation",
+ line: `!@£$%^&*()`,
+ want: map[string][]string{
+ typePunctuation: {`!`, `@`, `£$`, `%`, `^`, `&`, `*`, `(`, `)`},
+ typeSplitting: {`!@£$%^&*()`},
+ },
+ },
+}
+
+func TestTokenizer_Tokenize(t *testing.T) {
+ tests := []struct {
+ name string
+ tokenizer LineTokenizer
+ }{
{
- name: "Test with mixed delimiters, more : than =",
- line: "key1:value1 key2:value2 key3=value3",
- want: []string{"key1:", "value1", "key2:", "value2", "key3=value3"},
+ name: typePunctuation,
+ tokenizer: newPunctuationTokenizer(),
},
{
- name: "Dense json",
- line: `{"key1":"value1","key2":"value2","key3":"value3"}`,
- want: []string{`{"key1":`, `"value1","key2":`, `"value2","key3":`, `"value3"}`},
+ name: typeSplitting,
+ tokenizer: splittingTokenizer{},
},
+ }
+
+ for _, tt := range tests {
+ for _, tc := range testCases {
+ t.Run(tt.name+":"+tc.name, func(t *testing.T) {
+ got, _ := tt.tokenizer.Tokenize(tc.line)
+ require.Equal(t, tc.want[tt.name], got)
+ })
+ }
+ }
+}
+
+func TestTokenizer_TokenizeAndJoin(t *testing.T) {
+ tests := []struct {
+ name string
+ tokenizer LineTokenizer
+ }{
{
- name: "json with spaces",
- line: `{"key1":"value1", "key2":"value2", "key3":"value3"}`,
- want: []string{`{"key1":`, `"value1",`, `"key2":`, `"value2",`, `"key3":`, `"value3"}`},
+ name: typePunctuation,
+ tokenizer: newPunctuationTokenizer(),
},
{
- name: "logfmt multiword values",
- line: `key1=value1 key2=value2 msg="this is a message"`,
- want: []string{"key1=", "value1", "key2=", "value2", "msg=", `"this`, "is", "a", `message"`},
+ name: typeSplitting,
+ tokenizer: splittingTokenizer{},
},
}
for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := tokenizer.Tokenize(tt.line); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("splittingTokenizer.Tokenize() = %v, want %v", got, tt.want)
+ for _, tc := range testCases {
+ t.Run(tt.name+":"+tc.name, func(t *testing.T) {
+ got := tt.tokenizer.Join(tt.tokenizer.Tokenize(tc.line))
+ require.Equal(t, tc.line, got)
+ })
+ }
+ }
+}
+
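+// Benchmarks Tokenize throughput for the punctuation tokenizer across the shared test cases.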
+func BenchmarkPunctuationTokenizer(b *testing.B) {
+ tokenizer := newPunctuationTokenizer()
+
+ for _, tt := range testCases {
+ tc := tt
+ b.Run(tc.name, func(b *testing.B) {
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ tokenizer.Tokenize(tc.line)
}
})
}
diff --git a/pkg/pattern/drain/log_cluster.go b/pkg/pattern/drain/log_cluster.go
index af5932d16f706..cffff3abe5215 100644
--- a/pkg/pattern/drain/log_cluster.go
+++ b/pkg/pattern/drain/log_cluster.go
@@ -11,16 +11,18 @@ import (
)
type LogCluster struct {
- id int
- Size int
- Tokens []string
- Stringer func([]string) string
- Chunks Chunks
+ id int
+ Size int
+ Tokens []string
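+ // TokenState holds tokenizer-specific state (e.g. recorded space positions) passed back to Stringer when formatting the pattern.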
+ TokenState interface{}
+ Stringer func([]string, interface{}) string
+
+ Chunks Chunks
}
func (c *LogCluster) String() string {
if c.Stringer != nil {
- return c.Stringer(c.Tokens)
+ return c.Stringer(c.Tokens, c.TokenState)
}
return strings.Join(c.Tokens, " ")
}
diff --git a/pkg/pattern/drain/testdata/grafana-ruler.txt b/pkg/pattern/drain/testdata/grafana-ruler.txt
new file mode 100644
index 0000000000000..54b6854d9e172
--- /dev/null
+++ b/pkg/pattern/drain/testdata/grafana-ruler.txt
@@ -0,0 +1,50000 @@
+logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:15.806655602Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.805113753Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=103548 slug=gen2 t=2024-05-29T13:44:15.805016017Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.804597912Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.802571162Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.801740193Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.800327814Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.799945019Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.799932951Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:15.799982989Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:15.798839218Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager.persist user=14927 slug=rstsoftware t=2024-05-29T13:44:15.798496844Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.340653ms
+level=debug ts=2024-05-29T13:44:15.797668756Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.797275166Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.796750449Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=84535 slug=arweave instance= t=2024-05-29T13:44:15.796640981Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=84535 slug=arweave t=2024-05-29T13:44:15.796542294Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=wcs9-tds-devus-jenkins-w, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-jenkins-w-6c6cb984d8-qrpm7, uid=d229ff35-bf4d-4bb5-8791-60b0a3bebca8" t=2024-05-29T13:44:15.796130498Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=vault, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65" t=2024-05-29T13:44:15.796062736Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=vault, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d" t=2024-05-29T13:44:15.795990925Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.795593051Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-web, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivets-web-6fc5b6f9c5-6spps, uid=b75b2425-e66c-4869-94f7-cfecc5d4c935" t=2024-05-29T13:44:15.795680228Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=630397 slug=tatin instance= t=2024-05-29T13:44:15.795542988Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-web, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthts-web-57f5b6f56b-bdmh9, uid=8f6b5224-94ce-4f5d-ba08-03f9fc2f572f" t=2024-05-29T13:44:15.795397351Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=102553 slug=flownative instance= t=2024-05-29T13:44:15.795103234Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=102553 slug=flownative version=1 fingerprint=4ad9e35be0f80ca3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.79499903Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.794695854s EvaluationString:}]" duration=116.038803ms
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthts-app-989d79dbb-lwc9p, uid=a6cfb6f8-edfe-4c28-8435-acb6d54f3599" t=2024-05-29T13:44:15.795068084Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-c59n4, uid=4d533dcf-4e6c-4ffe-a0fc-caa6e617c8c8" t=2024-05-29T13:44:15.794992842Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-4nw98, uid=855af10e-bb32-49c1-8a47-0fba814e437c" t=2024-05-29T13:44:15.794979122Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthts-app-989d79dbb-lwc9p, uid=a6cfb6f8-edfe-4c28-8435-acb6d54f3599" t=2024-05-29T13:44:15.794753977Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-4nw98, uid=855af10e-bb32-49c1-8a47-0fba814e437c" t=2024-05-29T13:44:15.794631294Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsqausauthts-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthts-utils-59f788556b-xrfpx, uid=d195032e-df70-4672-bc90-79692b1411af" t=2024-05-29T13:44:15.794322337Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsqalivets-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivets-utils-75b748978f-r2vkj, uid=1d39d0d7-d483-427b-ba91-45d897674698" t=2024-05-29T13:44:15.794284465Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsdevauthts-utils, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthts-utils-7f54f8d7b4-njddr, uid=352d7df2-7832-41f3-ad3e-cbe1a060c968" t=2024-05-29T13:44:15.793876757Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsdevauthts-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthts-utils-7f54f8d7b4-njddr, uid=352d7df2-7832-41f3-ad3e-cbe1a060c968" t=2024-05-29T13:44:15.793846886Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf" t=2024-05-29T13:44:15.793416796Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60" t=2024-05-29T13:44:15.793216421Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=172772 slug=ppbtradingtribe instance="datasource_uid=p06gSxS7k, ref_id=A" t=2024-05-29T13:44:15.793080651Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:15.79304032Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833" t=2024-05-29T13:44:15.792980836Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227" t=2024-05-29T13:44:15.792956616Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833" t=2024-05-29T13:44:15.792793782Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=412141 slug=sharethrough t=2024-05-29T13:44:15.79278731Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=412141 slug=sharethrough instance="datasource_uid=pFBylkiVz, ref_id=Swap Usage for Alert" t=2024-05-29T13:44:15.792775073Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412141 slug=sharethrough instance="datasource_uid=pFBylkiVz, ref_id=Swap Usage for Alert" t=2024-05-29T13:44:15.792756002Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a" t=2024-05-29T13:44:15.79227249Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7" t=2024-05-29T13:44:15.791954212Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d" t=2024-05-29T13:44:15.791863631Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.791738618Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c" t=2024-05-29T13:44:15.791660547Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=kaniko1, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.791526073Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.791206493Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=jnlp, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.791456811Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.79134478Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.791225391Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791129917Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791114955Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791100679Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.791027617Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=15338 slug=rstsoftwarerc instance= t=2024-05-29T13:44:15.790951656Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7" t=2024-05-29T13:44:15.791010011Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=65a68c433031b4e0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.790598463Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.78875161s EvaluationString:}]" duration=1.693079007s
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49" t=2024-05-29T13:44:15.790593572Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c" t=2024-05-29T13:44:15.790564871Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52" t=2024-05-29T13:44:15.790229164Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c" t=2024-05-29T13:44:15.790085591Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18" t=2024-05-29T13:44:15.79004016Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced" t=2024-05-29T13:44:15.789860646Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52" t=2024-05-29T13:44:15.78960996Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa" t=2024-05-29T13:44:15.789407005Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced" t=2024-05-29T13:44:15.789216261Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:15.789039986Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=rcn" t=2024-05-29T13:44:15.789011178Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed" t=2024-05-29T13:44:15.788904162Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788771442Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788761161Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788725479Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dish" t=2024-05-29T13:44:15.788780219Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788701028Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788691799Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.78866505Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788646347Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788639897Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.7886198Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d" t=2024-05-29T13:44:15.78870732Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.7885482Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.78854173Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788522663Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788502704Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788472468Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788464205Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.78841334Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788374794Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788330559Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788320822Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788310995Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=473762 slug=intentiq t=2024-05-29T13:44:15.788261794Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=473762 slug=intentiq version=35 fingerprint=0bc4b6f46a852420 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.788200731Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.787878355s EvaluationString:}]" duration=15.345212ms
+logger=ngalert.scheduler user=893151 slug=cmtdsnp version=1 fingerprint=0db5016ab8b43d15 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.781149137Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a7a0} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a800} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764303759s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a910} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a958} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a9c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76433916s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-application-controller, 
instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9aa70} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9ae60} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9aeb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764355151s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9afa8} B:{Var:B 
Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9b008} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9b070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764373772s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc03af9b118} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc03af9b178} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc03af9b1e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764387162s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc030f5c0d0} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc030f5c160} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc030f5c1c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764408623s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c3e8} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c4f8} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764423283s EvaluationString:[ var='A' 
labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c680} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c6e8} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c5d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764439703s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c7d0} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, 
uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c840} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c8a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764453224s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c9f8} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5ca80} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c968}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764468124s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ]} {Instance:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cba0} B:{Var:B Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cc08} C:{Var:C Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cb40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764487155s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ]} {Instance:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5ccd8} B:{Var:B Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cd40} C:{Var:C Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cda8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764503805s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, 
uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd Value:0xc030f5cf88} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd Value:0xc030f5ce50} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd Value:0xc030f5cf38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764517896s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d Value:0xc030f5d158} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d Value:0xc030f5d1b0} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d Value:0xc030f5d208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764531786s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, 
uid=f5320297-1117-400f-9704-d4f43fa1127d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0 Value:0xc030f5d2a8} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0 Value:0xc030f5d420} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0 Value:0xc030f5d480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764558377s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce Value:0xc030f5d520} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce Value:0xc030f5d578} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce Value:0xc030f5d5c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764572477s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65 Value:0xc030f5d670} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65 Value:0xc030f5d6c8} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65 Value:0xc030f5d718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764585898s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd Value:0xc030f5d7c0} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd Value:0xc030f5d820} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd Value:0xc030f5d920}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764600318s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d Value:0xc030f5d9d0} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d Value:0xc030f5da30} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d Value:0xc030f5da80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764613268s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0 Value:0xc030f5db20} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0 Value:0xc030f5db80} 
C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0 Value:0xc030f5dbd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764631609s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg, uid=2142b2fa-c391-4493-9f17-ecab47b386c0} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce Value:0xc030f5dd70} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce Value:0xc030f5ddc8} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce Value:0xc030f5dc80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764645279s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65 Value:0xc030f5def8} 
B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65 Value:0xc030f5df50} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65 Value:0xc030f5dfa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76465865s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769 Value:0xc007df22f8} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769 Value:0xc007df2170} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769 Value:0xc007df22a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76467379s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, 
instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d Value:0xc007df2448} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d Value:0xc007df2498} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d Value:0xc007df24e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764687301s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2 Value:0xc007df2b10} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2 Value:0xc007df2ed8} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2 Value:0xc007df3318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764702761s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced Value:0xc007df3a90} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced Value:0xc007df3e50} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced Value:0xc039f66028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764718062s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a Value:0xc039f660c8} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a Value:0xc039f66120} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a Value:0xc039f66178}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764731882s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, 
uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53 Value:0xc039f66318} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53 Value:0xc039f66368} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53 Value:0xc039f662c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764763083s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa Value:0xc039f66410} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa Value:0xc039f66470} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa Value:0xc039f664c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764779893s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa} value=0 ], [ 
var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05 Value:0xc039f66570} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05 Value:0xc039f665c8} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05 Value:0xc039f66620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764794374s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18 Value:0xc039f666c8} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18 Value:0xc039f66720} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18 Value:0xc039f66778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764809464s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, 
instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c Value:0xc039f66820} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c Value:0xc039f66878} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c Value:0xc039f668d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764827125s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e Value:0xc039f66970} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e Value:0xc039f669d0} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e Value:0xc039f66a28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764844805s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52 Value:0xc039f66b40} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52 Value:0xc039f66b90} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52 Value:0xc039f66ae0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764859176s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc Value:0xc039f66ce8} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc Value:0xc039f66c40} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc Value:0xc039f66c98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764873126s EvaluationString:[ 
var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d Value:0xc039f66de8} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d Value:0xc039f66e40} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d Value:0xc039f66d90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764887547s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769 Value:0xc039f66ef0} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769 Value:0xc039f66f48} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769 Value:0xc039f66fa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764901857s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthcrs-app-56bc9488c4-tkppb, uid=19609211-799f-4cc4-a64c-3362d923f769} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d Value:0xc039f67050} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d Value:0xc039f670b0} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d Value:0xc039f67108}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764914537s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv, uid=d87cce81-2d06-4007-b374-2a5a83761d1d} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2 Value:0xc039f671b0} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2 
Value:0xc039f67208} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2 Value:0xc039f67260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764928968s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh, uid=9a76f4f5-2b38-47cf-a5a4-5a833d2759f2} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced Value:0xc039f673b8} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced Value:0xc039f67308} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced Value:0xc039f67360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764945518s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a Value:0xc039f674b0} B:{Var:B Labels:cluster=tds-np-cluster, 
container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a Value:0xc039f67510} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a Value:0xc039f67460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764957669s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivecrs-app-6d66bccddd-hgx74, uid=8c4026f5-4ee1-4123-bc8f-57aeb412532a} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53 Value:0xc039f675c0} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53 Value:0xc039f67610} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53 Value:0xc039f67670}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764971009s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt, uid=9ce20c7e-8191-4dce-af1d-0a7635d43f53} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa Value:0xc039f67720} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa Value:0xc039f67780} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa Value:0xc039f677d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764983579s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05 Value:0xc039f67878} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05 Value:0xc039f678c8} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05 Value:0xc039f67928}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76499886s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm, uid=70196c6b-2e71-44d9-adfc-63c54d9b1c05} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18 Value:0xc039f679c8} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18 Value:0xc039f67a20} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18 Value:0xc039f67a78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76501074s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c Value:0xc039f67b20} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c Value:0xc039f67b78} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c Value:0xc039f67bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765025851s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c} value=0 ]} 
{Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e Value:0xc039f67d20} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e Value:0xc039f67c78} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e Value:0xc039f67cd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765039801s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivecrs-app-c9d7c4c9-jqjpv, uid=07c9df8a-44c1-4891-9b39-b83b86e8919e} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52 Value:0xc039f67dc0} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52 Value:0xc039f67e18} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52 Value:0xc039f67e70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765053501s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc Value:0xc039f67f70} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc Value:0xc039f67fc8} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc Value:0xc039f67f20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765066942s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-fcljp, uid=66b34df8-8f91-497d-874e-e78827970bdc} value=0 ]} {Instance:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d Value:0xc03d388070} B:{Var:B Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d Value:0xc03d3880c8} C:{Var:C Labels:cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d Value:0xc03d388120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765097643s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivecrs-app-6cf8645f7b-zcz4f, uid=0280b90d-a8da-4a3f-a10f-90f4e7d2ee3d} value=0 ]} {Instance:cluster=tds-np-cluster, container=dex, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=dex, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c Value:0xc03d388290} B:{Var:B Labels:cluster=tds-np-cluster, container=dex, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c Value:0xc03d3881d8} C:{Var:C Labels:cluster=tds-np-cluster, container=dex, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c Value:0xc03d388230}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765112593s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=dex, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=dex, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=dex, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c} value=0 ]} {Instance:cluster=tds-np-cluster, container=dex, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=dex, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c Value:0xc03d388340} B:{Var:B Labels:cluster=tds-np-cluster, container=dex, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c Value:0xc03d388450} C:{Var:C Labels:cluster=tds-np-cluster, container=dex, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c Value:0xc03d388528}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765126413s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=dex, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, 
uid=b2313800-1bdd-4857-b3f9-211d6bae131c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=dex, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=dex, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-dex-server-6c87968c75-qqvdk, uid=b2313800-1bdd-4857-b3f9-211d6bae131c} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 Value:0xc03d3886e0} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 Value:0xc03d388798} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 Value:0xc03d388688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765142264s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d3888a8} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d388908} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d388960}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765158244s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d388c28} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d388a68} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d388b78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765171556s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d388cd0} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, 
uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d388d30} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d388d88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765184626s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d388e88} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d388ee0} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d388e38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765197737s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389038} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389098} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d3890f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765212627s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 Value:0xc03d3892f8} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 Value:0xc03d389240} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 Value:0xc03d3892a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765227147s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d3893f0} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d389458} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d3894b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765246268s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d389600} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d389660} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d389720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765262588s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d3898f8} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d389950} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d389898}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765575208s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d389a40} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d389ac0} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d389b28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765607309s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ], [ var='B' labels={cluster=tds-np-cluster, 
container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389cb0} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389d00} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389c58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765621829s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc01b17c000} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc03d389ea0} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc03d389f58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765635919s EvaluationString:[ var='A' 
labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c120} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c180} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c0c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76564905s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c278} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c2d0} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, 
namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76566618s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17c378} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17c3d0} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17c420}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76599224s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17c510} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17c570} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17c4c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76601021s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc01b17c618} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc01b17c668} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc01b17c6c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766028111s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, 
uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c808} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c760} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c7b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766043021s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c900} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c958} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c8b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766056622s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d State:Normal 
Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17caa8} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17ca00} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17ca50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766070182s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17cb50} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17cba0} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17cbf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766087283s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ]} {Instance:cluster=tds-np-cluster, 
container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17ccc0} B:{Var:B Labels:cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17cd28} C:{Var:C Labels:cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17cd90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766100093s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17cf60} B:{Var:B Labels:cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17ce48} C:{Var:C Labels:cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17cef8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766115693s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, 
container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d100} B:{Var:B Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d030} C:{Var:C Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766129774s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0} value=0 ]} {Instance:cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d228} B:{Var:B Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d288} C:{Var:C Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d1c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766143314s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0} value=0 ], [ var='B' 
labels={cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0} value=0 ]} {Instance:cluster=tds-np-cluster, container=jnlp, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=jnlp, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d350} B:{Var:B Labels:cluster=tds-np-cluster, container=jnlp, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d3a8} C:{Var:C Labels:cluster=tds-np-cluster, container=jnlp, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766158405s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=jnlp, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=jnlp, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=jnlp, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=jnlp, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=jnlp, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d518} B:{Var:B Labels:cluster=tds-np-cluster, container=jnlp, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d578} C:{Var:C Labels:cluster=tds-np-cluster, container=jnlp, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d4b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766172735s EvaluationString:[ var='A' 
labels={cluster=tds-np-cluster, container=jnlp, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=jnlp, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=jnlp, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=kaniko1, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=kaniko1, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d638} B:{Var:B Labels:cluster=tds-np-cluster, container=kaniko1, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d688} C:{Var:C Labels:cluster=tds-np-cluster, container=kaniko1, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766187725s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=kaniko1, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=kaniko1, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=kaniko1, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=kaniko1, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=kaniko1, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d7f0} B:{Var:B Labels:cluster=tds-np-cluster, container=kaniko1, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d860} C:{Var:C Labels:cluster=tds-np-cluster, container=kaniko1, 
instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d8c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766203076s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=kaniko1, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=kaniko1, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=kaniko1, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=kaniko2, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=kaniko2, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d9d8} B:{Var:B Labels:cluster=tds-np-cluster, container=kaniko2, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17da38} C:{Var:C Labels:cluster=tds-np-cluster, container=kaniko2, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17d978}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766216616s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=kaniko2, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=kaniko2, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=kaniko2, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=kaniko2, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=kaniko2, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17db60} B:{Var:B 
Labels:cluster=tds-np-cluster, container=kaniko2, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17dbb8} C:{Var:C Labels:cluster=tds-np-cluster, container=kaniko2, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17db00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766229717s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=kaniko2, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=kaniko2, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=kaniko2, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8 Value:0xc01b17dcc8} B:{Var:B Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8 Value:0xc01b17dd20} C:{Var:C Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8 Value:0xc01b17dc70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766244637s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8} value=0 ]} {Instance:cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, 
container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c Value:0xc01b17de60} B:{Var:B Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c Value:0xc01b17dec0} C:{Var:C Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c Value:0xc01b17de10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766258198s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c} value=0 ]} {Instance:cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8 Value:0xc01b17dfd0} B:{Var:B Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8 Value:0xc007bfa558} C:{Var:C Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8 Value:0xc01b17df70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766275218s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-mesh-6fb55bc86c-hj6q4, uid=a4b94ad1-0a6a-41ac-8d3a-6ce7fe6671e8} value=0 ]} {Instance:cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, 
namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c Value:0xc007bfa978} B:{Var:B Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c Value:0xc007bfa9e8} C:{Var:C Labels:cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c Value:0xc007bfaa50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766289788s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=mesh, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c} value=0 ]} {Instance:cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc007bfacc0} B:{Var:B Labels:cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc007bfb200} C:{Var:C Labels:cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc007bfb460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766302519s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, 
pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=node, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=node, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc007bfb750} B:{Var:B Labels:cluster=tds-np-cluster, container=node, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc007bfb7c8} C:{Var:C Labels:cluster=tds-np-cluster, container=node, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc007bfb648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766315829s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=node, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=node, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=node, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc007bfb9b8} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc007bfba20} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc007bfbaa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76632805s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc007bfbc70} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc017514000} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc017514210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76634059s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc017514340} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc0175143b0} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc017514460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76635318s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, 
namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc017514548} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc0175145c0} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc017514650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766368291s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc017514728} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc017514790} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 
Value:0xc0175147f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766381841s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc017514918} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc0175149c8} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc0175148b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766396332s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc017514c70} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 
Value:0xc017514ab8} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc017514bd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766409542s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc017514d38} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc017514da0} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc017514e18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766422132s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, 
uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc0175150d8} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc017514f48} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc017514fb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766435853s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc0175151a8} B:{Var:B Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc017515208} C:{Var:C Labels:cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc017515278}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766450273s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgresql, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgresql, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41 Value:0xc017515370} B:{Var:B Labels:cluster=tds-np-cluster, container=postgresql, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41 Value:0xc0175153e0} C:{Var:C Labels:cluster=tds-np-cluster, container=postgresql, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41 Value:0xc017515468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766463784s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgresql, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgresql, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgresql, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41} value=0 ]} {Instance:cluster=tds-np-cluster, container=postgresql, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=postgresql, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41 Value:0xc017515558} B:{Var:B Labels:cluster=tds-np-cluster, container=postgresql, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41 Value:0xc017515610} C:{Var:C Labels:cluster=tds-np-cluster, container=postgresql, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41 Value:0xc017515710}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766478724s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=postgresql, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=postgresql, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=postgresql, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-strapi-postgresql-0, uid=9985b825-e8e4-4f35-bbcc-287e655f0f41} value=0 ]} {Instance:cluster=tds-np-cluster, container=redis, 
instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=redis, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc Value:0xc017515980} B:{Var:B Labels:cluster=tds-np-cluster, container=redis, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc Value:0xc017515850} C:{Var:C Labels:cluster=tds-np-cluster, container=redis, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc Value:0xc017515900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766493104s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=redis, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=redis, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=redis, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc} value=0 ]} {Instance:cluster=tds-np-cluster, container=redis, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=redis, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc Value:0xc017515b08} B:{Var:B Labels:cluster=tds-np-cluster, container=redis, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc Value:0xc017515b80} C:{Var:C Labels:cluster=tds-np-cluster, container=redis, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc Value:0xc017515a48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766505865s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=redis, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=redis, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=redis, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, 
namespace=tds-devops, pod=argocd-redis-7d8d46cc7f-m7gpl, uid=a3171dae-0648-434f-b9c9-068dd86699bc} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a Value:0xc017515c60} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a Value:0xc017515d48} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a Value:0xc017515e00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766518805s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b Value:0xc017515f80} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b Value:0xc017515fe0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b Value:0xc017515ed8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766531756s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, 
uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5 Value:0xc010050190} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5 Value:0xc0100501e0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5 Value:0xc010050238}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766543886s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd Value:0xc0100502f8} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd Value:0xc010050700} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd Value:0xc010050758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766557896s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd Value:0xc010050850} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd Value:0xc0100508b0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd Value:0xc010050800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766571407s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a Value:0xc010050958} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a Value:0xc0100509b0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a Value:0xc010051210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766585347s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b Value:0xc0100512b8} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b Value:0xc010051310} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b Value:0xc010051368}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766601518s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthsearch-app-master-546cd5cc-smnwh, uid=3bbaf094-2ea3-4764-bf8b-f9f3172e947b} value=0 ]} 
{Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5 Value:0xc010051468} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5 Value:0xc0100514b8} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5 Value:0xc010051510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766614698s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodauthsearch-app-master-68ffbdf94d-rbk2s, uid=a0661a71-a856-4072-b75e-9fb28aabf4d5} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd Value:0xc0100515b0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd Value:0xc010051600} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd Value:0xc010051658}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766630558s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, 
container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv, uid=8f84f0bc-5f32-4e3f-8670-9f60864759fd} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd Value:0xc0100517d0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd Value:0xc010051730} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd Value:0xc010051780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766647769s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-master, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv, uid=7cb4f9d2-b414-4070-ace3-ad51fb0f49cd} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 Value:0xc010051c50} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 Value:0xc010051cb0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, 
uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 Value:0xc010051d40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766662059s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd Value:0xc02a7540c0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd Value:0xc02a754110} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd Value:0xc02a754068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.7666789s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, 
uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a754270} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a7541d0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a754220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76669259s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a754310} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a754368} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a7543b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766705791s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ]} 
{Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a754458} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a7544a8} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a7544f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766721281s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 Value:0xc02a754600} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 Value:0xc02a754650} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 Value:0xc02a7545b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766735071s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0} value=0 ], [ var='B' 
labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd Value:0xc02a754768} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd Value:0xc02a7547c0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd Value:0xc02a754710}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766748572s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a754870} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a7548e8} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a754940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766771933s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a7549e0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a754a38} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a754aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766785663s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a754b50} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a754ba8} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a754c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766821454s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a754cd8} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a754d40} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a754db8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766832464s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a754eb0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a754f10} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a754f78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766844145s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a755090} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a7550f8} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a755030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766856295s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, 
uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a7551c0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755220} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766868135s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755340} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a7553a0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766879746s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf Value:0xc02a7554c8} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf Value:0xc02a755540} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf Value:0xc02a7555a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766893946s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, 
uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a755740} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a755670} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a7556d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766908177s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a7558f0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a755818} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a755880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766922317s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, 
instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a755a80} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a7559c0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a755a20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766935097s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755b50} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755bc0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755c38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766947418s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755d00} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755d60} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766960678s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf Value:0xc02a755e80} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-me
+level=debug ts=2024-05-29T13:44:15.788087186Z caller=remote_instance_store.go:51 user=151289 slug=everflow msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.786018318Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=615392 slug=shinemetrics instance="__name__=probe_success, config_version=1715008305715867392, instance=https://api.shine.fr/v2/referrals/liveness_check, job=Liveness Check referrals-v2, probe=Amsterdam" t=2024-05-29T13:44:15.785579974Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-replica-service-7b4df8ff7f-k4mzn" t=2024-05-29T13:44:15.785573489Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-replica-service-7b4df8ff7f-h8r69" t=2024-05-29T13:44:15.785527862Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-replica-service-7b4df8ff7f-9gbcz" t=2024-05-29T13:44:15.785475171Z level=debug msg="Keeping state" state=Normal
+level=error ts=2024-05-29T13:44:15.785177173Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-import-service-5c95f8f985-fdzbq" t=2024-05-29T13:44:15.785285397Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-forecast-service-75f5ddb88d-vprqs" t=2024-05-29T13:44:15.785102836Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-forecast-service-75f5ddb88d-nwfg2" t=2024-05-29T13:44:15.784965233Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-export-service-66dbcf8f5b-jrc4g" t=2024-05-29T13:44:15.784820075Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.784694596Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.784612816Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.784474628Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-budget-service-5d9b6c54f8-wjf9k" t=2024-05-29T13:44:15.784299856Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-budget-service-5d9b6c54f8-wjf9k" t=2024-05-29T13:44:15.784287937Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.784172425Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=budget-gateway-service-5bf9899ddb-4hj4v" t=2024-05-29T13:44:15.783938316Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=396586 slug=opengov t=2024-05-29T13:44:15.783836757Z level=debug msg="State manager processing evaluation results" resultCount=24
+level=debug ts=2024-05-29T13:44:15.783603379Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.783596503Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:15.78346786Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.783454581Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.78344714Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.783417446Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.783407599Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.scheduler user=163513 slug=dialpad version=35 fingerprint=c5a97915aa68b6b4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.783300388Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.782775149s EvaluationString:}]" duration=52.906714ms
+level=debug ts=2024-05-29T13:44:15.782165884Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.78157326Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.68816ms
+logger=ngalert.state.manager user=765158 slug=stellarmenus instance="__name__=up, instance=grafana-prod, job=Step Functions" t=2024-05-29T13:44:15.781423863Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.780992116Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=312340 slug=lakefs version=100 fingerprint=78b1f02b6c94c6b4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.78084665Z level=debug msg="Alert rule evaluated" results="[{Instance:TableName=control-plane-v2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=control-plane-v2 Value:0xc017202a68} C:{Var:C Labels:TableName=control-plane-v2 Value:0xc017202a60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.780331055s EvaluationString:[ var='B' labels={TableName=control-plane-v2} value=0 ], [ var='C' labels={TableName=control-plane-v2} value=0 ]}]" duration=44.56025ms
+logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.780918271Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.50355ms
+level=debug ts=2024-05-29T13:44:15.780526613Z caller=remote_instance_store.go:51 user=756904 slug=orbdatanfr msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.779528076Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.778315273Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.777510495Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=237629 slug=ocrolus t=2024-05-29T13:44:15.777076516Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.775967332Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.075377ms
+logger=ngalert.state.manager.persist user=656158 slug=muonspacegroundprod t=2024-05-29T13:44:15.775731596Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=656158 slug=muonspacegroundprod instance="datasource_uid=a27bb067-67c3-4636-aa16-ed387b9bc21e, ref_id=ssd_used" previous_handler=resultNoData t=2024-05-29T13:44:15.775712504Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+logger=ngalert.state.manager user=806229 slug=simplisafe instance="host=ip-10-91-5-100.us-west-2.compute.internal" t=2024-05-29T13:44:15.77284151Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=14927 slug=rstsoftware instance= t=2024-05-29T13:44:15.771474347Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770768917Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770729015Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770622945Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770613173Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770598955Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770579081Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770571348Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770550889Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770387812Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770365274Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770292505Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770271125Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770231852Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770221589Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770163201Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=206107 slug=hydrolix version=3 fingerprint=4ecfee11a8a54653 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.7700428Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdeyrm9s020owb, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.769733229s EvaluationString:}]" duration=123.571101ms
+level=info ts=2024-05-29T13:44:15.769608655Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddbhsq1zf0gsle alerts=1
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.769599016Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.924338ms
+logger=ngalert.scheduler user=404375 slug=cbeanalytics version=2 fingerprint=ccf0a14cbed23fee attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.76775728Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.767411307s EvaluationString:}]" duration=16.800948ms
+level=info ts=2024-05-29T13:44:15.767623342Z caller=grafana.go:247 user=396586 slug=opengov msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=40 alerts=0
+logger=ngalert.scheduler user=491157 slug=prd01wr version=2 fingerprint=165f2fee356ad8f8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.767433563Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76711132s EvaluationString:}]" duration=21.075989ms
+level=debug ts=2024-05-29T13:44:15.767074048Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.766874668Z level=debug msg="Keeping state" state=Normal
+level=error ts=2024-05-29T13:44:15.766358493Z caller=remote_rule_evaluator.go:110 user=432323 slug=lithic msg="remote evaluate failed" code=Code(422) err="failed to parse expression 'B': reduction avg not implemented"
+level=debug ts=2024-05-29T13:44:15.765364182Z caller=remote_instance_store.go:51 user=381989 slug=vanoordacf msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=328755 slug=infogrideu instance="ServiceName=sensor-planning-api" t=2024-05-29T13:44:15.763868946Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.764331698Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.764288258Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=453308 slug=hyperzodprod instance= t=2024-05-29T13:44:15.763944033Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=453308 slug=hyperzodprod instance= t=2024-05-29T13:44:15.763924718Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.763473101Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.762876862Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.762989195Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.762895982Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.762846442Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.762835984Z level=debug msg="Setting next state" handler=resultError
+level=debug ts=2024-05-29T13:44:15.76276045Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.761866075Z caller=remote_instance_store.go:51 user=882448 slug=bookbookspace1 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.761779898Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=446790 slug=empowereco instance="instance=stargaze" t=2024-05-29T13:44:15.761521942Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=446790 slug=empowereco instance="instance=jackal" t=2024-05-29T13:44:15.761415347Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=446790 slug=empowereco instance="instance=jackal" t=2024-05-29T13:44:15.761399917Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.761178995Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.759196238Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.759118674Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.758652888Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.758263356Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:15.757924854Z caller=remote_instance_store.go:51 user=502468 slug=gmawater msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.757443058Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdg5sm3oacbnkc, ref_id=A" t=2024-05-29T13:44:15.756384898Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.scheduler user=206107 slug=hydrolix version=3 fingerprint=f72cb230217f9c02 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.756213335Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdg5sm3oacbnkc, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.755874102s EvaluationString:}]" duration=54.049352ms
+logger=ngalert.state.manager.persist user=328755 slug=infogrideu t=2024-05-29T13:44:15.755491937Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:15.755393099Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.753795376Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.753194967Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:15.753056343Z caller=grafana.go:247 user=884866 slug=cnonumerique msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=pending&state=error" groups=10 alerts=0
+logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.751862342Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.state.manager user=698963 slug=lemonade instance="app=home-risk, pod=home-risk-668d54b448-jfx7r" t=2024-05-29T13:44:15.75185027Z level=debug msg="Keeping state" state=Normal
+Error parsing panelUID for alert annotationruleID433dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=698963 slug=lemonade version=5 fingerprint=b5b925e753db6a58 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.751653578Z level=debug msg="Alert rule evaluated" results="[{Instance:app=home-risk, pod=home-risk-668d54b448-f4hll State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=home-risk, pod=home-risk-668d54b448-f4hll Value:0xc036c6dfc0} THRESHOLD:{Var:THRESHOLD Labels:app=home-risk, pod=home-risk-668d54b448-f4hll Value:0xc036c6df80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.751349421s EvaluationString:[ var='QUERY' labels={app=home-risk, pod=home-risk-668d54b448-f4hll} value=0 ], [ var='THRESHOLD' labels={app=home-risk, pod=home-risk-668d54b448-f4hll} value=0 ]} {Instance:app=home-risk, pod=home-risk-668d54b448-jfx7r State:Normal Error: