From 17c0b332b5dc8516d1f5e8fca78d5bc740143a80 Mon Sep 17 00:00:00 2001 From: Olga Naidjonoka Date: Mon, 21 Oct 2024 12:05:33 +0300 Subject: [PATCH] added servrless tests step --- .buildkite/hooks/pre-command | 4 +- .../x-pack/pipeline.xpack.agentbeat.yml | 205 +- dev-tools/mage/agentbeat-serverless.go | 52 + dev-tools/mage/gotest.go | 27 + dev-tools/mage/spec.go | 100 - .../srvrlesstest/component/platforms.go | 162 + .../target/srvrlesstest/core/process/cmd.go | 36 + .../srvrlesstest/core/process/cmd_darwin.go | 54 + .../srvrlesstest/core/process/cmd_linux.go | 57 + .../core/process/external_unix.go | 29 + .../core/process/external_windows.go | 52 + .../srvrlesstest/core/process/job_unix.go | 39 + .../srvrlesstest/core/process/job_windows.go | 87 + .../srvrlesstest/core/process/process.go | 184 + .../mage/target/srvrlesstest/define/batch.go | 307 + .../srvrlesstest/define/requirements.go | 166 + .../mage/target/srvrlesstest/srvrlesstest.go | 428 + .../srvrlesstest/testing/common/batch.go | 19 + .../srvrlesstest/testing/common/build.go | 19 + .../srvrlesstest/testing/common/config.go | 134 + .../srvrlesstest/testing/common/instance.go | 66 + .../srvrlesstest/testing/common/logger.go | 11 + .../testing/common/prefix_output.go | 61 + .../srvrlesstest/testing/common/runner.go | 44 + .../srvrlesstest/testing/common/stack.go | 76 + .../srvrlesstest/testing/common/supported.go | 15 + .../srvrlesstest/testing/define/batch.go | 307 + .../testing/define/requirements.go | 167 + .../target/srvrlesstest/testing/ess/client.go | 66 + .../target/srvrlesstest/testing/ess/config.go | 73 + .../create_deployment_csp_configuration.yaml | 15 + .../ess/create_deployment_request.tmpl.json | 102 + .../srvrlesstest/testing/ess/deployment.go | 388 + .../srvrlesstest/testing/ess/serverless.go | 318 + .../testing/ess/serverless_provisioner.go | 262 + .../testing/ess/statful_provisioner.go | 188 + .../target/srvrlesstest/testing/fetcher.go | 243 + .../srvrlesstest/testing/kubernetes/image.go | 245 + .../testing/kubernetes/kind/provisioner.go | 285 + .../srvrlesstest/testing/kubernetes/runner.go | 122 + .../testing/kubernetes/supported.go | 104 + .../srvrlesstest/testing/linux/debian.go | 206 + .../srvrlesstest/testing/linux/linux.go | 156 + .../target/srvrlesstest/testing/linux/rhel.go | 113 + .../mage/target/srvrlesstest/testing/log.go | 144 + .../testing/multipas/provisioner.go | 317 + .../target/srvrlesstest/testing/ogc/api.go | 47 + .../target/srvrlesstest/testing/ogc/config.go | 87 + .../srvrlesstest/testing/ogc/provisioner.go | 341 + .../srvrlesstest/testing/ogc/supported.go | 189 + .../srvrlesstest/testing/runner/archiver.go | 112 + .../srvrlesstest/testing/runner/json.go | 47 + .../srvrlesstest/testing/runner/junit.go | 86 + .../srvrlesstest/testing/runner/runner.go | 955 ++ .../srvrlesstest/testing/runner/utils.go | 41 + .../target/srvrlesstest/testing/ssh/client.go | 288 + .../target/srvrlesstest/testing/ssh/file.go | 19 + .../srvrlesstest/testing/ssh/interface.go | 49 + .../target/srvrlesstest/testing/ssh/keys.go | 47 + .../srvrlesstest/testing/supported/batch.go | 182 + .../testing/supported/supported.go | 274 + .../srvrlesstest/testing/windows/windows.go | 329 + .../target/srvrlesstest/utils/root_unix.go | 20 + .../target/srvrlesstest/utils/root_windows.go | 46 + .../srvrlesstest/utils/root_windows_test.go | 20 + go.mod | 23 +- go.sum | 23 + x-pack/agentbeat/magefile.go | 82 +- x-pack/filebeat/filebeat.reference.yml | 9400 ++++++++--------- 69 files changed, 13964 insertions(+), 4998 deletions(-) create 
mode 100644 dev-tools/mage/agentbeat-serverless.go delete mode 100644 dev-tools/mage/spec.go create mode 100644 dev-tools/mage/target/srvrlesstest/component/platforms.go create mode 100644 dev-tools/mage/target/srvrlesstest/core/process/cmd.go create mode 100644 dev-tools/mage/target/srvrlesstest/core/process/cmd_darwin.go create mode 100644 dev-tools/mage/target/srvrlesstest/core/process/cmd_linux.go create mode 100644 dev-tools/mage/target/srvrlesstest/core/process/external_unix.go create mode 100644 dev-tools/mage/target/srvrlesstest/core/process/external_windows.go create mode 100644 dev-tools/mage/target/srvrlesstest/core/process/job_unix.go create mode 100644 dev-tools/mage/target/srvrlesstest/core/process/job_windows.go create mode 100644 dev-tools/mage/target/srvrlesstest/core/process/process.go create mode 100644 dev-tools/mage/target/srvrlesstest/define/batch.go create mode 100644 dev-tools/mage/target/srvrlesstest/define/requirements.go create mode 100644 dev-tools/mage/target/srvrlesstest/srvrlesstest.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/common/batch.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/common/build.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/common/config.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/common/instance.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/common/logger.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/common/prefix_output.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/common/runner.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/common/stack.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/common/supported.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/define/batch.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/define/requirements.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ess/client.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ess/config.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_csp_configuration.yaml create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_request.tmpl.json create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ess/deployment.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ess/serverless.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ess/serverless_provisioner.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ess/statful_provisioner.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/fetcher.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/kubernetes/image.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/kubernetes/kind/provisioner.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/kubernetes/runner.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/kubernetes/supported.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/linux/debian.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/linux/linux.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/linux/rhel.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/log.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/multipas/provisioner.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ogc/api.go create mode 100644 
dev-tools/mage/target/srvrlesstest/testing/ogc/config.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ogc/provisioner.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ogc/supported.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/runner/archiver.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/runner/json.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/runner/junit.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/runner/runner.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/runner/utils.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ssh/client.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ssh/file.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ssh/interface.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/ssh/keys.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/supported/batch.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/supported/supported.go create mode 100644 dev-tools/mage/target/srvrlesstest/testing/windows/windows.go create mode 100644 dev-tools/mage/target/srvrlesstest/utils/root_unix.go create mode 100644 dev-tools/mage/target/srvrlesstest/utils/root_windows.go create mode 100644 dev-tools/mage/target/srvrlesstest/utils/root_windows_test.go diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 5cb0722edd84..884c413cdb56 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -21,8 +21,8 @@ fi if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-agentbeat" && "$BUILDKITE_STEP_KEY" == *"agentbeat-it"* ]]; then out=$(.buildkite/scripts/agentbeat/setup_agentbeat.py) echo "$out" - AGENTBEAT_PATH=$(echo "$out" | tail -n 1) - export AGENTBEAT_PATH + AGENT_BUILD_DIR=$(echo "$out" | tail -n 1) + export AGENT_BUILD_DIR fi if [[ "$BUILDKITE_PIPELINE_SLUG" == "auditbeat" || \ diff --git a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml index 1687aa25d922..221ba66aef49 100644 --- a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml @@ -1,43 +1,31 @@ env: ASDF_MAGE_VERSION: 1.15.0 - ASDF_NODEJS_VERSION: 18.17.1 - GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16" IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" - AWS_ARM_INSTANCE_TYPE: "m6g.xlarge" - AWS_IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" - - IMAGE_MACOS_ARM: "generic-13-ventura-arm" - IMAGE_MACOS_X86_64: "generic-13-ventura-x64" - - IMAGE_WIN_2022: "family/platform-ingest-beats-windows-2022" - IMAGE_BEATS_WITH_HOOKS_LATEST: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-beats-ci-with-hooks:latest" - AGENTBEAT_SPEC: "./agentbeat.spec.yml" - steps: - - group: "Check/Update" - key: "x-pack-agentbeat-check-update" - - steps: - - label: "agentbeat: Run pre-commit" - command: "pre-commit run --all-files" - agents: - image: "${IMAGE_BEATS_WITH_HOOKS_LATEST}" - memory: "2Gi" - useCustomGlobalHooks: true - notify: - - github_commit_status: - context: "agentbeat: pre-commit" - - - wait: ~ - # with PRs, we want to run mandatory tests only if check/update step succeed - # for other cases, e.g. 
merge commits, we want to run mundatory test (and publish) independently of other tests - # this allows building DRA artifacts even if there is flakiness in check/update step - if: build.env("BUILDKITE_PULL_REQUEST") != "false" - depends_on: "x-pack-agentbeat-check-update" +# - group: "Check/Update" +# key: "x-pack-agentbeat-check-update" +# +# steps: +# - label: "agentbeat: Run pre-commit" +# command: "pre-commit run --all-files" +# agents: +# image: "${IMAGE_BEATS_WITH_HOOKS_LATEST}" +# memory: "2Gi" +# useCustomGlobalHooks: true +# notify: +# - github_commit_status: +# context: "agentbeat: pre-commit" +# +# - wait: ~ +# # with PRs, we want to run mandatory tests only if check/update step succeed +# # for other cases, e.g. merge commits, we want to run mundatory test (and publish) independently of other tests +# # this allows building DRA artifacts even if there is flakiness in check/update step +# if: build.env("BUILDKITE_PULL_REQUEST") != "false" +# depends_on: "x-pack-agentbeat-check-update" - group: "Agentbeat tests" key: "agentbeat-mandatory-tests" @@ -47,6 +35,7 @@ steps: key: "agentbeat-package-linux" env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" + PACKAGES: tar.gz,zip SNAPSHOT: true command: | set -euo pipefail @@ -70,93 +59,87 @@ steps: - github_commit_status: context: "agentbeat: Packaging" - - label: ":ubuntu: x-pack/agentbeat: Ubuntu x86_64 Spec tests" - key: "agentbeat-it-linux-x86-64" - env: - PLATFORM: "linux/amd64" +# - label: ":linux: Agentbeat/Integration tests Linux" +# key: "agentbeat-it-linux" +# depends_on: +# - agentbeat-package-linux +# env: +# ASDF_NODEJS_VERSION: 18.17.1 +# PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" +# SNAPSHOT: true +# command: | +# set -euo pipefail +# echo "~~~ Downloading artifacts" +# buildkite-agent artifact download x-pack/agentbeat/build/distributions/** . 
--step 'agentbeat-package-linux' +# ls -lah x-pack/agentbeat/build/distributions/ +# echo "~~~ Installing @elastic/synthetics with npm" +# npm install -g @elastic/synthetics +# echo "~~~ Running tests" +# cd x-pack/agentbeat +# mage goIntegTest +# artifact_paths: +# - x-pack/agentbeat/build/distributions/**/* +# - "x-pack/agentbeat/build/*.xml" +# - "x-pack/agentbeat/build/*.json" +# plugins: +# - test-collector#v1.10.2: +# files: "x-pack/agentbeat/build/TEST-*.xml" +# format: "junit" +# branches: "main" +# debug: true +# retry: +# automatic: +# - limit: 1 +# timeout_in_minutes: 60 +# agents: +# provider: "gcp" +# image: "${IMAGE_UBUNTU_X86_64}" +# machineType: "${GCP_HI_PERF_MACHINE_TYPE}" +# disk_size: 100 +# disk_type: "pd-ssd" +# notify: +# - github_commit_status: +# context: "agentbeat: Integration tests" + + - group: "Agentbeat: Servelress Tests" + key: "agentbeat-serverless-tests" + + steps: + - label: ":ubuntu: Serverless tests" + key: "agentbeat-it-serverless" depends_on: - agentbeat-package-linux + env: + AGENT_STACK_VERSION: "8.16.0-SNAPSHOT" + TEST_INTEG_AUTH_GCP_DATACENTER: "us-central1-a" + GOFLAGS: "-buildvcs=false" + TEST_INTEG_CLEAN_ON_EXIT: true + TEST_PLATFORMS: "linux/amd64" + SNAPSHOT: true command: | cd x-pack/agentbeat - mage -v testWithSpec + mage serverlessTest + artifact_paths: + - x-pack/agentbeat/build/TEST-** + - x-pack/agentbeat/build/diagnostics/* + plugins: + - test-collector#v1.10.2: + files: "x-pack/agentbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + retry: + automatic: + - limit: 1 + timeout_in_minutes: 60 + concurrency_group: elastic-agent-extended-testing/serverless-integration + concurrency: 8 agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" - machineType: "${GCP_HI_PERF_MACHINE_TYPE}" + machineType: "${GCP_STANDARD_MACHINE_TYPE}" disk_size: 100 disk_type: "pd-ssd" notify: - github_commit_status: - context: "agentbeat: Ubuntu x86_64 Spec tests" - - - label: ":ubuntu: x-pack/agentbeat: Ubuntu arm64 Spec tests" - key: "agentbeat-it-linux-arm64" - env: - PLATFORM: "linux/arm64" - depends_on: - - agentbeat-package-linux - command: | - cd x-pack/agentbeat - mage -v testWithSpec - agents: - provider: "aws" - imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" - instanceType: "${AWS_ARM_INSTANCE_TYPE}" - notify: - - github_commit_status: - context: "agentbeat: Ubuntu arm64 Spec tests" - - - label: ":windows: x-pack/agentbeat: Windows x86_64 Spec tests" - key: "agentbeat-it-windows" - env: - PLATFORM: "windows/amd64" - depends_on: - - agentbeat-package-linux - command: | - Set-Location -Path x-pack/agentbeat - mage -v testWithSpec - agents: - provider: "gcp" - image: "${IMAGE_WIN_2022}" - machine_type: "${GCP_WIN_MACHINE_TYPE}" - disk_size: 200 - disk_type: "pd-ssd" - notify: - - github_commit_status: - context: "agentbeat: Windows x86_64 Spec tests" - - - label: ":macos: x-pack/agentbeat: macOS x86_64 Spec tests" - key: "agentbeat-it-macos-x86-64" - depends_on: - - agentbeat-package-linux - env: - PLATFORM: "darwin/amd64" - command: | - set -euo pipefail - source .buildkite/scripts/install_macos_tools.sh - cd x-pack/agentbeat - mage -v testWithSpec - agents: - provider: "orka" - imagePrefix: "${IMAGE_MACOS_X86_64}" - notify: - - github_commit_status: - context: "agentbeat: macOS x86_64 Spec tests" - - - label: ":macos: x-pack/agentbeat: macOS arm64 Spec tests" - key: "agentbeat-it-macos-arm64" - depends_on: - - agentbeat-package-linux - env: - PLATFORM: "darwin/arm64" - command: | - set -euo pipefail - source 
.buildkite/scripts/install_macos_tools.sh - cd x-pack/agentbeat - mage -v testWithSpec - agents: - provider: "orka" - imagePrefix: "${IMAGE_MACOS_ARM}" - notify: - - github_commit_status: - context: "agentbeat: macOS arm64 Spec tests" + context: "agentbeat: Serverless tests" diff --git a/dev-tools/mage/agentbeat-serverless.go b/dev-tools/mage/agentbeat-serverless.go new file mode 100644 index 000000000000..d31ffc490be1 --- /dev/null +++ b/dev-tools/mage/agentbeat-serverless.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package mage + +import ( + "fmt" + "log" + "os" +) + +// TestBeatServerless todo description +func TestBeatServerless(beat string) { + if os.Getenv("AGENT_BUILD_DIR") == "" { + log.Fatal("AGENT_BUILD_DIR is not defined") + } + + setStackProvisioner() + setTestBinaryName(beat) + +} + +func setStackProvisioner() { + stackProvisioner := os.Getenv("STACK_PROVISIONER") + if stackProvisioner == "" { + if err := os.Setenv("STACK_PROVISIONER", "serverless"); err != nil { + log.Fatal("error setting serverless stack var: %w", err) + } + } else if stackProvisioner == "stateful" { + fmt.Println("--- Warning: running TestBeatServerless as stateful") + } +} + +func setTestBinaryName(beat string) { + if err := os.Setenv("TEST_BINARY_NAME", beat); err != nil { + log.Fatal("error setting binary name: %w", err) + } +} diff --git a/dev-tools/mage/gotest.go b/dev-tools/mage/gotest.go index ecc8f277b941..efed67fdea6b 100644 --- a/dev-tools/mage/gotest.go +++ b/dev-tools/mage/gotest.go @@ -428,3 +428,30 @@ func BuildSystemTestGoBinary(binArgs TestBinaryArgs) error { }() return sh.RunV("go", args...) } + +func GoTestBuild(ctx context.Context, params GoTestArgs) error { + if params.OutputFile == "" { + return fmt.Errorf("missing output file") + } + + fmt.Println(">> go test:", params.TestName, "Building Test Binary") + + args := []string{"test", "-c", "-o", params.OutputFile} + + if len(params.Tags) > 0 { + params := strings.Join(params.Tags, " ") + if params != "" { + args = append(args, "-tags", params) + } + } + + args = append(args, params.Packages...) + + goTestBuild := makeCommand(ctx, params.Env, "go", args...) + + err := goTestBuild.Run() + if err != nil { + return err + } + return nil +} diff --git a/dev-tools/mage/spec.go b/dev-tools/mage/spec.go deleted file mode 100644 index 03c733f1dd6d..000000000000 --- a/dev-tools/mage/spec.go +++ /dev/null @@ -1,100 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package mage - -import ( - "gopkg.in/yaml.v2" - - "log" - "os" - "strings" -) - -type spec struct { - Inputs []input -} - -type input struct { - Name string - Description string - Platforms []string - Command command -} - -func (i *input) GetCommand() string { - return strings.Join(i.Command.Args, " ") -} - -type command struct { - Name string - Args []string -} - -// SpecCommands parses agent.beat.spec.yml and collects commands for tests -func SpecCommands(specPath string, platform string) []string { - spec, _ := parseToObj(specPath) - - filteredInputs := filter(spec.Inputs, func(input input) bool { - return contains(input.Platforms, platform) - }) - - commands := make(map[string]interface{}) - for _, i := range filteredInputs { - commands[i.GetCommand()] = nil - } - keys := make([]string, 0, len(commands)) - for k := range commands { - keys = append(keys, k) - } - - return keys -} - -func parseToObj(path string) (spec, error) { - specFile, err := os.ReadFile(path) - if err != nil { - log.Fatalf("Error opening agentbeat.spec.yml: %v", err) - return spec{}, err - } - var spec spec - err = yaml.Unmarshal(specFile, &spec) - if err != nil { - log.Fatalf("Error parsing agentbeat.spec.yml: %v", err) - return spec, err - } - return spec, nil -} - -func filter[T any](slice []T, condition func(T) bool) []T { - var result []T - for _, v := range slice { - if condition(v) { - result = append(result, v) - } - } - return result -} - -func contains(slice []string, item string) bool { - for _, v := range slice { - if v == item { - return true - } - } - return false -} diff --git a/dev-tools/mage/target/srvrlesstest/component/platforms.go b/dev-tools/mage/target/srvrlesstest/component/platforms.go new file mode 100644 index 000000000000..d7523d95e9c2 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/component/platforms.go @@ -0,0 +1,162 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package component + +import ( + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/utils" + goruntime "runtime" + "strings" + + "github.com/elastic/go-sysinfo" +) + +const ( + // Container represents running inside a container + Container = "container" + // Darwin represents running on Mac OSX + Darwin = "darwin" + // Linux represents running on Linux + Linux = "linux" + // Windows represents running on Windows + Windows = "windows" +) + +const ( + // AMD64 represents the amd64 architecture + AMD64 = "amd64" + // ARM64 represents the arm64 architecture + ARM64 = "arm64" +) + +// Platform defines the platform that a component can support +type Platform struct { + OS string + Arch string + GOOS string +} + +// Platforms is an array of platforms. 
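For reference, a minimal sketch of how the platform helpers introduced in platforms.go can be consumed; the platform string and the printed fields below are illustrative assumptions, not code from this commit:

package main

import (
	"fmt"
	"log"

	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/component"
)

func main() {
	// Exists checks an "os/arch" pair against the supported platform list.
	if !component.GlobalPlatforms.Exists("linux/amd64") {
		log.Fatal("linux/amd64 is not in GlobalPlatforms")
	}

	// LoadPlatformDetail inspects the host the tests run on (root, native arch, OS family).
	detail, err := component.LoadPlatformDetail()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("running on %s (native arch %s, root: %v)\n",
		detail.String(), detail.NativeArch, detail.User.Root)
}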
+type Platforms []Platform + +// GlobalPlatforms defines the platforms that a component can support +var GlobalPlatforms = Platforms{ + { + OS: Container, + Arch: AMD64, + GOOS: Linux, + }, + { + OS: Container, + Arch: ARM64, + GOOS: Linux, + }, + { + OS: Darwin, + Arch: AMD64, + GOOS: Darwin, + }, + { + OS: Darwin, + Arch: ARM64, + GOOS: Darwin, + }, + { + OS: Linux, + Arch: AMD64, + GOOS: Linux, + }, + { + OS: Linux, + Arch: ARM64, + GOOS: Linux, + }, + { + OS: Windows, + Arch: AMD64, + GOOS: Windows, + }, +} + +// String returns the platform string identifier. +func (p *Platform) String() string { + return fmt.Sprintf("%s/%s", p.OS, p.Arch) +} + +// Exists returns true if the +func (p Platforms) Exists(platform string) bool { + pieces := strings.SplitN(platform, "/", 2) + if len(pieces) != 2 { + return false + } + for _, platform := range p { + if platform.OS == pieces[0] && platform.Arch == pieces[1] { + return true + } + } + return false +} + +// UserDetail provides user specific information on the running platform. +type UserDetail struct { + Root bool +} + +// PlatformDetail is platform that has more detail information about the running platform. +type PlatformDetail struct { + Platform + + NativeArch string + Family string + Major int + Minor int + + User UserDetail +} + +// PlatformModifier can modify the platform details before the runtime specifications are loaded. +type PlatformModifier func(detail PlatformDetail) PlatformDetail + +// LoadPlatformDetail loads the platform details for the current system. +func LoadPlatformDetail(modifiers ...PlatformModifier) (PlatformDetail, error) { + hasRoot, err := utils.HasRoot() + if err != nil { + return PlatformDetail{}, err + } + info, err := sysinfo.Host() + if err != nil { + return PlatformDetail{}, err + } + os := info.Info().OS + nativeArch := info.Info().NativeArchitecture + if nativeArch == "x86_64" { + // go-sysinfo Architecture and NativeArchitecture prefer x64_64 + // but GOARCH prefers amd64 + nativeArch = "amd64" + } + if nativeArch == "aarch64" { + // go-sysinfo Architecture and NativeArchitecture prefer aarch64 + // but GOARCH prefers arm64 + nativeArch = "arm64" + } + detail := PlatformDetail{ + Platform: Platform{ + OS: goruntime.GOOS, + Arch: goruntime.GOARCH, + GOOS: goruntime.GOOS, + }, + NativeArch: nativeArch, + Family: os.Family, + Major: os.Major, + Minor: os.Minor, + User: UserDetail{ + Root: hasRoot, + }, + } + for _, modifier := range modifiers { + detail = modifier(detail) + } + return detail, nil +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/cmd.go b/dev-tools/mage/target/srvrlesstest/core/process/cmd.go new file mode 100644 index 000000000000..5df2d164037f --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/cmd.go @@ -0,0 +1,36 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build !linux && !darwin + +package process + +import ( + "context" + "os" + "os/exec" + "path/filepath" +) + +func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) { + var cmd *exec.Cmd + if ctx == nil { + cmd = exec.Command(path, arg...) + } else { + cmd = exec.CommandContext(ctx, path, arg...) + } + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) 
+ cmd.Dir = filepath.Dir(path) + + return cmd, nil +} + +func killCmd(proc *os.Process) error { + return proc.Kill() +} + +func terminateCmd(proc *os.Process) error { + return proc.Kill() +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/cmd_darwin.go b/dev-tools/mage/target/srvrlesstest/core/process/cmd_darwin.go new file mode 100644 index 000000000000..11008414eecf --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/cmd_darwin.go @@ -0,0 +1,54 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build darwin + +package process + +import ( + "context" + "fmt" + "math" + "os" + "os/exec" + "path/filepath" + "syscall" +) + +func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) { + var cmd *exec.Cmd + if ctx == nil { + cmd = exec.Command(path, arg...) + } else { + cmd = exec.CommandContext(ctx, path, arg...) + } + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Dir = filepath.Dir(path) + if isInt32(uid) && isInt32(gid) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + Credential: &syscall.Credential{ + Uid: uint32(uid), + Gid: uint32(gid), + NoSetGroups: true, + }, + } + } else { + return nil, fmt.Errorf("invalid uid: '%d' or gid: '%d'", uid, gid) + } + + return cmd, nil +} + +func isInt32(val int) bool { + return val >= 0 && val <= math.MaxInt32 +} + +func killCmd(proc *os.Process) error { + return proc.Kill() +} + +func terminateCmd(proc *os.Process) error { + return proc.Signal(syscall.SIGTERM) +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/cmd_linux.go b/dev-tools/mage/target/srvrlesstest/core/process/cmd_linux.go new file mode 100644 index 000000000000..3a5641926308 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/cmd_linux.go @@ -0,0 +1,57 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build linux + +package process + +import ( + "context" + "fmt" + "math" + "os" + "os/exec" + "path/filepath" + "syscall" +) + +func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) { + var cmd *exec.Cmd + if ctx == nil { + cmd = exec.Command(path, arg...) + } else { + cmd = exec.CommandContext(ctx, path, arg...) + } + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) 
+ cmd.Dir = filepath.Dir(path) + if isInt32(uid) && isInt32(gid) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + // on shutdown all sub-processes are sent SIGTERM, in the case that the Agent dies or is -9 killed + // then also kill the children (only supported on linux) + Pdeathsig: syscall.SIGKILL, + Credential: &syscall.Credential{ + Uid: uint32(uid), + Gid: uint32(gid), + NoSetGroups: true, + }, + } + } else { + return nil, fmt.Errorf("invalid uid: '%d' or gid: '%d'", uid, gid) + } + + return cmd, nil +} + +func isInt32(val int) bool { + return val >= 0 && val <= math.MaxInt32 +} + +func killCmd(proc *os.Process) error { + return proc.Kill() +} + +func terminateCmd(proc *os.Process) error { + return proc.Signal(syscall.SIGTERM) +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/external_unix.go b/dev-tools/mage/target/srvrlesstest/core/process/external_unix.go new file mode 100644 index 000000000000..4a4724ada2b8 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/external_unix.go @@ -0,0 +1,29 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build !windows + +package process + +import ( + "os" + "syscall" + "time" +) + +// externalProcess is a watch mechanism used in cases where OS requires a process to be a child +// for waiting for process. We need to be able to await any process. +func externalProcess(proc *os.Process) { + if proc == nil { + return + } + + for { + <-time.After(1 * time.Second) + if proc.Signal(syscall.Signal(0)) != nil { + // failed to contact process, return + return + } + } +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/external_windows.go b/dev-tools/mage/target/srvrlesstest/core/process/external_windows.go new file mode 100644 index 000000000000..255012dd708c --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/external_windows.go @@ -0,0 +1,52 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build windows + +package process + +import ( + "os" + "syscall" + "time" +) + +const ( + // exitCodeStillActive according to docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess + exitCodeStillActive = 259 +) + +// externalProcess is a watch mechanism used in cases where OS requires a process to be a child +// for waiting for process. We need to be able to await any process. 
+func externalProcess(proc *os.Process) { + if proc == nil { + return + } + + for { + <-time.After(1 * time.Second) + if isWindowsProcessExited(proc.Pid) { + return + } + } +} + +func isWindowsProcessExited(pid int) bool { + const desiredAccess = syscall.STANDARD_RIGHTS_READ | syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE + h, err := syscall.OpenProcess(desiredAccess, false, uint32(pid)) + if err != nil { + // failed to open handle, report exited + return true + } + + // get exit code, this returns immediately in case it is still running + // it returns exitCodeStillActive + var ec uint32 + if err := syscall.GetExitCodeProcess(h, &ec); err != nil { + // failed to contact, report exited + return true + } + + return ec != exitCodeStillActive +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/job_unix.go b/dev-tools/mage/target/srvrlesstest/core/process/job_unix.go new file mode 100644 index 000000000000..53ae5ec529dc --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/job_unix.go @@ -0,0 +1,39 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build !windows + +package process + +import ( + "os" +) + +// Job is noop on unix +type Job int + +var ( + // JobObject is public global JobObject, 0 value on linux + JobObject Job +) + +// CreateJobObject returns a job object. +func CreateJobObject() (pj Job, err error) { + return pj, err +} + +// NewJob is noop on unix +func NewJob() (Job, error) { + return 0, nil +} + +// Close is noop on unix +func (job Job) Close() error { + return nil +} + +// Assign is noop on unix +func (job Job) Assign(p *os.Process) error { + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/job_windows.go b/dev-tools/mage/target/srvrlesstest/core/process/job_windows.go new file mode 100644 index 000000000000..e0bcfdb1cbae --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/job_windows.go @@ -0,0 +1,87 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build windows + +package process + +import ( + "os" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Job is wrapper for windows JobObject +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects +// This helper guarantees a clean process tree kill on job handler close +type Job windows.Handle + +var ( + // Public global JobObject should be initialized once in main + JobObject Job +) + +// CreateJobObject creates JobObject on Windows, global per process +// Should only be initialized once in main function +func CreateJobObject() (pj Job, err error) { + if pj, err = NewJob(); err != nil { + return pj, err + } + JobObject = pj + return pj, nil +} + +// NewJob creates a instance of the JobObject +func NewJob() (Job, error) { + h, err := windows.CreateJobObject(nil, nil) + if err != nil { + return 0, err + } + + // From https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects + // ... if the job has the JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE flag specified, + // closing the last job object handle terminates all associated processes + // and then destroys the job object itself. 
+ // If a nested job has the JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE flag specified, + // closing the last job object handle terminates all processes associated + // with the job and its child jobs in the hierarchy. + info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{ + BasicLimitInformation: windows.JOBOBJECT_BASIC_LIMIT_INFORMATION{ + LimitFlags: windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE, + }, + } + if _, err := windows.SetInformationJobObject( + h, + windows.JobObjectExtendedLimitInformation, + uintptr(unsafe.Pointer(&info)), + uint32(unsafe.Sizeof(info))); err != nil { + return 0, err + } + + return Job(h), nil +} + +// Close closes job handler +func (job Job) Close() error { + if job == 0 { + return nil + } + return windows.CloseHandle(windows.Handle(job)) +} + +// Assign assigns the process to the JobObject +func (job Job) Assign(p *os.Process) error { + if job == 0 || p == nil { + return nil + } + return windows.AssignProcessToJobObject( + windows.Handle(job), + windows.Handle((*process)(unsafe.Pointer(p)).Handle)) +} + +type process struct { + Pid int + Handle uintptr +} diff --git a/dev-tools/mage/target/srvrlesstest/core/process/process.go b/dev-tools/mage/target/srvrlesstest/core/process/process.go new file mode 100644 index 000000000000..f50b83cbe11e --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/core/process/process.go @@ -0,0 +1,184 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package process + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" +) + +// Info groups information about fresh new process +type Info struct { + PID int + Process *os.Process + Stdin io.WriteCloser + Stderr io.ReadCloser +} + +// CmdOption is an option func to change the underlying command +type CmdOption func(c *exec.Cmd) error + +// StartConfig configuration for the process start set by the StartOption functions +type StartConfig struct { + ctx context.Context + uid, gid int + args, env []string + cmdOpts []CmdOption +} + +// StartOption start options function +type StartOption func(cfg *StartConfig) + +// Start starts a new process +func Start(path string, opts ...StartOption) (proc *Info, err error) { + // Apply options + c := StartConfig{ + uid: os.Geteuid(), + gid: os.Getegid(), + } + + for _, opt := range opts { + opt(&c) + } + + return startContext(c.ctx, path, c.uid, c.gid, c.args, c.env, c.cmdOpts...) 
+} + +// WithContext sets an optional context +func WithContext(ctx context.Context) StartOption { + return func(cfg *StartConfig) { + cfg.ctx = ctx + } +} + +// WithArgs sets arguments +func WithArgs(args []string) StartOption { + return func(cfg *StartConfig) { + cfg.args = args + } +} + +// WithEnv sets the environment variables +func WithEnv(env []string) StartOption { + return func(cfg *StartConfig) { + cfg.env = env + } +} + +// WithUID sets UID +func WithUID(uid int) StartOption { + return func(cfg *StartConfig) { + cfg.uid = uid + } +} + +// WithGID sets GID +func WithGID(gid int) StartOption { + return func(cfg *StartConfig) { + cfg.gid = gid + } +} + +// WithCmdOptions sets the exec.Cmd options +func WithCmdOptions(cmdOpts ...CmdOption) StartOption { + return func(cfg *StartConfig) { + cfg.cmdOpts = cmdOpts + } +} + +// WithWorkDir sets the cmd working directory +func WithWorkDir(wd string) CmdOption { + return func(c *exec.Cmd) error { + c.Dir = wd + return nil + } +} + +// Kill kills the process. +func (i *Info) Kill() error { + return killCmd(i.Process) +} + +// Stop stops the process cleanly. +func (i *Info) Stop() error { + return terminateCmd(i.Process) +} + +// StopWait stops the process and waits for it to exit. +func (i *Info) StopWait() error { + err := i.Stop() + if err != nil { + return err + } + _, err = i.Process.Wait() + return err +} + +// Wait returns a channel that will send process state once it exits. Each +// call to Wait() creates a goroutine. Failure to read from the returned +// channel will leak this goroutine. +func (i *Info) Wait() <-chan *os.ProcessState { + ch := make(chan *os.ProcessState) + + go func() { + procState, err := i.Process.Wait() + if err != nil { + // process is not a child - some OSs requires process to be child + externalProcess(i.Process) + } + ch <- procState + }() + + return ch +} + +// startContext starts a new process with context. The context is optional and can be nil. +func startContext(ctx context.Context, path string, uid, gid int, args []string, env []string, opts ...CmdOption) (*Info, error) { + cmd, err := getCmd(ctx, path, env, uid, gid, args...) + if err != nil { + return nil, fmt.Errorf("failed to create command for %q: %w", path, err) + } + for _, o := range opts { + if err := o(cmd); err != nil { + return nil, fmt.Errorf("failed to set option command for %q: %w", path, err) + } + } + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stdin for %q: %w", path, err) + } + + var stderr io.ReadCloser + if cmd.Stderr == nil { + stderr, err = cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stderr for %q: %w", path, err) + } + } + + // start process + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to start %q: %w", path, err) + } + + // Hook to JobObject on windows, noop on other platforms. + // This ties the application processes lifespan to the agent's. + // Fixes the orphaned beats processes left behind situation + // after the agent process gets killed. 
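As an aside, a minimal sketch of driving the Start/Wait API defined in this process package; the binary path, arguments, and environment below are placeholders, not values used by this patch:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/core/process"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Start the process with a bounded context, extra environment and a working directory.
	proc, err := process.Start("/usr/bin/env",
		process.WithContext(ctx),
		process.WithArgs([]string{"true"}),
		process.WithEnv([]string{"EXAMPLE=1"}),
		process.WithCmdOptions(process.WithWorkDir("/tmp")),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Wait returns a channel delivering the final process state once it exits.
	state := <-proc.Wait()
	if state != nil {
		fmt.Println("exit code:", state.ExitCode())
	}
}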
+ if err := JobObject.Assign(cmd.Process); err != nil { + _ = killCmd(cmd.Process) + return nil, fmt.Errorf("failed job assignment %q: %w", path, err) + } + + return &Info{ + PID: cmd.Process.Pid, + Process: cmd.Process, + Stdin: stdin, + Stderr: stderr, + }, err +} diff --git a/dev-tools/mage/target/srvrlesstest/define/batch.go b/dev-tools/mage/target/srvrlesstest/define/batch.go new file mode 100644 index 000000000000..be254dec6ebf --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/define/batch.go @@ -0,0 +1,307 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package define + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +// defaultOS is the set of OS that are used in the case that a requirement doesn't define any +var defaultOS = []OS{ + { + Type: Darwin, + Arch: AMD64, + }, + { + Type: Darwin, + Arch: ARM64, + }, + { + Type: Linux, + Arch: AMD64, + }, + { + Type: Linux, + Arch: ARM64, + }, + { + Type: Windows, + Arch: AMD64, + }, +} + +// Batch is a grouping of tests that all have the same requirements. +type Batch struct { + // Group must be set on each test to define which group the tests belongs. + // Tests that are in the same group are executed on the same runner. + Group string `json:"group"` + + // OS defines the operating systems this test batch needs. + OS OS `json:"os"` + + // Stack defines the stack required for this batch. + Stack *Stack `json:"stack,omitempty"` + + // Tests define the set of packages and tests that do not require sudo + // privileges to be performed. + Tests []BatchPackageTests `json:"tests"` + + // SudoTests define the set of packages and tests that do require sudo + // privileges to be performed. + SudoTests []BatchPackageTests `json:"sudo_tests"` +} + +// BatchPackageTests is a package and its tests that belong to a batch. +type BatchPackageTests struct { + // Name is the package name. + Name string `json:"name"` + // Tests is the set of tests in the package. + Tests []BatchPackageTest `json:"tests"` +} + +// BatchPackageTest is a specific test in a package. +type BatchPackageTest struct { + // Name of the test. + Name string `json:"name"` + // Stack needed for test. + Stack bool `json:"stack"` +} + +// DetermineBatches parses the package directory with the possible extra build +// tags to determine the set of batches for the package. +func DetermineBatches(dir string, testFlags string, buildTags ...string) ([]Batch, error) { + const ( + defineMatcher = "define skip; requirements: " + ) + + // the 'define' build tag is added so that the `define.Require` skips and + // logs the requirements for each test. + buildTags = append(buildTags, "define") + + // 'go test' wants a directory path to either be absolute or start with + // './' so it knows it's a directory and not package. + if !filepath.IsAbs(dir) && !strings.HasPrefix(dir, "./") { + dir = "./" + dir + } + + // run 'go test' and collect the JSON output to be parsed + // #nosec G204 -- test function code, it will be okay + cmdArgs := []string{"test", "-v", "--tags", strings.Join(buildTags, ","), "-json"} + if testFlags != "" { + flags := strings.Split(testFlags, " ") + cmdArgs = append(cmdArgs, flags...) + } + + cmdArgs = append(cmdArgs, dir) + testCmd := exec.Command("go", cmdArgs...) 
+ output, err := testCmd.Output() + if err != nil { + // format cmdArgs to make the error message more coherent + cmdArgs = append([]string{"go"}, cmdArgs...) + + var errExit *exec.ExitError + if errors.As(err, &errExit) { + b := bytes.NewBuffer(errExit.Stderr) + b.Write(output) + output = b.Bytes() + } + return nil, fmt.Errorf( + "error running go test: (%w), got:\n\n%s\ntried to run: %v", + err, string(output), cmdArgs) + } + + // parses each test and determine the batches that each test belongs in + var batches []Batch + sc := bufio.NewScanner(bytes.NewReader(output)) + for sc.Scan() { + var tar testActionResult + err := json.Unmarshal([]byte(sc.Text()), &tar) + if err != nil { + return nil, err + } + if tar.Action == "output" && strings.Contains(tar.Output, defineMatcher) { + reqRaw := tar.Output[strings.Index(tar.Output, defineMatcher)+len(defineMatcher) : strings.LastIndex(tar.Output, "\n")] + var req Requirements + err := json.Unmarshal([]byte(reqRaw), &req) + if err != nil { + return nil, fmt.Errorf("failed to parse requirements JSON from test %s/%s: %w", tar.Package, tar.Test, err) + } + err = req.Validate() + if err != nil { + return nil, fmt.Errorf("parsed requirements are invalid JSON from test %s/%s: %w", tar.Package, tar.Test, err) + } + batches = appendTest(batches, tar, req) + } + } + return batches, nil +} + +func appendTest(batches []Batch, tar testActionResult, req Requirements) []Batch { + var set []OS + for _, o := range req.OS { + if o.Arch == "" { + set = append(set, OS{ + Type: o.Type, + Arch: AMD64, + Version: o.Version, + Distro: o.Distro, + DockerVariant: o.DockerVariant, + }) + if o.Type != Windows { + set = append(set, OS{ + Type: o.Type, + Arch: ARM64, + Version: o.Version, + Distro: o.Distro, + DockerVariant: o.DockerVariant, + }) + } + } else { + set = append(set, OS{ + Type: o.Type, + Arch: o.Arch, + Version: o.Version, + Distro: o.Distro, + DockerVariant: o.DockerVariant, + }) + } + } + if len(set) == 0 { + // no os define; means the test supports all + set = defaultOS + } + for _, o := range set { + var batch Batch + batchIdx := findBatchIdx(batches, req.Group, o, req.Stack) + if batchIdx == -1 { + // new batch required + batch = Batch{ + Group: req.Group, + OS: o, + Tests: nil, + SudoTests: nil, + } + batches = append(batches, batch) + batchIdx = len(batches) - 1 + } + batch = batches[batchIdx] + if o.Distro != "" { + batch.OS.Distro = o.Distro + } + if o.Version != "" { + batch.OS.Version = o.Version + } + if o.DockerVariant != "" { + batch.OS.DockerVariant = o.DockerVariant + } + if req.Stack != nil && batch.Stack == nil { + // assign the stack to this batch + batch.Stack = copyStack(req.Stack) + } + if req.Sudo { + batch.SudoTests = appendPackageTest(batch.SudoTests, tar.Package, tar.Test, req.Stack != nil) + } else { + batch.Tests = appendPackageTest(batch.Tests, tar.Package, tar.Test, req.Stack != nil) + } + batches[batchIdx] = batch + } + return batches +} + +func appendPackageTest(tests []BatchPackageTests, pkg string, name string, stack bool) []BatchPackageTests { + for i, pt := range tests { + if pt.Name == pkg { + for _, testName := range pt.Tests { + if testName.Name == name { + // we already selected this test for this package for this batch, + // we can return immediately + return tests + } + } + pt.Tests = append(pt.Tests, BatchPackageTest{ + Name: name, + Stack: stack, + }) + tests[i] = pt + return tests + } + } + var pt BatchPackageTests + pt.Name = pkg + pt.Tests = append(pt.Tests, BatchPackageTest{ + Name: name, + Stack: stack, + }) + 
tests = append(tests, pt) + return tests +} + +func findBatchIdx(batches []Batch, group string, os OS, stack *Stack) int { + for i, b := range batches { + if b.Group != group { + // must be in the same group + continue + } + if b.OS.Type != os.Type || b.OS.Arch != os.Arch { + // must be same type and arch both are always defined at this point + continue + } + if os.Distro != "" { + // must have the same distro + if b.OS.Distro != "" && b.OS.Distro != os.Distro { + continue + } + } + if os.Version != "" { + // must have the same version + if b.OS.Version != "" && b.OS.Version != os.Version { + continue + } + } + if os.DockerVariant != "" { + // must be the same docker image + if b.OS.DockerVariant != "" && b.OS.DockerVariant != os.DockerVariant { + continue + } + } + if stack == nil { + // don't care if the batch has a cloud or not + return i + } + if b.Stack == nil { + // need cloud, but batch doesn't have cloud calling code can set it + return i + } + if b.Stack.Version == stack.Version { + // same cloud version; compatible + return i + } + } + return -1 +} + +func copyStack(stack *Stack) *Stack { + var s Stack + if stack != nil { + s = *stack + return &s + } + return nil +} + +type testActionResult struct { + Time string `json:"Time"` + Action string `json:"Action"` + Package string `json:"Package"` + Test string `json:"Test"` + Output string `json:"Output"` +} diff --git a/dev-tools/mage/target/srvrlesstest/define/requirements.go b/dev-tools/mage/target/srvrlesstest/define/requirements.go new file mode 100644 index 000000000000..2a3b2948f9a7 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/define/requirements.go @@ -0,0 +1,166 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package define + +import ( + "errors" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/component" +) + +const ( + // Default constant can be used as the default group for tests. + Default = "default" +) + +const ( + // Darwin is macOS platform + Darwin = component.Darwin + // Linux is Linux platform + Linux = component.Linux + // Windows is Windows platform + Windows = component.Windows + // Kubernetes is Kubernetes platform + Kubernetes = "kubernetes" +) + +const ( + // AMD64 is amd64 architecture + AMD64 = component.AMD64 + // ARM64 is arm64 architecture + ARM64 = component.ARM64 +) + +// OS defines an operating system, architecture, version and distribution combination. +type OS struct { + // Type is the operating system type (darwin, linux or windows). + // + // This is always required to be defined on the OS structure. + // If it is not defined the test runner will error. + Type string `json:"type"` + // Arch is the architecture type (amd64 or arm64). + // + // In the case that it's not provided the test will run on every + // architecture that is supported. + Arch string `json:"arch"` + // Version is a specific version of the OS type to run this test on + // + // When defined the test runs on this specific version only. When not + // defined the test is run on a selected version for this operating system. + Version string `json:"version"` + // Distro allows in the Linux case for a specific distribution to be + // selected for running on. Example would be "ubuntu". In the Kubernetes case + // for a specific distribution of kubernetes. Example would be "kind". 
+ Distro string `json:"distro"` + // DockerVariant allows in the Kubernetes case for a specific variant to + // be selected for running with. Example would be "wolfi". + DockerVariant string `json:"docker_variant"` +} + +// Validate returns an error if not valid. +func (o OS) Validate() error { + if o.Type == "" { + return errors.New("type must be defined") + } + if o.Type != Darwin && o.Type != Linux && o.Type != Windows && o.Type != Kubernetes { + return errors.New("type must be either darwin, linux, windows, or kubernetes") + } + if o.Arch != "" { + if o.Arch != AMD64 && o.Arch != ARM64 { + return errors.New("arch must be either amd64 or arm64") + } + if o.Type == Windows && o.Arch == ARM64 { + return errors.New("windows on arm64 not supported") + } + } + if o.Distro != "" && (o.Type != Linux && o.Type != Kubernetes) { + return errors.New("distro can only be set when type is linux or kubernetes") + } + if o.DockerVariant != "" && o.Type != Kubernetes { + return errors.New("docker variant can only be set when type is kubernetes") + } + return nil +} + +// Stack defines the stack required for the test. +type Stack struct { + // Version defines a specific stack version to create for this test. + // + // In the case that no version is provided the same version being used for + // the current test execution is used. + Version string `json:"version"` +} + +// Requirements defines the testing requirements for the test to run. +type Requirements struct { + // Group must be set on each test to define which group the tests belongs to. + // Tests that are in the same group are executed on the same runner. + // + // Useful when tests take a long time to complete and sharding them across multiple + // hosts can improve the total amount of time to complete all the tests. + Group string `json:"group"` + + // OS defines the operating systems this test can run on. In the case + // multiple are provided the test is ran multiple times one time on each + // combination. + OS []OS `json:"os,omitempty"` + + // Stack defines the stack required for the test. + Stack *Stack `json:"stack,omitempty"` + + // Local defines if this test can safely be performed on a local development machine. + // If not set then the test will not be performed when local only testing is performed. + // + // This doesn't mean this test can only run locally. It will still run on defined OS's + // when a full test run is performed. + Local bool `json:"local"` + + // Sudo defines that this test must run under superuser permissions. On Mac and Linux the + // test gets executed under sudo and on Windows it gets run under Administrator. + Sudo bool `json:"sudo"` +} + +// Validate returns an error if not valid. +func (r Requirements) Validate() error { + if r.Group == "" { + return errors.New("group is required") + } + for i, o := range r.OS { + if err := o.Validate(); err != nil { + return fmt.Errorf("invalid os %d: %w", i, err) + } + } + return nil +} + +// runtimeAllowed returns true if the runtime matches a valid OS. 
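A short, hypothetical illustration of the Requirements shape these helpers validate; define.Require (mentioned in batch.go) is not part of this excerpt, so only Validate is exercised here:

package example

import (
	"testing"

	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/define"
)

func TestRequirementsShape(t *testing.T) {
	req := define.Requirements{
		Group: define.Default,
		OS: []define.OS{
			{Type: define.Linux, Arch: define.AMD64},
			{Type: define.Windows}, // Arch omitted: expands to every supported arch
		},
		Stack: &define.Stack{}, // empty Version: use the stack version under test
		Sudo:  true,
	}
	if err := req.Validate(); err != nil {
		t.Fatal(err)
	}
}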
+func (r Requirements) runtimeAllowed(os string, arch string, version string, distro string) bool { + if len(r.OS) == 0 { + // all allowed + return true + } + for _, o := range r.OS { + if o.Type != Kubernetes && o.Type != os { + // not valid on this runtime + continue + } + if o.Arch != "" && o.Arch != arch { + // not allowed on specific architecture + continue + } + if o.Version != "" && o.Version != version { + // not allowed on specific version + continue + } + if o.Distro != "" && o.Distro != distro { + // not allowed on specific distro + continue + } + // allowed + return true + } + // made it this far, not allowed + return false +} diff --git a/dev-tools/mage/target/srvrlesstest/srvrlesstest.go b/dev-tools/mage/target/srvrlesstest/srvrlesstest.go new file mode 100644 index 000000000000..21bfe2767c2f --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/srvrlesstest.go @@ -0,0 +1,428 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package srvrlesstest + +import ( + "context" + "fmt" + tcommon "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ess" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/kubernetes/kind" + multipass "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/multipas" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ogc" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/runner" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/elastic/beats/v7/dev-tools/mage" + "github.com/magefile/mage/mg" +) + +type ProvisionerType uint32 + +var ( + goIntegTestTimeout = 2 * time.Hour + goProvisionAndTestTimeout = goIntegTestTimeout + 30*time.Minute +) + +const ( + snapshotEnv = "SNAPSHOT" +) + +// Integration namespace contains tasks related to operating and running integration tests. +type Integration mg.Namespace + +func IntegRunner(ctx context.Context, matrix bool, singleTest string) error { + if _, ok := ctx.Deadline(); !ok { + // If the context doesn't have a timeout (usually via the mage -t option), give it one. 
+ var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, goProvisionAndTestTimeout) + defer cancel() + } + + for { + failedCount, err := integRunnerOnce(ctx, matrix, singleTest) + if err != nil { + return err + } + if failedCount > 0 { + if hasCleanOnExit() { + mg.Deps(Integration.Clean) + } + os.Exit(1) + } + if !hasRunUntilFailure() { + if hasCleanOnExit() { + mg.Deps(Integration.Clean) + } + return nil + } + } +} + +func hasCleanOnExit() bool { + clean := os.Getenv("TEST_INTEG_CLEAN_ON_EXIT") + b, _ := strconv.ParseBool(clean) + return b +} + +func hasRunUntilFailure() bool { + runUntil := os.Getenv("TEST_RUN_UNTIL_FAILURE") + b, _ := strconv.ParseBool(runUntil) + return b +} + +func integRunnerOnce(ctx context.Context, matrix bool, singleTest string) (int, error) { + goTestFlags := os.Getenv("GOTEST_FLAGS") + + batches, err := define.DetermineBatches("testing/integration", goTestFlags, "integration") + if err != nil { + return 0, fmt.Errorf("failed to determine batches: %w", err) + } + r, err := createTestRunner(matrix, singleTest, goTestFlags, batches...) + if err != nil { + return 0, fmt.Errorf("error creating test runner: %w", err) + } + results, err := r.Run(ctx) + if err != nil { + return 0, fmt.Errorf("error running test: %w", err) + } + _ = os.Remove("build/TEST-go-integration.out") + _ = os.Remove("build/TEST-go-integration.out.json") + _ = os.Remove("build/TEST-go-integration.xml") + err = writeFile("build/TEST-go-integration.out", results.Output, 0644) + if err != nil { + return 0, fmt.Errorf("error writing test out file: %w", err) + } + err = writeFile("build/TEST-go-integration.out.json", results.JSONOutput, 0644) + if err != nil { + return 0, fmt.Errorf("error writing test out json file: %w", err) + } + err = writeFile("build/TEST-go-integration.xml", results.XMLOutput, 0644) + if err != nil { + return 0, fmt.Errorf("error writing test out xml file: %w", err) + } + if results.Failures > 0 { + r.Logger().Logf("Testing completed (%d failures, %d successful)", results.Failures, results.Tests-results.Failures) + } else { + r.Logger().Logf("Testing completed (%d successful)", results.Tests) + } + r.Logger().Logf("Console output written here: build/TEST-go-integration.out") + r.Logger().Logf("Console JSON output written here: build/TEST-go-integration.out.json") + r.Logger().Logf("JUnit XML written here: build/TEST-go-integration.xml") + r.Logger().Logf("Diagnostic output (if present) here: build/diagnostics") + return results.Failures, nil +} + +// Clean cleans up the integration testing leftovers +func (Integration) Clean() error { + fmt.Println("--- Clean mage artifacts") + _ = os.RemoveAll(".agent-testing") + + // Clean out .integration-cache/.ogc-cache always + defer os.RemoveAll(".integration-cache") + defer os.RemoveAll(".ogc-cache") + + _, err := os.Stat(".integration-cache") + if err == nil { + // .integration-cache exists; need to run `Clean` from the runner + r, err := createTestRunner(false, "", "") + if err != nil { + return fmt.Errorf("error creating test runner: %w", err) + } + err = r.Clean() + if err != nil { + return fmt.Errorf("error running clean: %w", err) + } + } + + return nil +} + +func createTestRunner(matrix bool, singleTest string, goTestFlags string, batches ...define.Batch) (*runner.Runner, error) { + goVersion, err := mage.DefaultBeatBuildVariableSources.GetGoVersion() + if err != nil { + return nil, err + } + + agentVersion, agentStackVersion, err := getTestRunnerVersions() + if err != nil { + return nil, err + } + + agentBuildDir 
:= os.Getenv("AGENT_BUILD_DIR") + if agentBuildDir == "" { + agentBuildDir = filepath.Join("build", "distributions") + } + essToken, ok, err := ess.GetESSAPIKey() + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("ESS api key missing; run 'mage integration:auth'") + } + + // Possible to change the region for deployment, default is gcp-us-west2 which is + // the CFT region. + essRegion := os.Getenv("TEST_INTEG_AUTH_ESS_REGION") + if essRegion == "" { + essRegion = "gcp-us-west2" + } + + serviceTokenPath, ok, err := getGCEServiceTokenPath() + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("GCE service token missing; run 'mage integration:auth'") + } + datacenter := os.Getenv("TEST_INTEG_AUTH_GCP_DATACENTER") + if datacenter == "" { + // us-central1-a is used because T2A instances required for ARM64 testing are only + // available in the central regions + datacenter = "us-central1-a" + } + + ogcCfg := ogc.Config{ + ServiceTokenPath: serviceTokenPath, + Datacenter: datacenter, + } + + var instanceProvisioner tcommon.InstanceProvisioner + instanceProvisionerMode := os.Getenv("INSTANCE_PROVISIONER") + switch instanceProvisionerMode { + case "", ogc.Name: + instanceProvisionerMode = ogc.Name + instanceProvisioner, err = ogc.NewProvisioner(ogcCfg) + case multipass.Name: + instanceProvisioner = multipass.NewProvisioner() + case kind.Name: + instanceProvisioner = kind.NewProvisioner() + default: + return nil, fmt.Errorf("INSTANCE_PROVISIONER environment variable must be one of 'ogc' or 'multipass', not %s", instanceProvisionerMode) + } + + email, err := ogcCfg.ClientEmail() + if err != nil { + return nil, err + } + + provisionCfg := ess.ProvisionerConfig{ + Identifier: fmt.Sprintf("at-%s", strings.Replace(strings.Split(email, "@")[0], ".", "-", -1)), + APIKey: essToken, + Region: essRegion, + } + + var stackProvisioner tcommon.StackProvisioner + stackProvisionerMode := os.Getenv("STACK_PROVISIONER") + switch stackProvisionerMode { + case "", ess.ProvisionerStateful: + stackProvisionerMode = ess.ProvisionerStateful + stackProvisioner, err = ess.NewProvisioner(provisionCfg) + if err != nil { + return nil, err + } + case ess.ProvisionerServerless: + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + stackProvisioner, err = ess.NewServerlessProvisioner(ctx, provisionCfg) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("STACK_PROVISIONER environment variable must be one of %q or %q, not %s", + ess.ProvisionerStateful, + ess.ProvisionerServerless, + stackProvisionerMode) + } + + timestamp := timestampEnabled() + + extraEnv := map[string]string{} + if agentCollectDiag := os.Getenv("AGENT_COLLECT_DIAG"); agentCollectDiag != "" { + extraEnv["AGENT_COLLECT_DIAG"] = agentCollectDiag + } + if agentKeepInstalled := os.Getenv("AGENT_KEEP_INSTALLED"); agentKeepInstalled != "" { + extraEnv["AGENT_KEEP_INSTALLED"] = agentKeepInstalled + } + + extraEnv["TEST_LONG_RUNNING"] = os.Getenv("TEST_LONG_RUNNING") + extraEnv["LONG_TEST_RUNTIME"] = os.Getenv("LONG_TEST_RUNTIME") + + // these following two env vars are currently not used by anything, but can be used in the future to test beats or + // other binaries, see https://github.com/elastic/elastic-agent/pull/3258 + binaryName := os.Getenv("TEST_BINARY_NAME") + if binaryName == "" { + binaryName = "elastic-agent" + } + + repoDir := os.Getenv("TEST_INTEG_REPO_PATH") + if repoDir == "" { + repoDir = "." 
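	// Editor's note (added for illustration, not part of the patch): createTestRunner is driven
	// entirely by environment variables. Assuming the provisioner constants resolve to the
	// lowercase names used in the error messages above ("ogc", "multipass", "kind") and to
	// "stateful"/"serverless" for the stack provisioners (values not shown in this diff), a
	// serverless run would be configured roughly like this before invoking the mage target:
	//
	//	export STACK_PROVISIONER=serverless
	//	export INSTANCE_PROVISIONER=ogc             # default when unset
	//	export TEST_INTEG_AUTH_ESS_REGION=gcp-us-west2
	//	export AGENT_BUILD_DIR=build/distributions  # default when unset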
+ } + + diagDir := filepath.Join("build", "diagnostics") + _ = os.MkdirAll(diagDir, 0755) + + cfg := tcommon.Config{ + AgentVersion: agentVersion, + StackVersion: agentStackVersion, + BuildDir: agentBuildDir, + GOVersion: goVersion, + RepoDir: repoDir, + DiagnosticsDir: diagDir, + StateDir: ".integration-cache", + Platforms: testPlatforms(), + Packages: testPackages(), + Groups: testGroups(), + Matrix: matrix, + SingleTest: singleTest, + VerboseMode: mg.Verbose(), + Timestamp: timestamp, + TestFlags: goTestFlags, + ExtraEnv: extraEnv, + BinaryName: binaryName, + } + + r, err := runner.NewRunner(cfg, instanceProvisioner, stackProvisioner, batches...) + if err != nil { + return nil, fmt.Errorf("failed to create runner: %w", err) + } + return r, nil +} + +func writeFile(name string, data []byte, perm os.FileMode) error { + err := os.WriteFile(name, data, perm) + if err != nil { + return fmt.Errorf("failed to write file %s: %w", name, err) + } + return nil +} + +func getTestRunnerVersions() (string, string, error) { + var err error + agentStackVersion := os.Getenv("AGENT_STACK_VERSION") + agentVersion := os.Getenv("AGENT_VERSION") + if agentVersion == "" { + agentVersion, err = mage.DefaultBeatBuildVariableSources.GetBeatVersion() + if err != nil { + return "", "", err + } + if agentStackVersion == "" { + // always use snapshot for stack version + agentStackVersion = fmt.Sprintf("%s-SNAPSHOT", agentVersion) + } + if hasSnapshotEnv() { + // in the case that SNAPSHOT=true is set in the environment the + // default version of the agent is used, but as a snapshot build + agentVersion = fmt.Sprintf("%s-SNAPSHOT", agentVersion) + } + } + + if agentStackVersion == "" { + agentStackVersion = agentVersion + } + + return agentVersion, agentStackVersion, nil +} + +func hasSnapshotEnv() bool { + snapshot := os.Getenv(snapshotEnv) + if snapshot == "" { + return false + } + b, _ := strconv.ParseBool(snapshot) + + return b +} + +func getGCEServiceTokenPath() (string, bool, error) { + serviceTokenPath := os.Getenv("TEST_INTEG_AUTH_GCP_SERVICE_TOKEN_FILE") + if serviceTokenPath == "" { + homeDir, err := os.UserHomeDir() + if err != nil { + return "", false, fmt.Errorf("unable to determine user's home directory: %w", err) + } + serviceTokenPath = filepath.Join(homeDir, ".config", "gcloud", "agent-testing-service-token.json") + } + _, err := os.Stat(serviceTokenPath) + if os.IsNotExist(err) { + return serviceTokenPath, false, nil + } else if err != nil { + return serviceTokenPath, false, fmt.Errorf("unable to check for service account key file at %s: %w", serviceTokenPath, err) + } + return serviceTokenPath, true, nil +} + +func timestampEnabled() bool { + timestamp := os.Getenv("TEST_INTEG_TIMESTAMP") + if timestamp == "" { + return false + } + b, _ := strconv.ParseBool(timestamp) + return b +} + +func testPlatforms() []string { + platformsStr := os.Getenv("TEST_PLATFORMS") + if platformsStr == "" { + return nil + } + var platforms []string + for _, p := range strings.Split(platformsStr, " ") { + if p != "" { + platforms = append(platforms, p) + } + } + return platforms +} + +func testPackages() []string { + packagesStr, defined := os.LookupEnv("TEST_PACKAGES") + if !defined { + return nil + } + + var packages []string + for _, p := range strings.Split(packagesStr, ",") { + if p == "tar.gz" { + p = "targz" + } + packages = append(packages, p) + } + + return packages +} + +func testGroups() []string { + groupsStr := os.Getenv("TEST_GROUPS") + if groupsStr == "" { + return nil + } + var groups []string + for _, g 
:= range strings.Split(groupsStr, " ") { + if g != "" { + groups = append(groups, g) + } + } + return groups +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/batch.go b/dev-tools/mage/target/srvrlesstest/testing/common/batch.go new file mode 100644 index 000000000000..93da39da1f4e --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/common/batch.go @@ -0,0 +1,19 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package common + +import "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + +// OSBatch defines the mapping between a SupportedOS and a define.Batch. +type OSBatch struct { + // ID is the unique ID for the batch. + ID string + // LayoutOS provides all the OS information to create an instance. + OS SupportedOS + // Batch defines the batch of tests to run on this layout. + Batch define.Batch + // Skip defines if this batch will be skipped because no supported layout exists yet. + Skip bool +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/build.go b/dev-tools/mage/target/srvrlesstest/testing/common/build.go new file mode 100644 index 000000000000..044584f8eb01 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/common/build.go @@ -0,0 +1,19 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package common + +// Build describes a build and its paths. +type Build struct { + // Version of the Elastic Agent build. + Version string + // Type of OS this build is for. + Type string + // Arch is the architecture this build is for. + Arch string + // Path is the path to the build. + Path string + // SHA512 is the path to the SHA512 file. + SHA512Path string +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/config.go b/dev-tools/mage/target/srvrlesstest/testing/common/config.go new file mode 100644 index 000000000000..0502a632dd14 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/common/config.go @@ -0,0 +1,134 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package common + +import ( + "errors" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "strings" +) + +// Config provides the configuration for running the runner. +type Config struct { + AgentVersion string + StateDir string + ReleaseVersion string + StackVersion string + BuildDir string + GOVersion string + RepoDir string + DiagnosticsDir string + + // Platforms filters the tests to only run on the provided list + // of platforms even if the tests support more than what is + // defined in this list. + Platforms []string + + // Packages filters the tests to only run with the provided list + // of packages even if the tests support more than what is + // defined in this list.
+ Packages []string + + // BinaryName is the name of the binary package under test, i.e, elastic-agent, metricbeat, etc + // this is used to copy the .tar.gz to the remote host + BinaryName string + + // Groups filters the tests to only run tests that are part of + // the groups defined in this list. + Groups []string + + // Matrix enables matrix testing. This explodes each test to + // run on all supported platforms the runner supports. + Matrix bool + + // SingleTest only has the runner run that specific test. + SingleTest string + + // VerboseMode passed along a verbose mode flag to tests + VerboseMode bool + + // Timestamp enables timestamps on the console output. + Timestamp bool + + // Testflags contains extra go test flags to be set when running tests + TestFlags string + + // ExtraEnv contains extra environment flags to pass to the runner. + ExtraEnv map[string]string +} + +// Validate returns an error if the information is invalid. +func (c *Config) Validate() error { + if c.AgentVersion == "" { + return errors.New("field AgentVersion must be set") + } + if c.StackVersion == "" { + return errors.New("field StackVersion must be set") + } + if c.BuildDir == "" { + return errors.New("field BuildDir must be set") + } + if c.GOVersion == "" { + return errors.New("field GOVersion must be set") + } + if c.RepoDir == "" { + return errors.New("field RepoDir must be set") + } + if c.StateDir == "" { + return errors.New("field StateDir must be set") + } + _, err := c.GetPlatforms() + if err != nil { + return err + } + return nil +} + +// GetPlatforms returns the defined platforms for the configuration. +func (c *Config) GetPlatforms() ([]define.OS, error) { + var each []define.OS + for _, platform := range c.Platforms { + o, err := parsePlatform(platform) + if err != nil { + return nil, err + } + each = append(each, o) + } + return each, nil +} + +func parsePlatform(platform string) (define.OS, error) { + separated := strings.Split(platform, "/") + var os define.OS + switch len(separated) { + case 0: + return define.OS{}, fmt.Errorf("failed to parse platform string %q: empty string", platform) + case 1: + os = define.OS{Type: separated[0]} + case 2: + os = define.OS{Type: separated[0], Arch: separated[1]} + case 3: + if separated[0] == define.Linux { + os = define.OS{Type: separated[0], Arch: separated[1], Distro: separated[2]} + } else { + os = define.OS{Type: separated[0], Arch: separated[1], Version: separated[2]} + } + case 4: + if separated[0] == define.Linux { + os = define.OS{Type: separated[0], Arch: separated[1], Distro: separated[2], Version: separated[3]} + } else if separated[0] == define.Kubernetes { + os = define.OS{Type: separated[0], Arch: separated[1], Version: separated[2], DockerVariant: separated[3]} + } else { + return define.OS{}, fmt.Errorf("failed to parse platform string %q: more than 2 separators", platform) + } + default: + return define.OS{}, fmt.Errorf("failed to parse platform string %q: more than 3 separators", platform) + } + if err := os.Validate(); err != nil { + return define.OS{}, fmt.Errorf("failed to parse platform string %q: %w", platform, err) + } + return os, nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/instance.go b/dev-tools/mage/target/srvrlesstest/testing/common/instance.go new file mode 100644 index 000000000000..e2c8a44a077c --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/common/instance.go @@ -0,0 +1,66 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package common + +import ( + "context" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" +) + +type ProvisionerType uint32 + +const ( + ProvisionerTypeVM ProvisionerType = iota + ProvisionerTypeK8SCluster +) + +// Instance represents a provisioned instance. +type Instance struct { + // Provider is the instance provider for the instance. + // See INSTANCE_PROVISIONER environment variable for the supported providers. + Provider string `yaml:"provider"` + // ID is the identifier of the instance. + // + // This must be the same ID of the OSBatch. + ID string `yaml:"id"` + // Name is the nice-name of the instance. + Name string `yaml:"name"` + // Provisioner is the instance provider for the instance. + // See INSTANCE_PROVISIONER environment variable for the supported Provisioner. + Provisioner string `yaml:"provisioner"` + // IP is the IP address of the instance. + IP string `yaml:"ip"` + // Username is the username used to SSH to the instance. + Username string `yaml:"username"` + // RemotePath is the based path used for performing work on the instance. + RemotePath string `yaml:"remote_path"` + // Internal holds internal information used by the provisioner. + // Best to not touch the contents of this, and leave it be for + // the provisioner. + Internal map[string]interface{} `yaml:"internal"` +} + +// InstanceProvisioner performs the provisioning of instances. +type InstanceProvisioner interface { + // Name returns the name of the instance provisioner. + Name() string + + // Type returns the type of the provisioner. + Type() ProvisionerType + + // SetLogger sets the logger for it to use. + SetLogger(l Logger) + + // Supported returns true of false if the provisioner supports the given batch. + Supported(batch define.OS) bool + + // Provision brings up the machines. + // + // The provision should re-use already prepared instances when possible. + Provision(ctx context.Context, cfg Config, batches []OSBatch) ([]Instance, error) + + // Clean cleans up all provisioned resources. + Clean(ctx context.Context, cfg Config, instances []Instance) error +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/logger.go b/dev-tools/mage/target/srvrlesstest/testing/common/logger.go new file mode 100644 index 000000000000..061678b53347 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/common/logger.go @@ -0,0 +1,11 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package common + +// Logger is a simple logging interface used by each runner type. +type Logger interface { + // Logf logs the message for this runner. + Logf(format string, args ...any) +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/prefix_output.go b/dev-tools/mage/target/srvrlesstest/testing/common/prefix_output.go new file mode 100644 index 000000000000..b3eb98225704 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/common/prefix_output.go @@ -0,0 +1,61 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package common + +import ( + "bytes" + "strings" +) + +// prefixOutput is an `io.Writer` that prefixes each written line with the provided prefix text +type prefixOutput struct { + logger Logger + prefix string + remainder []byte +} + +// NewPrefixOutput creates a prefix output `io.Writer`. +func NewPrefixOutput(logger Logger, prefix string) *prefixOutput { + return &prefixOutput{ + logger: logger, + prefix: prefix, + } +} + +func (r *prefixOutput) Write(p []byte) (int, error) { + if len(p) == 0 { + // nothing to do + return 0, nil + } + offset := 0 + for { + idx := bytes.IndexByte(p[offset:], '\n') + if idx < 0 { + // not all used add to remainder to be used on next call + r.remainder = append(r.remainder, p[offset:]...) + return len(p), nil + } + + var line []byte + if r.remainder != nil { + line = r.remainder + r.remainder = nil + line = append(line, p[offset:offset+idx]...) + } else { + line = append(line, p[offset:offset+idx]...) + } + offset += idx + 1 + // drop '\r' from line (needed for Windows) + if len(line) > 0 && line[len(line)-1] == '\r' { + line = line[0 : len(line)-1] + } + if len(line) == 0 { + // empty line + continue + } + str := strings.TrimSpace(string(line)) + r.logger.Logf("%s%s", r.prefix, str) + } +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/runner.go b/dev-tools/mage/target/srvrlesstest/testing/common/runner.go new file mode 100644 index 000000000000..233462e33d53 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/common/runner.go @@ -0,0 +1,44 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package common + +import ( + "context" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" +) + +// OSRunnerPackageResult is the result for each package. +type OSRunnerPackageResult struct { + // Name is the package name. + Name string + // Output is the raw test output. + Output []byte + // XMLOutput is the XML Junit output. + XMLOutput []byte + // JSONOutput is the JSON output. + JSONOutput []byte +} + +// OSRunnerResult is the result of the test run provided by a OSRunner. +type OSRunnerResult struct { + // Packages is the results for each package. + Packages []OSRunnerPackageResult + + // SudoPackages is the results for each package that need to run as sudo. + SudoPackages []OSRunnerPackageResult +} + +// OSRunner provides an interface to run the tests on the OS. +type OSRunner interface { + // Prepare prepares the runner to actual run on the host. + Prepare(ctx context.Context, sshClient ssh.SSHClient, logger Logger, arch string, goVersion string) error + // Copy places the required files on the host. + Copy(ctx context.Context, sshClient ssh.SSHClient, logger Logger, repoArchive string, builds []Build) error + // Run runs the actual tests and provides the result. + Run(ctx context.Context, verbose bool, sshClient ssh.SSHClient, logger Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (OSRunnerResult, error) + // Diagnostics gathers any diagnostics from the host. 
+ Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger Logger, destination string) error +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/stack.go b/dev-tools/mage/target/srvrlesstest/testing/common/stack.go new file mode 100644 index 000000000000..3047b340ea0c --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/common/stack.go @@ -0,0 +1,76 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package common + +import "context" + +// Stack is a created stack. +type Stack struct { + // ID is the identifier of the instance. + // + // This must be the same ID used for requesting a stack. + ID string `yaml:"id"` + + // Provisioner is the stack provisioner. See STACK_PROVISIONER environment + // variable for the supported provisioners. + Provisioner string `yaml:"provisioner"` + + // Version is the version of the stack. + Version string `yaml:"version"` + + // Ready determines if the stack is ready to be used. + Ready bool `yaml:"ready"` + + // Elasticsearch is the URL to communicate with elasticsearch. + Elasticsearch string `yaml:"elasticsearch"` + + // Kibana is the URL to communication with kibana. + Kibana string `yaml:"kibana"` + + // Username is the username. + Username string `yaml:"username"` + + // Password is the password. + Password string `yaml:"password"` + + // Internal holds internal information used by the provisioner. + // Best to not touch the contents of this, and leave it be for + // the provisioner. + Internal map[string]interface{} `yaml:"internal"` +} + +// Same returns true if other is the same stack as this one. +// Two stacks are considered the same if their provisioner and ID are the same. +func (s Stack) Same(other Stack) bool { + return s.Provisioner == other.Provisioner && + s.ID == other.ID +} + +// StackRequest request for a new stack. +type StackRequest struct { + // ID is the unique ID for the stack. + ID string `yaml:"id"` + + // Version is the version of the stack. + Version string `yaml:"version"` +} + +// StackProvisioner performs the provisioning of stacks. +type StackProvisioner interface { + // Name returns the name of the stack provisioner. + Name() string + + // SetLogger sets the logger for it to use. + SetLogger(l Logger) + + // Create creates a stack. + Create(ctx context.Context, request StackRequest) (Stack, error) + + // WaitForReady should block until the stack is ready or the context is cancelled. + WaitForReady(ctx context.Context, stack Stack) (Stack, error) + + // Delete deletes the stack. + Delete(ctx context.Context, stack Stack) error +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/common/supported.go b/dev-tools/mage/target/srvrlesstest/testing/common/supported.go new file mode 100644 index 000000000000..c896cbca74d9 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/common/supported.go @@ -0,0 +1,15 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package common + +import "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + +// SupportedOS maps a OS definition to a OSRunner. 
+type SupportedOS struct { + define.OS + + // Runner is the runner to use for the OS. + Runner OSRunner +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/define/batch.go b/dev-tools/mage/target/srvrlesstest/testing/define/batch.go new file mode 100644 index 000000000000..be254dec6ebf --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/define/batch.go @@ -0,0 +1,307 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package define + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +// defaultOS is the set of OS that are used in the case that a requirement doesn't define any +var defaultOS = []OS{ + { + Type: Darwin, + Arch: AMD64, + }, + { + Type: Darwin, + Arch: ARM64, + }, + { + Type: Linux, + Arch: AMD64, + }, + { + Type: Linux, + Arch: ARM64, + }, + { + Type: Windows, + Arch: AMD64, + }, +} + +// Batch is a grouping of tests that all have the same requirements. +type Batch struct { + // Group must be set on each test to define which group the tests belongs. + // Tests that are in the same group are executed on the same runner. + Group string `json:"group"` + + // OS defines the operating systems this test batch needs. + OS OS `json:"os"` + + // Stack defines the stack required for this batch. + Stack *Stack `json:"stack,omitempty"` + + // Tests define the set of packages and tests that do not require sudo + // privileges to be performed. + Tests []BatchPackageTests `json:"tests"` + + // SudoTests define the set of packages and tests that do require sudo + // privileges to be performed. + SudoTests []BatchPackageTests `json:"sudo_tests"` +} + +// BatchPackageTests is a package and its tests that belong to a batch. +type BatchPackageTests struct { + // Name is the package name. + Name string `json:"name"` + // Tests is the set of tests in the package. + Tests []BatchPackageTest `json:"tests"` +} + +// BatchPackageTest is a specific test in a package. +type BatchPackageTest struct { + // Name of the test. + Name string `json:"name"` + // Stack needed for test. + Stack bool `json:"stack"` +} + +// DetermineBatches parses the package directory with the possible extra build +// tags to determine the set of batches for the package. +func DetermineBatches(dir string, testFlags string, buildTags ...string) ([]Batch, error) { + const ( + defineMatcher = "define skip; requirements: " + ) + + // the 'define' build tag is added so that the `define.Require` skips and + // logs the requirements for each test. + buildTags = append(buildTags, "define") + + // 'go test' wants a directory path to either be absolute or start with + // './' so it knows it's a directory and not package. + if !filepath.IsAbs(dir) && !strings.HasPrefix(dir, "./") { + dir = "./" + dir + } + + // run 'go test' and collect the JSON output to be parsed + // #nosec G204 -- test function code, it will be okay + cmdArgs := []string{"test", "-v", "--tags", strings.Join(buildTags, ","), "-json"} + if testFlags != "" { + flags := strings.Split(testFlags, " ") + cmdArgs = append(cmdArgs, flags...) + } + + cmdArgs = append(cmdArgs, dir) + testCmd := exec.Command("go", cmdArgs...) + output, err := testCmd.Output() + if err != nil { + // format cmdArgs to make the error message more coherent + cmdArgs = append([]string{"go"}, cmdArgs...) 
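	// Editor's note (added for illustration, not part of the patch): for the call made from
	// integRunnerOnce, DetermineBatches("testing/integration", goTestFlags, "integration"),
	// the command assembled at this point is roughly
	//
	//	go test -v --tags integration,define -json ./testing/integration
	//
	// with any extra GOTEST_FLAGS inserted before the directory argument.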
+ + var errExit *exec.ExitError + if errors.As(err, &errExit) { + b := bytes.NewBuffer(errExit.Stderr) + b.Write(output) + output = b.Bytes() + } + return nil, fmt.Errorf( + "error running go test: (%w), got:\n\n%s\ntried to run: %v", + err, string(output), cmdArgs) + } + + // parses each test and determine the batches that each test belongs in + var batches []Batch + sc := bufio.NewScanner(bytes.NewReader(output)) + for sc.Scan() { + var tar testActionResult + err := json.Unmarshal([]byte(sc.Text()), &tar) + if err != nil { + return nil, err + } + if tar.Action == "output" && strings.Contains(tar.Output, defineMatcher) { + reqRaw := tar.Output[strings.Index(tar.Output, defineMatcher)+len(defineMatcher) : strings.LastIndex(tar.Output, "\n")] + var req Requirements + err := json.Unmarshal([]byte(reqRaw), &req) + if err != nil { + return nil, fmt.Errorf("failed to parse requirements JSON from test %s/%s: %w", tar.Package, tar.Test, err) + } + err = req.Validate() + if err != nil { + return nil, fmt.Errorf("parsed requirements are invalid JSON from test %s/%s: %w", tar.Package, tar.Test, err) + } + batches = appendTest(batches, tar, req) + } + } + return batches, nil +} + +func appendTest(batches []Batch, tar testActionResult, req Requirements) []Batch { + var set []OS + for _, o := range req.OS { + if o.Arch == "" { + set = append(set, OS{ + Type: o.Type, + Arch: AMD64, + Version: o.Version, + Distro: o.Distro, + DockerVariant: o.DockerVariant, + }) + if o.Type != Windows { + set = append(set, OS{ + Type: o.Type, + Arch: ARM64, + Version: o.Version, + Distro: o.Distro, + DockerVariant: o.DockerVariant, + }) + } + } else { + set = append(set, OS{ + Type: o.Type, + Arch: o.Arch, + Version: o.Version, + Distro: o.Distro, + DockerVariant: o.DockerVariant, + }) + } + } + if len(set) == 0 { + // no os define; means the test supports all + set = defaultOS + } + for _, o := range set { + var batch Batch + batchIdx := findBatchIdx(batches, req.Group, o, req.Stack) + if batchIdx == -1 { + // new batch required + batch = Batch{ + Group: req.Group, + OS: o, + Tests: nil, + SudoTests: nil, + } + batches = append(batches, batch) + batchIdx = len(batches) - 1 + } + batch = batches[batchIdx] + if o.Distro != "" { + batch.OS.Distro = o.Distro + } + if o.Version != "" { + batch.OS.Version = o.Version + } + if o.DockerVariant != "" { + batch.OS.DockerVariant = o.DockerVariant + } + if req.Stack != nil && batch.Stack == nil { + // assign the stack to this batch + batch.Stack = copyStack(req.Stack) + } + if req.Sudo { + batch.SudoTests = appendPackageTest(batch.SudoTests, tar.Package, tar.Test, req.Stack != nil) + } else { + batch.Tests = appendPackageTest(batch.Tests, tar.Package, tar.Test, req.Stack != nil) + } + batches[batchIdx] = batch + } + return batches +} + +func appendPackageTest(tests []BatchPackageTests, pkg string, name string, stack bool) []BatchPackageTests { + for i, pt := range tests { + if pt.Name == pkg { + for _, testName := range pt.Tests { + if testName.Name == name { + // we already selected this test for this package for this batch, + // we can return immediately + return tests + } + } + pt.Tests = append(pt.Tests, BatchPackageTest{ + Name: name, + Stack: stack, + }) + tests[i] = pt + return tests + } + } + var pt BatchPackageTests + pt.Name = pkg + pt.Tests = append(pt.Tests, BatchPackageTest{ + Name: name, + Stack: stack, + }) + tests = append(tests, pt) + return tests +} + +func findBatchIdx(batches []Batch, group string, os OS, stack *Stack) int { + for i, b := range batches { + if 
b.Group != group { + // must be in the same group + continue + } + if b.OS.Type != os.Type || b.OS.Arch != os.Arch { + // must be same type and arch both are always defined at this point + continue + } + if os.Distro != "" { + // must have the same distro + if b.OS.Distro != "" && b.OS.Distro != os.Distro { + continue + } + } + if os.Version != "" { + // must have the same version + if b.OS.Version != "" && b.OS.Version != os.Version { + continue + } + } + if os.DockerVariant != "" { + // must be the same docker image + if b.OS.DockerVariant != "" && b.OS.DockerVariant != os.DockerVariant { + continue + } + } + if stack == nil { + // don't care if the batch has a cloud or not + return i + } + if b.Stack == nil { + // need cloud, but batch doesn't have cloud calling code can set it + return i + } + if b.Stack.Version == stack.Version { + // same cloud version; compatible + return i + } + } + return -1 +} + +func copyStack(stack *Stack) *Stack { + var s Stack + if stack != nil { + s = *stack + return &s + } + return nil +} + +type testActionResult struct { + Time string `json:"Time"` + Action string `json:"Action"` + Package string `json:"Package"` + Test string `json:"Test"` + Output string `json:"Output"` +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/define/requirements.go b/dev-tools/mage/target/srvrlesstest/testing/define/requirements.go new file mode 100644 index 000000000000..c269b4466c2b --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/define/requirements.go @@ -0,0 +1,167 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package define + +import ( + "errors" + "fmt" + + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/component" +) + +const ( + // Default constant can be used as the default group for tests. + Default = "default" +) + +const ( + // Darwin is macOS platform + Darwin = component.Darwin + // Linux is Linux platform + Linux = component.Linux + // Windows is Windows platform + Windows = component.Windows + // Kubernetes is Kubernetes platform + Kubernetes = "kubernetes" +) + +const ( + // AMD64 is amd64 architecture + AMD64 = component.AMD64 + // ARM64 is arm64 architecture + ARM64 = component.ARM64 +) + +// OS defines an operating system, architecture, version and distribution combination. +type OS struct { + // Type is the operating system type (darwin, linux or windows). + // + // This is always required to be defined on the OS structure. + // If it is not defined the test runner will error. + Type string `json:"type"` + // Arch is the architecture type (amd64 or arm64). + // + // In the case that it's not provided the test will run on every + // architecture that is supported. + Arch string `json:"arch"` + // Version is a specific version of the OS type to run this test on + // + // When defined the test runs on this specific version only. When not + // defined the test is run on a selected version for this operating system. + Version string `json:"version"` + // Distro allows in the Linux case for a specific distribution to be + // selected for running on. Example would be "ubuntu". In the Kubernetes case + // for a specific distribution of kubernetes. Example would be "kind". + Distro string `json:"distro"` + // DockerVariant allows in the Kubernetes case for a specific variant to + // be selected for running with. 
Example would be "wolfi". + DockerVariant string `json:"docker_variant"` +} + +// Validate returns an error if not valid. +func (o OS) Validate() error { + if o.Type == "" { + return errors.New("type must be defined") + } + if o.Type != Darwin && o.Type != Linux && o.Type != Windows && o.Type != Kubernetes { + return errors.New("type must be either darwin, linux, windows, or kubernetes") + } + if o.Arch != "" { + if o.Arch != AMD64 && o.Arch != ARM64 { + return errors.New("arch must be either amd64 or arm64") + } + if o.Type == Windows && o.Arch == ARM64 { + return errors.New("windows on arm64 not supported") + } + } + if o.Distro != "" && (o.Type != Linux && o.Type != Kubernetes) { + return errors.New("distro can only be set when type is linux or kubernetes") + } + if o.DockerVariant != "" && o.Type != Kubernetes { + return errors.New("docker variant can only be set when type is kubernetes") + } + return nil +} + +// Stack defines the stack required for the test. +type Stack struct { + // Version defines a specific stack version to create for this test. + // + // In the case that no version is provided the same version being used for + // the current test execution is used. + Version string `json:"version"` +} + +// Requirements defines the testing requirements for the test to run. +type Requirements struct { + // Group must be set on each test to define which group the tests belongs to. + // Tests that are in the same group are executed on the same runner. + // + // Useful when tests take a long time to complete and sharding them across multiple + // hosts can improve the total amount of time to complete all the tests. + Group string `json:"group"` + + // OS defines the operating systems this test can run on. In the case + // multiple are provided the test is ran multiple times one time on each + // combination. + OS []OS `json:"os,omitempty"` + + // Stack defines the stack required for the test. + Stack *Stack `json:"stack,omitempty"` + + // Local defines if this test can safely be performed on a local development machine. + // If not set then the test will not be performed when local only testing is performed. + // + // This doesn't mean this test can only run locally. It will still run on defined OS's + // when a full test run is performed. + Local bool `json:"local"` + + // Sudo defines that this test must run under superuser permissions. On Mac and Linux the + // test gets executed under sudo and on Windows it gets run under Administrator. + Sudo bool `json:"sudo"` +} + +// Validate returns an error if not valid. +func (r Requirements) Validate() error { + if r.Group == "" { + return errors.New("group is required") + } + for i, o := range r.OS { + if err := o.Validate(); err != nil { + return fmt.Errorf("invalid os %d: %w", i, err) + } + } + return nil +} + +// runtimeAllowed returns true if the runtime matches a valid OS. 
+func (r Requirements) runtimeAllowed(os string, arch string, version string, distro string) bool { + if len(r.OS) == 0 { + // all allowed + return true + } + for _, o := range r.OS { + if o.Type != Kubernetes && o.Type != os { + // not valid on this runtime + continue + } + if o.Arch != "" && o.Arch != arch { + // not allowed on specific architecture + continue + } + if o.Version != "" && o.Version != version { + // not allowed on specific version + continue + } + if o.Distro != "" && o.Distro != distro { + // not allowed on specific distro + continue + } + // allowed + return true + } + // made it this far, not allowed + return false +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/client.go b/dev-tools/mage/target/srvrlesstest/testing/ess/client.go new file mode 100644 index 000000000000..4551a59cf4fa --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/client.go @@ -0,0 +1,66 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ess + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" +) + +type Client struct { + config *Config + client *http.Client +} + +func NewClient(config Config) *Client { + cfg := defaultConfig() + cfg.Merge(config) + + c := new(Client) + c.client = http.DefaultClient + c.config = cfg + + return c +} + +func (c *Client) doGet(ctx context.Context, relativeUrl string) (*http.Response, error) { + u, err := url.JoinPath(c.config.BaseUrl, relativeUrl) + if err != nil { + return nil, fmt.Errorf("unable to create API URL: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + return nil, fmt.Errorf("unable to create GET request: %w", err) + } + + req.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", c.config.ApiKey)) + + return c.client.Do(req) +} + +func (c *Client) doPost(ctx context.Context, relativeUrl, contentType string, body io.Reader) (*http.Response, error) { + u, err := url.JoinPath(c.config.BaseUrl, relativeUrl) + if err != nil { + return nil, fmt.Errorf("unable to create API URL: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, body) + if err != nil { + return nil, fmt.Errorf("unable to create POST request: %w", err) + } + + req.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", c.config.ApiKey)) + req.Header.Set("Content-Type", contentType) + + return c.client.Do(req) +} + +func (c *Client) BaseURL() string { + return c.config.BaseUrl +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/config.go b/dev-tools/mage/target/srvrlesstest/testing/ess/config.go new file mode 100644 index 000000000000..9e3590313004 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/config.go @@ -0,0 +1,73 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
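Editor's sketch (added for illustration, not part of the patch): the Client above signs every request with an "Authorization: ApiKey ..." header taken from its Config. A hypothetical caller, combining it with GetESSAPIKey from the config.go file that follows, might look like this; ctx, imports and error handling are elided.

	key, found, err := ess.GetESSAPIKey()
	if err != nil || !found {
		log.Fatal("no ESS API key available; run 'mage integration:auth'")
	}
	client := ess.NewClient(ess.Config{ApiKey: key})
	// client.BaseURL() now reports the API root (https://cloud.elastic.co/api/v1 by default),
	// and the CreateDeployment / ShutdownDeployment methods defined in deployment.go below
	// reuse the doGet/doPost helpers above with this ApiKey authentication.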
+ +package ess + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +type Config struct { + BaseUrl string `json:"base_url" yaml:"base_url"` + ApiKey string `json:"api_key" yaml:"api_key"` +} + +func defaultConfig() *Config { + baseURL := os.Getenv("TEST_INTEG_AUTH_ESS_URL") + if baseURL == "" { + baseURL = "https://cloud.elastic.co" + } + url := strings.TrimRight(baseURL, "/") + "/api/v1" + return &Config{ + BaseUrl: url, + } +} + +// Merge overlays the provided configuration on top of +// this configuration. +func (c *Config) Merge(anotherConfig Config) { + if anotherConfig.BaseUrl != "" { + c.BaseUrl = anotherConfig.BaseUrl + } + + if anotherConfig.ApiKey != "" { + c.ApiKey = anotherConfig.ApiKey + } +} + +// GetESSAPIKey returns the ESS API key, if it exists +func GetESSAPIKey() (string, bool, error) { + essAPIKeyFile, err := GetESSAPIKeyFilePath() + if err != nil { + return "", false, err + } + _, err = os.Stat(essAPIKeyFile) + if os.IsNotExist(err) { + return "", false, nil + } else if err != nil { + return "", false, fmt.Errorf("unable to check if ESS config directory exists: %w", err) + } + data, err := os.ReadFile(essAPIKeyFile) + if err != nil { + return "", true, fmt.Errorf("unable to read ESS API key: %w", err) + } + essAPIKey := strings.TrimSpace(string(data)) + return essAPIKey, true, nil +} + +// GetESSAPIKeyFilePath returns the path to the ESS API key file +func GetESSAPIKeyFilePath() (string, error) { + essAPIKeyFile := os.Getenv("TEST_INTEG_AUTH_ESS_APIKEY_FILE") + if essAPIKeyFile == "" { + homeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("unable to determine user's home directory: %w", err) + } + essAPIKeyFile = filepath.Join(homeDir, ".config", "ess", "api_key.txt") + } + return essAPIKeyFile, nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_csp_configuration.yaml b/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_csp_configuration.yaml new file mode 100644 index 000000000000..199f664a65a6 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_csp_configuration.yaml @@ -0,0 +1,15 @@ +gcp: + integrations_server_conf_id: "gcp.integrationsserver.n2.68x32x45.2" + elasticsearch_conf_id: "gcp.es.datahot.n2.68x10x45" + elasticsearch_deployment_template_id: "gcp-storage-optimized-v5" + kibana_instance_configuration_id: "gcp.kibana.n2.68x32x45" +azure: + integrations_server_conf_id: "azure.integrationsserver.fsv2.2" + elasticsearch_conf_id: "azure.es.datahot.edsv4" + elasticsearch_deployment_template_id: "azure-storage-optimized-v2" + kibana_instance_configuration_id: "azure.kibana.fsv2" +aws: + integrations_server_conf_id: "aws.integrationsserver.c5d.2.1" + elasticsearch_conf_id: "aws.es.datahot.i3.1.1" + elasticsearch_deployment_template_id: "aws-storage-optimized-v5" + kibana_instance_configuration_id: "aws.kibana.c5d.1.1" \ No newline at end of file diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_request.tmpl.json b/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_request.tmpl.json new file mode 100644 index 000000000000..3ef93868708f --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/create_deployment_request.tmpl.json @@ -0,0 +1,102 @@ +{ + "resources": { + "integrations_server": [ + { + "elasticsearch_cluster_ref_id": "main-elasticsearch", + "region": "{{ .request.Region }}", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "{{ .integrations_server_conf_id }}", + 
"zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "integrations_server": { + "version": "{{ .request.Version }}" + } + }, + "ref_id": "main-integrations_server" + } + ], + "elasticsearch": [ + { + "region": "{{ .request.Region }}", + "settings": { + "dedicated_masters_threshold": 6 + }, + "plan": { + "cluster_topology": [ + { + "zone_count": 1, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "instance_configuration_id": "{{.elasticsearch_conf_id}}", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + } + ], + "elasticsearch": { + "version": "{{ .request.Version }}", + "enabled_built_in_plugins": [] + }, + "deployment_template": { + "id": "{{ .elasticsearch_deployment_template_id }}" + } + }, + "ref_id": "main-elasticsearch" + } + ], + "enterprise_search": [], + "kibana": [ + { + "elasticsearch_cluster_ref_id": "main-elasticsearch", + "region": "{{ .request.Region }}", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "{{.kibana_instance_configuration_id}}", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "kibana": { + "version": "{{ .request.Version }}", + "user_settings_json": { + "xpack.fleet.enableExperimental": ["agentTamperProtectionEnabled"] + } + } + }, + "ref_id": "main-kibana" + } + ] + }, + "settings": { + "autoscaling_enabled": false + }, + "name": "{{ .request.Name }}", + "metadata": { + "system_owned": false, + "tags": {{ json .request.Tags }} + } +} \ No newline at end of file diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/deployment.go b/dev-tools/mage/target/srvrlesstest/testing/ess/deployment.go new file mode 100644 index 000000000000..6e84cd667090 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/deployment.go @@ -0,0 +1,388 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package ess + +import ( + "bytes" + "context" + _ "embed" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "text/template" + "time" + + "gopkg.in/yaml.v2" +) + +type Tag struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type CreateDeploymentRequest struct { + Name string `json:"name"` + Region string `json:"region"` + Version string `json:"version"` + Tags []Tag `json:"tags"` +} + +type CreateDeploymentResponse struct { + ID string `json:"id"` + + ElasticsearchEndpoint string + KibanaEndpoint string + + Username string + Password string +} + +type GetDeploymentResponse struct { + Elasticsearch struct { + Status DeploymentStatus + ServiceUrl string + } + Kibana struct { + Status DeploymentStatus + ServiceUrl string + } + IntegrationsServer struct { + Status DeploymentStatus + ServiceUrl string + } +} + +type DeploymentStatus string + +func (d *DeploymentStatus) UnmarshalJSON(data []byte) error { + var status string + if err := json.Unmarshal(data, &status); err != nil { + return err + } + + switch status { + case string(DeploymentStatusInitializing), string(DeploymentStatusReconfiguring), string(DeploymentStatusStarted): + *d = DeploymentStatus(status) + default: + return fmt.Errorf("unknown status: [%s]", status) + } + + return nil +} + +func (d *DeploymentStatus) String() string { + return string(*d) +} + +const ( + DeploymentStatusInitializing DeploymentStatus = "initializing" + DeploymentStatusReconfiguring DeploymentStatus = "reconfiguring" + DeploymentStatusStarted DeploymentStatus = "started" +) + +type DeploymentStatusResponse struct { + Overall DeploymentStatus + + Elasticsearch DeploymentStatus + Kibana DeploymentStatus + IntegrationsServer DeploymentStatus +} + +// CreateDeployment creates the deployment with the specified configuration. 
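	// Editor's example (illustrative values, not part of the patch): a minimal request uses the
	// CreateDeploymentRequest fields defined above,
	//
	//	req := CreateDeploymentRequest{
	//		Name:    "agent-testing-example",
	//		Region:  "gcp-us-west2",
	//		Version: "8.16.0-SNAPSHOT",
	//		Tags:    []Tag{{Key: "division", Value: "engineering"}},
	//	}
	//
	// and the response carries the deployment ID, the Elasticsearch and Kibana endpoints, and
	// the elasticsearch credentials extracted from the creation response below.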
+func (c *Client) CreateDeployment(ctx context.Context, req CreateDeploymentRequest) (*CreateDeploymentResponse, error) { + reqBodyBytes, err := generateCreateDeploymentRequestBody(req) + if err != nil { + return nil, err + } + + createResp, err := c.doPost( + ctx, + "deployments", + "application/json", + bytes.NewReader(reqBodyBytes), + ) + if err != nil { + return nil, fmt.Errorf("error calling deployment creation API: %w", err) + } + defer createResp.Body.Close() + + var createRespBody struct { + ID string `json:"id"` + Resources []struct { + Kind string `json:"kind"` + Credentials struct { + Username string `json:"username"` + Password string `json:"password"` + } `json:"credentials"` + } `json:"resources"` + Errors []struct { + Code string `json:"code"` + Message string `json:"message"` + } `json:"errors"` + } + + if err := json.NewDecoder(createResp.Body).Decode(&createRespBody); err != nil { + return nil, fmt.Errorf("error parsing deployment creation API response: %w", err) + } + + if len(createRespBody.Errors) > 0 { + return nil, fmt.Errorf("failed to create: (%s) %s", createRespBody.Errors[0].Code, createRespBody.Errors[0].Message) + } + + r := CreateDeploymentResponse{ + ID: createRespBody.ID, + } + + for _, resource := range createRespBody.Resources { + if resource.Kind == "elasticsearch" { + r.Username = resource.Credentials.Username + r.Password = resource.Credentials.Password + break + } + } + + // Get Elasticsearch and Kibana endpoint URLs + getResp, err := c.getDeployment(ctx, r.ID) + if err != nil { + return nil, fmt.Errorf("error calling deployment retrieval API: %w", err) + } + defer getResp.Body.Close() + + var getRespBody struct { + Resources struct { + Elasticsearch []struct { + Info struct { + Metadata struct { + ServiceUrl string `json:"service_url"` + } `json:"metadata"` + } `json:"info"` + } `json:"elasticsearch"` + Kibana []struct { + Info struct { + Metadata struct { + ServiceUrl string `json:"service_url"` + } `json:"metadata"` + } `json:"info"` + } `json:"kibana"` + } `json:"resources"` + } + + if err := json.NewDecoder(getResp.Body).Decode(&getRespBody); err != nil { + return nil, fmt.Errorf("error parsing deployment retrieval API response: %w", err) + } + + r.ElasticsearchEndpoint = getRespBody.Resources.Elasticsearch[0].Info.Metadata.ServiceUrl + r.KibanaEndpoint = getRespBody.Resources.Kibana[0].Info.Metadata.ServiceUrl + + return &r, nil +} + +// ShutdownDeployment attempts to shut down the ESS deployment with the specified ID. +func (c *Client) ShutdownDeployment(ctx context.Context, deploymentID string) error { + u, err := url.JoinPath("deployments", deploymentID, "_shutdown") + if err != nil { + return fmt.Errorf("unable to create deployment shutdown API URL: %w", err) + } + + res, err := c.doPost(ctx, u, "", nil) + if err != nil { + return fmt.Errorf("error calling deployment shutdown API: %w", err) + } + defer res.Body.Close() + + if res.StatusCode != 200 { + resBytes, _ := io.ReadAll(res.Body) + return fmt.Errorf("got unexpected response code [%d] from deployment shutdown API: %s", res.StatusCode, string(resBytes)) + } + + return nil +} + +// DeploymentStatus returns the overall status of the deployment as well as statuses of every component. 
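	// Editor's note (added for illustration, not part of the patch): the overall status follows
	// the weakest component, e.g. if Elasticsearch and Kibana report "started" while the
	// integrations server is still "initializing", Overall is "initializing"; only when every
	// component reports "started" does Overall become "started" (see overallStatus below).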
+func (c *Client) DeploymentStatus(ctx context.Context, deploymentID string) (*DeploymentStatusResponse, error) { + getResp, err := c.getDeployment(ctx, deploymentID) + if err != nil { + return nil, fmt.Errorf("error calling deployment retrieval API: %w", err) + } + defer getResp.Body.Close() + + var getRespBody struct { + Resources struct { + Elasticsearch []struct { + Info struct { + Status DeploymentStatus `json:"status"` + } `json:"info"` + } `json:"elasticsearch"` + Kibana []struct { + Info struct { + Status DeploymentStatus `json:"status"` + } `json:"info"` + } `json:"kibana"` + IntegrationsServer []struct { + Info struct { + Status DeploymentStatus `json:"status"` + } `json:"info"` + } `json:"integrations_server"` + } `json:"resources"` + } + + if err := json.NewDecoder(getResp.Body).Decode(&getRespBody); err != nil { + return nil, fmt.Errorf("error parsing deployment retrieval API response: %w", err) + } + + s := DeploymentStatusResponse{ + Elasticsearch: getRespBody.Resources.Elasticsearch[0].Info.Status, + Kibana: getRespBody.Resources.Kibana[0].Info.Status, + IntegrationsServer: getRespBody.Resources.IntegrationsServer[0].Info.Status, + } + s.Overall = overallStatus(s.Elasticsearch, s.Kibana, s.IntegrationsServer) + + return &s, nil +} + +// DeploymentIsReady returns true when the deployment is ready, checking its status +// every `tick` until `waitFor` duration. +func (c *Client) DeploymentIsReady(ctx context.Context, deploymentID string, tick time.Duration) (bool, error) { + ticker := time.NewTicker(tick) + defer ticker.Stop() + + var errs error + statusCh := make(chan DeploymentStatus, 1) + for { + select { + case <-ctx.Done(): + return false, errors.Join(errs, ctx.Err()) + case <-ticker.C: + go func() { + statusCtx, statusCancel := context.WithTimeout(ctx, tick) + defer statusCancel() + status, err := c.DeploymentStatus(statusCtx, deploymentID) + if err != nil { + errs = errors.Join(errs, err) + return + } + statusCh <- status.Overall + }() + case status := <-statusCh: + if status == DeploymentStatusStarted { + return true, nil + } + } + } +} + +func (c *Client) getDeployment(ctx context.Context, deploymentID string) (*http.Response, error) { + u, err := url.JoinPath("deployments", deploymentID) + if err != nil { + return nil, fmt.Errorf("unable to create deployment retrieval API URL: %w", err) + } + + return c.doGet(ctx, u) +} + +func overallStatus(statuses ...DeploymentStatus) DeploymentStatus { + // The overall status is started if every component's status is started. Otherwise, + // we take the non-started components' statuses and pick the first one as the overall + // status. + statusMap := map[DeploymentStatus]struct{}{} + for _, status := range statuses { + statusMap[status] = struct{}{} + } + + if len(statusMap) == 1 { + if _, allStarted := statusMap[DeploymentStatusStarted]; allStarted { + return DeploymentStatusStarted + } + } + + var overallStatus DeploymentStatus + for _, status := range statuses { + if status != DeploymentStatusStarted { + overallStatus = status + break + } + } + + return overallStatus +} + +//go:embed create_deployment_request.tmpl.json +var createDeploymentRequestTemplate string + +//go:embed create_deployment_csp_configuration.yaml +var cloudProviderSpecificValues []byte + +func generateCreateDeploymentRequestBody(req CreateDeploymentRequest) ([]byte, error) { + var csp string + // Special case: AWS us-east-1 region is just called + // us-east-1 (instead of aws-us-east-1)! 
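	// Editor's note (added for illustration, not part of the patch): in every other case the CSP
	// is taken from the first region segment, so "gcp-us-west2" selects the gcp block and
	// "azure-eastus" the azure block of create_deployment_csp_configuration.yaml, while plain
	// "us-east-1" maps to aws via this special case. The selected block is then merged with the
	// request under the "request" key by createDeploymentTemplateContext below, which is how
	// placeholders such as {{ .elasticsearch_deployment_template_id }} and
	// {{ json .request.Tags }} in create_deployment_request.tmpl.json resolve.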
+ if req.Region == "us-east-1" { + csp = "aws" + } else { + regionParts := strings.Split(req.Region, "-") + if len(regionParts) < 2 { + return nil, fmt.Errorf("unable to parse CSP out of region [%s]", req.Region) + } + + csp = regionParts[0] + } + templateContext, err := createDeploymentTemplateContext(csp, req) + if err != nil { + return nil, fmt.Errorf("creating request template context: %w", err) + } + + tpl, err := template.New("create_deployment_request"). + Funcs(template.FuncMap{"json": jsonMarshal}). + Parse(createDeploymentRequestTemplate) + if err != nil { + return nil, fmt.Errorf("unable to parse deployment creation template: %w", err) + } + + var bBuf bytes.Buffer + err = tpl.Execute(&bBuf, templateContext) + if err != nil { + return nil, fmt.Errorf("rendering create deployment request template with context %v : %w", templateContext, err) + } + return bBuf.Bytes(), nil +} + +func jsonMarshal(in any) (string, error) { + jsonBytes, err := json.Marshal(in) + if err != nil { + return "", err + } + + return string(jsonBytes), nil +} + +func createDeploymentTemplateContext(csp string, req CreateDeploymentRequest) (map[string]any, error) { + cspSpecificContext, err := loadCspValues(csp) + if err != nil { + return nil, fmt.Errorf("loading csp-specific values for %q: %w", csp, err) + } + + cspSpecificContext["request"] = req + + return cspSpecificContext, nil +} + +func loadCspValues(csp string) (map[string]any, error) { + var cspValues map[string]map[string]any + + err := yaml.Unmarshal(cloudProviderSpecificValues, &cspValues) + if err != nil { + return nil, fmt.Errorf("unmarshalling error: %w", err) + } + values, supportedCSP := cspValues[csp] + if !supportedCSP { + return nil, fmt.Errorf("csp %s not supported", csp) + } + + return values, nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/serverless.go b/dev-tools/mage/target/srvrlesstest/testing/ess/serverless.go new file mode 100644 index 000000000000..78155d089987 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/serverless.go @@ -0,0 +1,318 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package ess + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "io" + "net/http" + "strings" + "time" +) + +var serverlessURL = "https://cloud.elastic.co" + +// ServerlessClient is the handler the serverless ES instance +type ServerlessClient struct { + region string + projectType string + api string + proj Project + log common.Logger +} + +// ServerlessRequest contains the data needed for a new serverless instance +type ServerlessRequest struct { + Name string `json:"name"` + RegionID string `json:"region_id"` +} + +// Project represents a serverless project +type Project struct { + Name string `json:"name"` + ID string `json:"id"` + Type string `json:"type"` + Region string `json:"region_id"` + + Credentials struct { + Username string `json:"username"` + Password string `json:"password"` + } `json:"credentials"` + + Endpoints struct { + Elasticsearch string `json:"elasticsearch"` + Kibana string `json:"kibana"` + Fleet string `json:"fleet,omitempty"` + APM string `json:"apm,omitempty"` + } `json:"endpoints"` +} + +// CredResetResponse contains the new auth details for a +// stack credential reset +type CredResetResponse struct { + Password string `json:"password"` + Username string `json:"username"` +} + +// NewServerlessClient creates a new instance of the serverless client +func NewServerlessClient(region, projectType, api string, logger common.Logger) *ServerlessClient { + return &ServerlessClient{ + region: region, + api: api, + projectType: projectType, + log: logger, + } +} + +// DeployStack creates a new serverless elastic stack +func (srv *ServerlessClient) DeployStack(ctx context.Context, req ServerlessRequest) (Project, error) { + reqBody, err := json.Marshal(req) + if err != nil { + return Project{}, fmt.Errorf("error marshaling JSON request %w", err) + } + urlPath := fmt.Sprintf("%s/api/v1/serverless/projects/%s", serverlessURL, srv.projectType) + + httpHandler, err := http.NewRequestWithContext(ctx, "POST", urlPath, bytes.NewReader(reqBody)) + if err != nil { + return Project{}, fmt.Errorf("error creating new httpRequest: %w", err) + } + + httpHandler.Header.Set("Content-Type", "application/json") + httpHandler.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", srv.api)) + + resp, err := http.DefaultClient.Do(httpHandler) + if err != nil { + return Project{}, fmt.Errorf("error performing HTTP request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + p, _ := io.ReadAll(resp.Body) + return Project{}, fmt.Errorf("Non-201 status code returned by server: %d, body: %s", resp.StatusCode, string(p)) + } + + serverlessHandle := Project{} + err = json.NewDecoder(resp.Body).Decode(&serverlessHandle) + if err != nil { + return Project{}, fmt.Errorf("error decoding JSON response: %w", err) + } + srv.proj = serverlessHandle + + // as of 8/8-ish, the serverless ESS cloud no longer provides credentials on the first POST request, we must send an additional POST + // to reset the credentials + updated, err := srv.ResetCredentials(ctx) + if err != nil { + return serverlessHandle, fmt.Errorf("error resetting credentials: %w", err) + } + srv.proj.Credentials.Username = updated.Username + srv.proj.Credentials.Password = updated.Password + + return serverlessHandle, nil +} + +// DeploymentIsReady returns true when the serverless deployment is healthy and ready +func (srv *ServerlessClient) DeploymentIsReady(ctx context.Context) (bool, error) { + err := 
srv.WaitForEndpoints(ctx) + if err != nil { + return false, fmt.Errorf("error waiting for endpoints to become available: %w", err) + } + srv.log.Logf("Endpoints available: ES: %s Fleet: %s Kibana: %s", srv.proj.Endpoints.Elasticsearch, srv.proj.Endpoints.Fleet, srv.proj.Endpoints.Kibana) + err = srv.WaitForElasticsearch(ctx) + if err != nil { + return false, fmt.Errorf("error waiting for ES to become available: %w", err) + } + srv.log.Logf("Elasticsearch healthy...") + err = srv.WaitForKibana(ctx) + if err != nil { + return false, fmt.Errorf("error waiting for Kibana to become available: %w", err) + } + srv.log.Logf("Kibana healthy...") + + return true, nil +} + +// DeleteDeployment deletes the deployment +func (srv *ServerlessClient) DeleteDeployment(ctx context.Context) error { + endpoint := fmt.Sprintf("%s/api/v1/serverless/projects/%s/%s", serverlessURL, srv.proj.Type, srv.proj.ID) + req, err := http.NewRequestWithContext(ctx, "DELETE", endpoint, nil) + if err != nil { + return fmt.Errorf("error creating HTTP request: %w", err) + } + req.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", srv.api)) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("error performing delete request: %w", err) + } + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + errBody, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status code %d from %s: %s", resp.StatusCode, req.URL, errBody) + } + return nil +} + +// WaitForEndpoints polls the API and waits until fleet/ES endpoints are available +func (srv *ServerlessClient) WaitForEndpoints(ctx context.Context) error { + reqURL := fmt.Sprintf("%s/api/v1/serverless/projects/%s/%s", serverlessURL, srv.proj.Type, srv.proj.ID) + httpHandler, err := http.NewRequestWithContext(ctx, "GET", reqURL, nil) + if err != nil { + return fmt.Errorf("error creating http request: %w", err) + } + + httpHandler.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", srv.api)) + + readyFunc := func(resp *http.Response) bool { + project := &Project{} + err = json.NewDecoder(resp.Body).Decode(project) + resp.Body.Close() + if err != nil { + srv.log.Logf("response decoding error: %v", err) + return false + } + if project.Endpoints.Elasticsearch != "" { + // fake out the fleet URL, set to ES url + if project.Endpoints.Fleet == "" { + project.Endpoints.Fleet = strings.Replace(project.Endpoints.Elasticsearch, "es.eks", "fleet.eks", 1) + } + + srv.proj.Endpoints = project.Endpoints + return true + } + return false + } + + err = srv.waitForRemoteState(ctx, httpHandler, time.Second*5, readyFunc) + if err != nil { + return fmt.Errorf("error waiting for remote instance to start: %w", err) + } + + return nil +} + +// WaitForElasticsearch waits until the ES endpoint is healthy +func (srv *ServerlessClient) WaitForElasticsearch(ctx context.Context) error { + req, err := http.NewRequestWithContext(ctx, "GET", srv.proj.Endpoints.Elasticsearch, nil) + if err != nil { + return fmt.Errorf("error creating HTTP request: %w", err) + } + req.SetBasicAuth(srv.proj.Credentials.Username, srv.proj.Credentials.Password) + + // _cluster/health no longer works on serverless, just check response code + readyFunc := func(resp *http.Response) bool { + return resp.StatusCode == 200 + } + + err = srv.waitForRemoteState(ctx, req, time.Second*5, readyFunc) + if err != nil { + return fmt.Errorf("error waiting for ES to become healthy: %w", err) + } + return nil +} + +// WaitForKibana waits until the kibana endpoint is healthy +func (srv *ServerlessClient) 
WaitForKibana(ctx context.Context) error {
+	req, err := http.NewRequestWithContext(ctx, "GET", srv.proj.Endpoints.Kibana+"/api/status", nil)
+	if err != nil {
+		return fmt.Errorf("error creating HTTP request: %w", err)
+	}
+	req.SetBasicAuth(srv.proj.Credentials.Username, srv.proj.Credentials.Password)
+
+	readyFunc := func(resp *http.Response) bool {
+		var status struct {
+			Status struct {
+				Overall struct {
+					Level string `json:"level"`
+				} `json:"overall"`
+			} `json:"status"`
+		}
+		err = json.NewDecoder(resp.Body).Decode(&status)
+		resp.Body.Close()
+		if err != nil {
+			srv.log.Logf("response decoding error: %v", err)
+			return false
+		}
+		return status.Status.Overall.Level == "available"
+	}
+
+	err = srv.waitForRemoteState(ctx, req, time.Second*5, readyFunc)
+	if err != nil {
+		return fmt.Errorf("error waiting for Kibana to become healthy: %w", err)
+	}
+	return nil
+}
+
+// ResetCredentials resets the credentials for the given ESS instance
+func (srv *ServerlessClient) ResetCredentials(ctx context.Context) (CredResetResponse, error) {
+	resetURL := fmt.Sprintf("%s/api/v1/serverless/projects/%s/%s/_reset-internal-credentials", serverlessURL, srv.projectType, srv.proj.ID)
+
+	resetHandler, err := http.NewRequestWithContext(ctx, "POST", resetURL, nil)
+	if err != nil {
+		return CredResetResponse{}, fmt.Errorf("error creating new httpRequest: %w", err)
+	}
+
+	resetHandler.Header.Set("Content-Type", "application/json")
+	resetHandler.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", srv.api))
+
+	resp, err := http.DefaultClient.Do(resetHandler)
+	if err != nil {
+		return CredResetResponse{}, fmt.Errorf("error performing HTTP request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		p, _ := io.ReadAll(resp.Body)
+		return CredResetResponse{}, fmt.Errorf("Non-200 status code returned by server: %d, body: %s", resp.StatusCode, string(p))
+	}
+
+	updated := CredResetResponse{}
+	err = json.NewDecoder(resp.Body).Decode(&updated)
+	if err != nil {
+		return CredResetResponse{}, fmt.Errorf("error decoding JSON response: %w", err)
+	}
+
+	return updated, nil
+}
+
+func (srv *ServerlessClient) waitForRemoteState(ctx context.Context, httpHandler *http.Request, tick time.Duration, isReady func(*http.Response) bool) error {
+	timer := time.NewTimer(time.Millisecond)
+	// in cases where we get a timeout, also return the last error returned via HTTP
+	var lastErr error
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("got context done; Last HTTP Error: %w", lastErr)
+		case <-timer.C:
+		}
+
+		resp, err := http.DefaultClient.Do(httpHandler)
+		if err != nil {
+			errMsg := fmt.Errorf("request error: %w", err)
+			// Logger interface doesn't have a debug level and we don't want to auto-log these;
+			// as most of the time it's just spam.
+			//srv.log.Logf(errMsg.Error())
+			lastErr = errMsg
+			timer.Reset(time.Second * 5)
+			continue
+		}
+		if resp.StatusCode != http.StatusOK {
+			errBody, _ := io.ReadAll(resp.Body)
+			errMsg := fmt.Errorf("unexpected status code %d in request to %s, body: %s", resp.StatusCode, httpHandler.URL.String(), string(errBody))
+			//srv.log.Logf(errMsg.Error())
+			lastErr = errMsg
+			resp.Body.Close()
+			timer.Reset(time.Second * 5)
+			continue
+		}
+
+		if isReady(resp) {
+			return nil
+		}
+		timer.Reset(tick)
+	}
+}
diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/serverless_provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/ess/serverless_provisioner.go
new file mode 100644
index 000000000000..36beb2ae71f9
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/testing/ess/serverless_provisioner.go
@@ -0,0 +1,262 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package ess
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/elastic/elastic-agent-libs/logp"
+)
+
+const ProvisionerServerless = "serverless"
+
+// ServerlessProvisioner is a StackProvisioner backed by the ESS serverless API.
+type ServerlessProvisioner struct {
+	cfg ProvisionerConfig
+	log common.Logger
+}
+
+type defaultLogger struct {
+	wrapped *logp.Logger
+}
+
+// Logf implements the runner.Logger interface
+func (log *defaultLogger) Logf(format string, args ...any) {
+	if len(args) == 0 {
+		log.wrapped.Info(format)
+	} else {
+		log.wrapped.Infof(format, args...)
+	}
+}
+
+// ServerlessRegions is the JSON response from the serverless regions API endpoint
+type ServerlessRegions struct {
+	CSP       string `json:"csp"`
+	CSPRegion string `json:"csp_region"`
+	ID        string `json:"id"`
+	Name      string `json:"name"`
+}
+
+// NewServerlessProvisioner creates a new StackProvisioner instance for serverless
+func NewServerlessProvisioner(ctx context.Context, cfg ProvisionerConfig) (common.StackProvisioner, error) {
+	prov := &ServerlessProvisioner{
+		cfg: cfg,
+		log: &defaultLogger{wrapped: logp.L()},
+	}
+	err := prov.CheckCloudRegion(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("error checking region setting: %w", err)
+	}
+	return prov, nil
+}
+
+func (prov *ServerlessProvisioner) Name() string {
+	return ProvisionerServerless
+}
+
+// SetLogger sets the logger for the provisioner.
+func (prov *ServerlessProvisioner) SetLogger(l common.Logger) {
+	prov.log = l
+}
+
+// Create creates a stack.
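Create, WaitForReady, and Delete below are thin wrappers around ServerlessClient from serverless.go. A minimal sketch of the underlying client flow, assuming package ess and an existing common.Logger; the project name and region ID are placeholders:

func exampleServerlessLifecycle(ctx context.Context, apiKey string, logger common.Logger) error {
	client := NewServerlessClient("aws-us-east-1", "observability", apiKey, logger)

	// Create the serverless project; credentials are reset and filled in by DeployStack.
	proj, err := client.DeployStack(ctx, ServerlessRequest{Name: "example-project", RegionID: "aws-us-east-1"})
	if err != nil {
		return err
	}
	logger.Logf("created serverless project %s (%s)", proj.Name, proj.ID)

	// Block until the Elasticsearch and Kibana endpoints report healthy.
	if _, err := client.DeploymentIsReady(ctx); err != nil {
		return err
	}

	// Remove the project once the test run is done.
	return client.DeleteDeployment(ctx)
}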
+func (prov *ServerlessProvisioner) Create(ctx context.Context, request common.StackRequest) (common.Stack, error) { + // allow up to 4 minutes for requests + createCtx, createCancel := context.WithTimeout(ctx, 4*time.Minute) + defer createCancel() + + client := NewServerlessClient(prov.cfg.Region, "observability", prov.cfg.APIKey, prov.log) + srvReq := ServerlessRequest{Name: request.ID, RegionID: prov.cfg.Region} + + prov.log.Logf("Creating serverless stack %s [stack_id: %s]", request.Version, request.ID) + proj, err := client.DeployStack(createCtx, srvReq) + if err != nil { + return common.Stack{}, fmt.Errorf("error deploying stack for request %s: %w", request.ID, err) + } + err = client.WaitForEndpoints(createCtx) + if err != nil { + return common.Stack{}, fmt.Errorf("error waiting for endpoints to become available for serverless stack %s [stack_id: %s, deployment_id: %s]: %w", request.Version, request.ID, proj.ID, err) + } + stack := common.Stack{ + ID: request.ID, + Provisioner: prov.Name(), + Version: request.Version, + Elasticsearch: client.proj.Endpoints.Elasticsearch, + Kibana: client.proj.Endpoints.Kibana, + Username: client.proj.Credentials.Username, + Password: client.proj.Credentials.Password, + Internal: map[string]interface{}{ + "deployment_id": proj.ID, + "deployment_type": proj.Type, + }, + Ready: false, + } + prov.log.Logf("Created serverless stack %s [stack_id: %s, deployment_id: %s]", request.Version, request.ID, proj.ID) + return stack, nil +} + +// WaitForReady should block until the stack is ready or the context is cancelled. +func (prov *ServerlessProvisioner) WaitForReady(ctx context.Context, stack common.Stack) (common.Stack, error) { + deploymentID, deploymentType, err := prov.getDeploymentInfo(stack) + if err != nil { + return stack, fmt.Errorf("failed to get deployment info from the stack: %w", err) + } + + ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) + defer cancel() + + client := NewServerlessClient(prov.cfg.Region, "observability", prov.cfg.APIKey, prov.log) + client.proj.ID = deploymentID + client.proj.Type = deploymentType + client.proj.Region = prov.cfg.Region + client.proj.Endpoints.Elasticsearch = stack.Elasticsearch + client.proj.Endpoints.Kibana = stack.Kibana + client.proj.Credentials.Username = stack.Username + client.proj.Credentials.Password = stack.Password + + prov.log.Logf("Waiting for serverless stack %s to be ready [stack_id: %s, deployment_id: %s]", stack.Version, stack.ID, deploymentID) + + errCh := make(chan error) + var lastErr error + + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + if lastErr == nil { + lastErr = ctx.Err() + } + return stack, fmt.Errorf("serverless stack %s [stack_id: %s, deployment_id: %s] never became ready: %w", stack.Version, stack.ID, deploymentID, lastErr) + case <-ticker.C: + go func() { + statusCtx, statusCancel := context.WithTimeout(ctx, 30*time.Second) + defer statusCancel() + ready, err := client.DeploymentIsReady(statusCtx) + if err != nil { + errCh <- err + } else if !ready { + errCh <- fmt.Errorf("serverless stack %s [stack_id: %s, deployment_id: %s] never became ready", stack.Version, stack.ID, deploymentID) + } else { + errCh <- nil + } + }() + case err := <-errCh: + if err == nil { + stack.Ready = true + return stack, nil + } + lastErr = err + } + } +} + +// Delete deletes a stack. 
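WaitForReady above runs each readiness probe in its own goroutine with a per-attempt timeout, so a hung HTTP call cannot stall the loop beyond the outer deadline. Stripped of the stack-specific details, the pattern looks roughly like this (pollUntil is a hypothetical helper, shown only to make the structure explicit):

// pollUntil runs check on every tick, giving each attempt its own timeout,
// until an attempt succeeds or ctx expires. Hypothetical helper for illustration.
func pollUntil(ctx context.Context, tick time.Duration, check func(context.Context) error) error {
	ticker := time.NewTicker(tick)
	defer ticker.Stop()

	resCh := make(chan error, 1)
	var lastErr error
	for {
		select {
		case <-ctx.Done():
			if lastErr == nil {
				lastErr = ctx.Err()
			}
			return lastErr
		case <-ticker.C:
			go func() {
				attemptCtx, cancel := context.WithTimeout(ctx, tick)
				defer cancel()
				resCh <- check(attemptCtx)
			}()
		case err := <-resCh:
			if err == nil {
				return nil
			}
			lastErr = err
		}
	}
}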
+func (prov *ServerlessProvisioner) Delete(ctx context.Context, stack common.Stack) error { + deploymentID, deploymentType, err := prov.getDeploymentInfo(stack) + if err != nil { + return fmt.Errorf("failed to get deployment info from the stack: %w", err) + } + + client := NewServerlessClient(prov.cfg.Region, "observability", prov.cfg.APIKey, prov.log) + client.proj.ID = deploymentID + client.proj.Type = deploymentType + client.proj.Region = prov.cfg.Region + client.proj.Endpoints.Elasticsearch = stack.Elasticsearch + client.proj.Endpoints.Kibana = stack.Kibana + client.proj.Credentials.Username = stack.Username + client.proj.Credentials.Password = stack.Password + + prov.log.Logf("Destroying serverless stack %s [stack_id: %s, deployment_id: %s]", stack.Version, stack.ID, deploymentID) + err = client.DeleteDeployment(ctx) + if err != nil { + return fmt.Errorf("error removing serverless stack %s [stack_id: %s, deployment_id: %s]: %w", stack.Version, stack.ID, deploymentID, err) + } + return nil +} + +// CheckCloudRegion checks to see if the provided region is valid for the serverless +// if we have an invalid region, overwrite with a valid one. +// The "normal" and serverless ESS APIs have different regions, hence why we need this. +func (prov *ServerlessProvisioner) CheckCloudRegion(ctx context.Context) error { + urlPath := fmt.Sprintf("%s/api/v1/serverless/regions", serverlessURL) + + httpHandler, err := http.NewRequestWithContext(ctx, "GET", urlPath, nil) + if err != nil { + return fmt.Errorf("error creating new httpRequest: %w", err) + } + + httpHandler.Header.Set("Content-Type", "application/json") + httpHandler.Header.Set("Authorization", fmt.Sprintf("ApiKey %s", prov.cfg.APIKey)) + + resp, err := http.DefaultClient.Do(httpHandler) + if err != nil { + return fmt.Errorf("error performing HTTP request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + p, _ := io.ReadAll(resp.Body) + return fmt.Errorf("Non-201 status code returned by server: %d, body: %s", resp.StatusCode, string(p)) + } + regions := []ServerlessRegions{} + + err = json.NewDecoder(resp.Body).Decode(®ions) + if err != nil { + return fmt.Errorf("error unpacking regions from list: %w", err) + } + resp.Body.Close() + + found := false + for _, region := range regions { + if region.ID == prov.cfg.Region { + found = true + } + } + if !found { + if len(regions) == 0 { + return fmt.Errorf("no regions found for cloudless API") + } + newRegion := regions[0].ID + prov.log.Logf("WARNING: Region %s is not available for serverless, selecting %s. 
Other regions are:", prov.cfg.Region, newRegion) + for _, avail := range regions { + prov.log.Logf(" %s - %s", avail.ID, avail.Name) + } + prov.cfg.Region = newRegion + } + + return nil +} + +func (prov *ServerlessProvisioner) getDeploymentInfo(stack common.Stack) (string, string, error) { + if stack.Internal == nil { + return "", "", fmt.Errorf("missing internal information") + } + deploymentIDRaw, ok := stack.Internal["deployment_id"] + if !ok { + return "", "", fmt.Errorf("missing internal deployment_id") + } + deploymentID, ok := deploymentIDRaw.(string) + if !ok { + return "", "", fmt.Errorf("internal deployment_id not a string") + } + deploymentTypeRaw, ok := stack.Internal["deployment_type"] + if !ok { + return "", "", fmt.Errorf("missing internal deployment_type") + } + deploymentType, ok := deploymentTypeRaw.(string) + if !ok { + return "", "", fmt.Errorf("internal deployment_type is not a string") + } + return deploymentID, deploymentType, nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ess/statful_provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/ess/statful_provisioner.go new file mode 100644 index 000000000000..0e2294969fdd --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ess/statful_provisioner.go @@ -0,0 +1,188 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ess + +import ( + "context" + "errors" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "os" + "strings" + "time" +) + +const ProvisionerStateful = "stateful" + +// ProvisionerConfig is the configuration for the ESS statefulProvisioner. +type ProvisionerConfig struct { + Identifier string + APIKey string + Region string +} + +// Validate returns an error if the information is invalid. +func (c *ProvisionerConfig) Validate() error { + if c.Identifier == "" { + return errors.New("field Identifier must be set") + } + if c.APIKey == "" { + return errors.New("field APIKey must be set") + } + if c.Region == "" { + return errors.New("field Region must be set") + } + return nil +} + +type statefulProvisioner struct { + logger common.Logger + cfg ProvisionerConfig + client *Client +} + +// NewProvisioner creates the ESS stateful Provisioner +func NewProvisioner(cfg ProvisionerConfig) (common.StackProvisioner, error) { + err := cfg.Validate() + if err != nil { + return nil, err + } + essClient := NewClient(Config{ + ApiKey: cfg.APIKey, + }) + return &statefulProvisioner{ + cfg: cfg, + client: essClient, + }, nil +} + +func (p *statefulProvisioner) Name() string { + return ProvisionerStateful +} + +func (p *statefulProvisioner) SetLogger(l common.Logger) { + p.logger = l +} + +// Create creates a stack. 
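A minimal sketch of wiring up this stateful provisioner; the identifier, environment variable name, and region are placeholders, and Validate rejects an empty Identifier, APIKey, or Region before any API call is made:

func exampleNewStatefulProvisioner() (common.StackProvisioner, error) {
	cfg := ProvisionerConfig{
		Identifier: "agentbeat-it",          // placeholder
		APIKey:     os.Getenv("EC_API_KEY"), // placeholder env var name
		Region:     "gcp-us-west2",          // placeholder ESS region
	}
	return NewProvisioner(cfg) // fails fast if cfg.Validate() returns an error
}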
+func (p *statefulProvisioner) Create(ctx context.Context, request common.StackRequest) (common.Stack, error) { + // allow up to 2 minutes for request + createCtx, createCancel := context.WithTimeout(ctx, 2*time.Minute) + defer createCancel() + deploymentTags := map[string]string{ + "division": "engineering", + "org": "ingest", + "team": "elastic-agent-control-plane", + "project": "elastic-agent", + "integration-tests": "true", + } + // If the CI env var is set, this mean we are running inside the CI pipeline and some expected env vars are exposed + if _, e := os.LookupEnv("CI"); e { + deploymentTags["buildkite_id"] = os.Getenv("BUILDKITE_BUILD_NUMBER") + deploymentTags["creator"] = os.Getenv("BUILDKITE_BUILD_CREATOR") + deploymentTags["buildkite_url"] = os.Getenv("BUILDKITE_BUILD_URL") + deploymentTags["ci"] = "true" + } + resp, err := p.createDeployment(createCtx, request, deploymentTags) + if err != nil { + return common.Stack{}, err + } + return common.Stack{ + ID: request.ID, + Provisioner: p.Name(), + Version: request.Version, + Elasticsearch: resp.ElasticsearchEndpoint, + Kibana: resp.KibanaEndpoint, + Username: resp.Username, + Password: resp.Password, + Internal: map[string]interface{}{ + "deployment_id": resp.ID, + }, + Ready: false, + }, nil +} + +// WaitForReady should block until the stack is ready or the context is cancelled. +func (p *statefulProvisioner) WaitForReady(ctx context.Context, stack common.Stack) (common.Stack, error) { + deploymentID, err := p.getDeploymentID(stack) + if err != nil { + return stack, fmt.Errorf("failed to get deployment ID from the stack: %w", err) + } + // allow up to 10 minutes for it to become ready + ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) + defer cancel() + p.logger.Logf("Waiting for cloud stack %s to be ready [stack_id: %s, deployment_id: %s]", stack.Version, stack.ID, deploymentID) + ready, err := p.client.DeploymentIsReady(ctx, deploymentID, 30*time.Second) + if err != nil { + return stack, fmt.Errorf("failed to check for cloud %s [stack_id: %s, deployment_id: %s] to be ready: %w", stack.Version, stack.ID, deploymentID, err) + } + if !ready { + return stack, fmt.Errorf("cloud %s [stack_id: %s, deployment_id: %s] never became ready: %w", stack.Version, stack.ID, deploymentID, err) + } + stack.Ready = true + return stack, nil +} + +// Delete deletes a stack. 
+func (p *statefulProvisioner) Delete(ctx context.Context, stack common.Stack) error { + deploymentID, err := p.getDeploymentID(stack) + if err != nil { + return err + } + + // allow up to 1 minute for request + ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) + defer cancel() + + p.logger.Logf("Destroying cloud stack %s [stack_id: %s, deployment_id: %s]", stack.Version, stack.ID, deploymentID) + return p.client.ShutdownDeployment(ctx, deploymentID) +} + +func (p *statefulProvisioner) createDeployment(ctx context.Context, r common.StackRequest, tags map[string]string) (*CreateDeploymentResponse, error) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) + defer cancel() + + p.logger.Logf("Creating cloud stack %s [stack_id: %s]", r.Version, r.ID) + name := fmt.Sprintf("%s-%s", strings.Replace(p.cfg.Identifier, ".", "-", -1), r.ID) + + // prepare tags + tagArray := make([]Tag, 0, len(tags)) + for k, v := range tags { + tagArray = append(tagArray, Tag{ + Key: k, + Value: v, + }) + } + + createDeploymentRequest := CreateDeploymentRequest{ + Name: name, + Region: p.cfg.Region, + Version: r.Version, + Tags: tagArray, + } + + resp, err := p.client.CreateDeployment(ctx, createDeploymentRequest) + if err != nil { + p.logger.Logf("Failed to create ESS cloud %s: %s", r.Version, err) + return nil, fmt.Errorf("failed to create ESS cloud for version %s: %w", r.Version, err) + } + p.logger.Logf("Created cloud stack %s [stack_id: %s, deployment_id: %s]", r.Version, r.ID, resp.ID) + return resp, nil +} + +func (p *statefulProvisioner) getDeploymentID(stack common.Stack) (string, error) { + if stack.Internal == nil { + return "", fmt.Errorf("missing internal information") + } + deploymentIDRaw, ok := stack.Internal["deployment_id"] + if !ok { + return "", fmt.Errorf("missing internal deployment_id") + } + deploymentID, ok := deploymentIDRaw.(string) + if !ok { + return "", fmt.Errorf("internal deployment_id not a string") + } + return deploymentID, nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/fetcher.go b/dev-tools/mage/target/srvrlesstest/testing/fetcher.go new file mode 100644 index 000000000000..7fda78f54a64 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/fetcher.go @@ -0,0 +1,243 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package testing + +import ( + "archive/tar" + "archive/zip" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" +) + +const extAsc = ".asc" +const extHash = ".sha512" + +var ( + // ErrUnsupportedPlatform returned when the operating system and architecture combination is not supported. + ErrUnsupportedPlatform = errors.New("platform is not supported") +) + +// packageArchMap provides a mapping for the endings of the builds of Elastic Agent based on the +// operating system and architecture. 
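A couple of concrete lookups against the mapping and GetPackageSuffix helper defined just below, for illustration:

func examplePackageSuffixes() {
	suffix, err := GetPackageSuffix("linux", "amd64", "targz")
	// suffix == "linux-x86_64.tar.gz", err == nil
	_, _ = suffix, err

	_, err = GetPackageSuffix("windows", "arm64", "zip")
	// no "windows-arm64-zip" key exists, so err wraps ErrUnsupportedPlatform
	_ = err
}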
+var packageArchMap = map[string]string{ + "linux-amd64-targz": "linux-x86_64.tar.gz", + "linux-amd64-deb": "amd64.deb", + "linux-amd64-rpm": "x86_64.rpm", + "linux-arm64-targz": "linux-arm64.tar.gz", + "linux-arm64-deb": "arm64.deb", + "linux-arm64-rpm": "aarch64.rpm", + "windows-amd64-zip": "windows-x86_64.zip", + "darwin-amd64-targz": "darwin-x86_64.tar.gz", + "darwin-arm64-targz": "darwin-aarch64.tar.gz", +} + +// GetPackageSuffix returns the suffix ending for the builds of Elastic Agent based on the +// operating system and architecture. +func GetPackageSuffix(operatingSystem string, architecture string, packageFormat string) (string, error) { + suffix, ok := packageArchMap[fmt.Sprintf("%s-%s-%s", operatingSystem, architecture, packageFormat)] + if !ok { + return "", fmt.Errorf("%w: %s/%s/%s", ErrUnsupportedPlatform, operatingSystem, architecture, packageFormat) + } + return suffix, nil +} + +// FetcherResult represents a pending result from the fetcher. +type FetcherResult interface { + // Name is the name of the fetched result. + Name() string + // Fetch performs the actual fetch into the provided directory. + Fetch(ctx context.Context, l Logger, dir string) error +} + +// Fetcher provides a path for fetching the Elastic Agent compressed archive +// to extract and run for the integration test. +type Fetcher interface { + // Name returns a unique name for the fetcher. + // + // This name is used as a caching key and if a build has already been fetched for a version then it will not + // be fetched again as long as the same fetcher is being used. + Name() string + // Fetch fetches the Elastic Agent compressed archive to extract and run for the integration test. + // + // The extraction is handled by the caller. This should only download the file + // and place it into the directory. + Fetch(ctx context.Context, operatingSystem string, architecture string, version string, packageFormat string) (FetcherResult, error) +} + +// fetchCache is global to all tests, reducing the time required to fetch the needed artifacts +// to only be need at the start of the first test. +var fetchCache map[string]*fetcherCache +var fetchCacheMx sync.Mutex + +// fetcherCache provides a caching mechanism for only fetching what has not already been fetched. +type fetcherCache struct { + mx sync.Mutex + dir string +} + +// fetch either uses the cache result or performs a new fetch if the content is missing. 
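A minimal, hypothetical Fetcher/FetcherResult pair that resolves artifacts from a local directory, shown only to illustrate how the two interfaces above compose; the type names and the artifact naming scheme are assumptions:

type dirFetcher struct{ dir string }

func (f *dirFetcher) Name() string { return "dir-" + f.dir }

func (f *dirFetcher) Fetch(ctx context.Context, operatingSystem string, architecture string, version string, packageFormat string) (FetcherResult, error) {
	suffix, err := GetPackageSuffix(operatingSystem, architecture, packageFormat)
	if err != nil {
		return nil, err
	}
	// hypothetical artifact naming; real fetchers derive this from the build layout
	name := fmt.Sprintf("elastic-agent-%s-%s", version, suffix)
	return &dirResult{src: filepath.Join(f.dir, name), name: name}, nil
}

type dirResult struct{ src, name string }

func (r *dirResult) Name() string { return r.name }

func (r *dirResult) Fetch(ctx context.Context, l Logger, dir string) error {
	l.Logf("Copying %s into %s", r.name, dir)
	data, err := os.ReadFile(r.src)
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, r.name), data, 0o644)
}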
+func (c *fetcherCache) fetch(ctx context.Context, l Logger, res FetcherResult) (string, error) { + name := res.Name() + src := filepath.Join(c.dir, name) + _, err := os.Stat(src) + if err == nil || os.IsExist(err) { + l.Logf("Using existing artifact %s", name) + return src, nil + } + err = res.Fetch(ctx, l, c.dir) + if err != nil { + return "", err + } + return src, nil +} + +func splitFileType(name string) (string, string, error) { + if strings.HasSuffix(name, ".tar.gz") { + return strings.TrimSuffix(name, ".tar.gz"), ".tar.gz", nil + } + if strings.HasSuffix(name, ".zip") { + return strings.TrimSuffix(name, ".zip"), ".zip", nil + } + if strings.HasSuffix(name, ".deb") { + return strings.TrimSuffix(name, ".deb"), ".deb", nil + } + if strings.HasSuffix(name, ".rpm") { + return strings.TrimSuffix(name, ".rpm"), ".rpm", nil + } + return "", "", fmt.Errorf("unknown file extension type: %s", filepath.Ext(name)) +} + +// untar takes a .tar.gz and extracts its content +func untar(archivePath string, extractDir string) error { + r, err := os.Open(archivePath) + if err != nil { + return err + } + defer r.Close() + + zr, err := gzip.NewReader(r) + if err != nil { + return err + } + + tr := tar.NewReader(zr) + + for { + f, err := tr.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return err + } + + fi := f.FileInfo() + mode := fi.Mode() + abs := filepath.Join(extractDir, f.Name) //nolint:gosec // used only in tests + switch { + case mode.IsRegular(): + // just to be sure, it should already be created by Dir type + if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil { + return fmt.Errorf("failed creating directory for file %s: %w", abs, err) + } + + wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm()) + if err != nil { + return fmt.Errorf("failed creating file %s: %w", abs, err) + } + + _, err = io.Copy(wf, tr) //nolint:gosec // used only in tests + if closeErr := wf.Close(); closeErr != nil && err == nil { + err = closeErr + } + if err != nil { + return fmt.Errorf("error writing file %s: %w", abs, err) + } + case mode.IsDir(): + if err := os.MkdirAll(abs, 0755); err != nil { + return fmt.Errorf("failed creating directory %s: %w", abs, err) + } + case mode.Type()&os.ModeSymlink == os.ModeSymlink: + // just to be sure, it should already be created by Dir type + if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil { + return fmt.Errorf("failed creating directory for symlink %s: %w", abs, err) + } + if err := os.Symlink(f.Linkname, abs); err != nil { + return fmt.Errorf("failed creating symlink %s: %w", abs, err) + } + default: + // skip unknown types + } + } + return nil +} + +// unzip takes a .zip and extracts its content +func unzip(archivePath string, extractDir string) error { + r, err := zip.OpenReader(archivePath) + if err != nil { + return err + } + defer r.Close() + + unpackFile := func(f *zip.File) (err error) { + rc, err := f.Open() + if err != nil { + return err + } + defer func() { + if cerr := rc.Close(); cerr != nil { + err = errors.Join(err, cerr) + } + }() + + fi := f.FileInfo() + mode := fi.Mode() + abs := filepath.Join(extractDir, f.Name) //nolint:gosec // used only in tests + switch { + case mode.IsRegular(): + // just to be sure, it should already be created by Dir type + if err := os.MkdirAll(filepath.Dir(abs), f.Mode()); err != nil { + return fmt.Errorf("failed creating directory for file %s: %w", abs, err) + } + + f, err := os.OpenFile(abs, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) + if err != nil { + return 
fmt.Errorf("failed creating file %s: %w", abs, err) + } + defer func() { + if cerr := f.Close(); cerr != nil { + err = errors.Join(err, cerr) + } + }() + + //nolint:gosec // used only in tests + if _, err = io.Copy(f, rc); err != nil { + return fmt.Errorf("error writing file %s: %w", abs, err) + } + case mode.IsDir(): + if err := os.MkdirAll(abs, f.Mode()); err != nil { + return fmt.Errorf("failed creating directory %s: %w", abs, err) + } + default: + // skip unknown types + } + return nil + } + + for _, f := range r.File { + if err := unpackFile(f); err != nil { + return err + } + } + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/kubernetes/image.go b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/image.go new file mode 100644 index 000000000000..ca28adcd5c77 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/image.go @@ -0,0 +1,245 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package kubernetes + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + devtools "github.com/elastic/beats/v7/dev-tools/mage" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type DockerConfig struct { + CurrentContext string `json:"currentContext"` +} + +type DockerContext struct { + Name string `json:"Name"` + Metadata map[string]interface{} `json:"Metadata"` + Endpoints map[string]Endpoint `json:"Endpoints"` + Storage map[string]interface{} `json:"Storage"` + TLS bool `json:"TLS"` +} + +type DockerBuildOutput struct { + Stream string `json:"stream"` + Aux struct { + ID string `json:"ID"` + } `json:"aux"` +} + +type Endpoint struct { + Host string `json:"Host"` +} + +// AddK8STestsToImage compiles and adds the k8s-inner-tests binary to the given image +func AddK8STestsToImage(ctx context.Context, logger common.Logger, baseImage string, arch string) (string, error) { + // compile k8s test with tag kubernetes_inner + buildBase, err := filepath.Abs("build") + if err != nil { + return "", err + } + + testBinary := filepath.Join(buildBase, "k8s-inner-tests") + + params := devtools.GoTestArgs{ + TestName: "k8s-inner-tests", + Race: false, + Packages: []string{"./testing/kubernetes_inner/..."}, + Tags: []string{"kubernetes_inner"}, + OutputFile: testBinary, + Env: map[string]string{ + "GOOS": "linux", + "GOARCH": arch, + "CGO_ENABLED": "0", + }, + } + + if err := devtools.GoTestBuild(ctx, params); err != nil { + return "", err + } + + cli, err := getDockerClient() + if err != nil { + return "", err + } + + // dockerfile to just copy the tests binary + dockerfile := fmt.Sprintf(` + FROM %s + COPY testsBinary /usr/share/elastic-agent/k8s-inner-tests + `, baseImage) + + // Create a tar archive with the Dockerfile and the binary + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + + // Add Dockerfile to tar + err = tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + if err != nil { + return "", err + } + _, err = tw.Write([]byte(dockerfile)) + if err != nil { + return "", err + } + + // Add binary to tar + binaryFile, err := os.Open(testBinary) + if err != nil { + return "", err + } + defer binaryFile.Close() + + info, err := 
binaryFile.Stat() + if err != nil { + return "", err + } + + err = tw.WriteHeader(&tar.Header{ + Name: "testsBinary", + Mode: 0777, + Size: info.Size(), + }) + if err != nil { + return "", err + } + _, err = io.Copy(tw, binaryFile) + if err != nil { + return "", err + } + + err = tw.Close() + if err != nil { + return "", err + } + + outputImage := baseImage + "-tests" + + // Build the image + imageBuildResponse, err := cli.ImageBuild(ctx, &buf, types.ImageBuildOptions{ + Tags: []string{outputImage}, + Dockerfile: "Dockerfile", + Remove: true, + }) + if err != nil { + return "", err + } + defer imageBuildResponse.Body.Close() + + scanner := bufio.NewScanner(imageBuildResponse.Body) + for scanner.Scan() { + line := scanner.Text() + var output DockerBuildOutput + if err := json.Unmarshal([]byte(line), &output); err != nil { + return "", fmt.Errorf("error at parsing JSON: %w", err) + } + + if output.Stream != "" { + if out := strings.TrimRight(output.Stream, "\n"); out != "" { + logger.Logf(out) + } + } + } + + if err := scanner.Err(); err != nil { + return "", err + } + + return outputImage, nil +} + +// getDockerClient returns an instance of the Docker client. It first checks +// if there is a current context inside $/.docker/config.json and instantiates +// a client based on it. Otherwise, it fallbacks to a docker client with values +// from environment variables. +func getDockerClient() (*client.Client, error) { + + envClient := func() (*client.Client, error) { + return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + } + + type DockerConfig struct { + CurrentContext string `json:"currentContext"` + } + + configFile := filepath.Join(os.Getenv("HOME"), ".docker", "config.json") + file, err := os.Open(configFile) + if err != nil { + if os.IsNotExist(err) { + return envClient() + } + return nil, err + } + defer file.Close() + + var config DockerConfig + decoder := json.NewDecoder(file) + err = decoder.Decode(&config) + if err != nil { + return nil, err + } + + if config.CurrentContext == "" { + return envClient() + } + + contextDir := filepath.Join(os.Getenv("HOME"), ".docker", "contexts", "meta") + files, err := os.ReadDir(contextDir) + if err != nil { + if os.IsNotExist(err) { + return envClient() + } + return nil, fmt.Errorf("unable to read Docker contexts directory: %w", err) + } + + for _, f := range files { + if f.IsDir() { + metaFile := filepath.Join(contextDir, f.Name(), "meta.json") + if _, err := os.Stat(metaFile); err == nil { + if os.IsNotExist(err) { + return envClient() + } + var dockerContext DockerContext + content, err := os.ReadFile(metaFile) + if err != nil { + return nil, fmt.Errorf("unable to read Docker context meta file: %w", err) + } + if err := json.Unmarshal(content, &dockerContext); err != nil { + return nil, fmt.Errorf("unable to parse Docker context meta file: %w", err) + } + if dockerContext.Name != config.CurrentContext { + continue + } + + endpoint, ok := dockerContext.Endpoints["docker"] + if !ok { + return nil, fmt.Errorf("docker endpoint not found in context") + } + + return client.NewClientWithOpts( + client.WithHost(endpoint.Host), + client.WithAPIVersionNegotiation(), + ) + } + } + } + + return envClient() +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/kubernetes/kind/provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/kind/provisioner.go new file mode 100644 index 000000000000..05829a4d4f8b --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/kind/provisioner.go @@ -0,0 
+1,285 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package kind + +import ( + "bytes" + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/kubernetes" + "io" + "os" + "os/exec" + "runtime" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/e2e-framework/klient" + "sigs.k8s.io/e2e-framework/klient/k8s" + "sigs.k8s.io/e2e-framework/klient/k8s/resources" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/klient/wait/conditions" +) + +const ( + Name = "kind" +) + +const clusterCfg string = ` +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + kubeadmConfigPatches: + - | + kind: ClusterConfiguration + scheduler: + extraArgs: + bind-address: "0.0.0.0" + secure-port: "10259" + controllerManager: + extraArgs: + bind-address: "0.0.0.0" + secure-port: "10257" +` + +func NewProvisioner() common.InstanceProvisioner { + return &provisioner{} +} + +type provisioner struct { + logger common.Logger +} + +func (p *provisioner) Name() string { + return Name +} + +func (p *provisioner) Type() common.ProvisionerType { + return common.ProvisionerTypeK8SCluster +} + +func (p *provisioner) SetLogger(l common.Logger) { + p.logger = l +} + +func (p *provisioner) Supported(batch define.OS) bool { + if batch.Type != define.Kubernetes || batch.Arch != runtime.GOARCH { + return false + } + if batch.Distro != "" && batch.Distro != Name { + // not kind, don't run + return false + } + return true +} + +func (p *provisioner) Provision(ctx context.Context, cfg common.Config, batches []common.OSBatch) ([]common.Instance, error) { + var instances []common.Instance + for _, batch := range batches { + k8sVersion := fmt.Sprintf("v%s", batch.OS.Version) + instanceName := fmt.Sprintf("%s-%s", k8sVersion, batch.Batch.Group) + + agentImageName, err := kubernetes.VariantToImage(batch.OS.DockerVariant) + if err != nil { + return nil, err + } + agentImageName = fmt.Sprintf("%s:%s", agentImageName, cfg.AgentVersion) + agentImage, err := kubernetes.AddK8STestsToImage(ctx, p.logger, agentImageName, runtime.GOARCH) + if err != nil { + return nil, fmt.Errorf("failed to add k8s tests to image %s: %w", agentImageName, err) + } + + exists, err := p.clusterExists(instanceName) + if err != nil { + return nil, fmt.Errorf("failed to check if cluster exists: %w", err) + } + if !exists { + p.logger.Logf("Provisioning kind cluster %s", instanceName) + nodeImage := fmt.Sprintf("kindest/node:%s", k8sVersion) + clusterConfig := strings.NewReader(clusterCfg) + + ret, err := p.kindCmd(clusterConfig, "create", "cluster", "--name", instanceName, "--image", nodeImage, "--config", "-") + if err != nil { + return nil, fmt.Errorf("kind: failed to create cluster %s: %s", instanceName, ret.stderr) + } + + exists, err = p.clusterExists(instanceName) + if err != nil { + return nil, err + } + + if !exists { + return nil, fmt.Errorf("kind: failed to find cluster %s after successful creation", instanceName) + } + } else { + p.logger.Logf("Kind cluster %s already exists", instanceName) + } + + kConfigPath, err := p.writeKubeconfig(instanceName) + if err != nil 
{ + return nil, err + } + + c, err := klient.NewWithKubeConfigFile(kConfigPath) + if err != nil { + return nil, err + } + + if err := p.WaitForControlPlane(c); err != nil { + return nil, err + } + + if err := p.LoadImage(ctx, instanceName, agentImage); err != nil { + return nil, err + } + + instances = append(instances, common.Instance{ + ID: batch.ID, + Name: instanceName, + Provisioner: Name, + IP: "", + Username: "", + RemotePath: "", + Internal: map[string]interface{}{ + "config": kConfigPath, + "version": k8sVersion, + "agent_image": agentImage, + }, + }) + } + + return instances, nil +} + +func (p *provisioner) LoadImage(ctx context.Context, clusterName string, image string) error { + ret, err := p.kindCmd(nil, "load", "docker-image", "--name", clusterName, image) + if err != nil { + return fmt.Errorf("kind: load docker-image %s failed: %w: %s", image, err, ret.stderr) + } + return nil +} + +func (p *provisioner) WaitForControlPlane(client klient.Client) error { + r, err := resources.New(client.RESTConfig()) + if err != nil { + return err + } + for _, sl := range []metav1.LabelSelectorRequirement{ + {Key: "component", Operator: metav1.LabelSelectorOpIn, Values: []string{"etcd", "kube-apiserver", "kube-controller-manager", "kube-scheduler"}}, + {Key: "k8s-app", Operator: metav1.LabelSelectorOpIn, Values: []string{"kindnet", "kube-dns", "kube-proxy"}}, + } { + selector, err := metav1.LabelSelectorAsSelector( + &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + sl, + }, + }, + ) + if err != nil { + return err + } + err = wait.For(conditions.New(r).ResourceListMatchN(&v1.PodList{}, len(sl.Values), func(object k8s.Object) bool { + pod, ok := object.(*v1.Pod) + if !ok { + return false + } + + for _, cond := range pod.Status.Conditions { + if cond.Type != v1.PodReady { + continue + } + + return cond.Status == v1.ConditionTrue + } + + return false + }, resources.WithLabelSelector(selector.String()))) + if err != nil { + return err + } + } + return nil +} + +func (p *provisioner) Clean(ctx context.Context, cfg common.Config, instances []common.Instance) error { + // doesn't execute in parallel for the same reasons in Provision + // multipass just cannot handle it + for _, instance := range instances { + func(instance common.Instance) { + err := p.deleteCluster(instance.ID) + if err != nil { + // prevent a failure from stopping the other instances and clean + p.logger.Logf("Delete instance %s failed: %s", instance.Name, err) + } + }(instance) + } + + return nil +} + +func (p *provisioner) clusterExists(name string) (bool, error) { + ret, err := p.kindCmd(nil, "get", "clusters") + if err != nil { + return false, err + } + + for _, c := range strings.Split(ret.stdout, "\n") { + if c == name { + return true, nil + } + } + return false, nil +} + +func (p *provisioner) writeKubeconfig(name string) (string, error) { + kubecfg := fmt.Sprintf("%s-kubecfg", name) + + ret, err := p.kindCmd(nil, "get", "kubeconfig", "--name", name) + if err != nil { + return "", fmt.Errorf("kind get kubeconfig: stderr: %s: %w", ret.stderr, err) + } + + file, err := os.CreateTemp("", fmt.Sprintf("kind-cluster-%s", kubecfg)) + if err != nil { + return "", fmt.Errorf("kind kubeconfig file: %w", err) + } + defer file.Close() + + if n, err := io.WriteString(file, ret.stdout); n == 0 || err != nil { + return "", fmt.Errorf("kind kubecfg file: bytes copied: %d: %w]", n, err) + } + + return file.Name(), nil +} + +type cmdResult struct { + stdout string + stderr string +} + +func (p *provisioner) 
kindCmd(stdIn io.Reader, args ...string) (cmdResult, error) { + + var stdout, stderr bytes.Buffer + cmd := exec.Command("kind", args...) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if stdIn != nil { + cmd.Stdin = stdIn + } + err := cmd.Run() + return cmdResult{ + stdout: stdout.String(), + stderr: stderr.String(), + }, err +} + +func (p *provisioner) deleteCluster(name string) error { + _, err := p.kindCmd(nil, "delete", "cluster", "--name", name) + return err +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/kubernetes/runner.go b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/runner.go new file mode 100644 index 000000000000..866ee9133566 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/runner.go @@ -0,0 +1,122 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package kubernetes + +import ( + "context" + "errors" + "fmt" + devtools "github.com/elastic/beats/v7/dev-tools/mage" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "os" + "path/filepath" + "strings" + "time" +) + +// Runner is a handler for running tests against a Kubernetes cluster +type Runner struct{} + +// Prepare configures the host for running the test +func (Runner) Prepare(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, arch string, goVersion string) error { + return nil +} + +// Copy places the required files on the host +func (Runner) Copy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error { + return nil +} + +// Run the test +func (Runner) Run(ctx context.Context, verbose bool, sshClient ssh.SSHClient, logger common.Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (common.OSRunnerResult, error) { + var goTestFlags []string + rawTestFlags := os.Getenv("GOTEST_FLAGS") + if rawTestFlags != "" { + goTestFlags = strings.Split(rawTestFlags, " ") + } + + maxDuration := 2 * time.Hour + var result []common.OSRunnerPackageResult + for _, pkg := range batch.Tests { + packageTestsStrBuilder := strings.Builder{} + packageTestsStrBuilder.WriteString("^(") + for idx, test := range pkg.Tests { + if idx > 0 { + packageTestsStrBuilder.WriteString("|") + } + packageTestsStrBuilder.WriteString(test.Name) + } + packageTestsStrBuilder.WriteString(")$") + + testPrefix := fmt.Sprintf("%s.%s", prefix, filepath.Base(pkg.Name)) + testName := fmt.Sprintf("k8s-%s", testPrefix) + fileName := fmt.Sprintf("build/TEST-go-%s", testName) + extraFlags := make([]string, 0, len(goTestFlags)+6) + if len(goTestFlags) > 0 { + extraFlags = append(extraFlags, goTestFlags...) 
+ } + extraFlags = append(extraFlags, "-test.shuffle", "on", + "-test.timeout", maxDuration.String(), "-test.run", packageTestsStrBuilder.String()) + + env["AGENT_VERSION"] = agentVersion + env["TEST_DEFINE_PREFIX"] = testPrefix + + buildFolderAbsPath, err := filepath.Abs("build") + if err != nil { + return common.OSRunnerResult{}, err + } + + podLogsPath := filepath.Join(buildFolderAbsPath, fmt.Sprintf("k8s-logs-%s", testPrefix)) + err = os.Mkdir(podLogsPath, 0755) + if err != nil && !errors.Is(err, os.ErrExist) { + return common.OSRunnerResult{}, err + } + + env["K8S_TESTS_POD_LOGS_BASE"] = podLogsPath + + params := devtools.GoTestArgs{ + TestName: testName, + OutputFile: fileName + ".out", + JUnitReportFile: fileName + ".xml", + Packages: []string{pkg.Name}, + Tags: []string{"integration", "kubernetes"}, + ExtraFlags: extraFlags, + Env: env, + } + err = devtools.GoTest(ctx, params) + if err != nil { + return common.OSRunnerResult{}, err + } + + var resultPkg common.OSRunnerPackageResult + resultPkg.Name = pkg.Name + outputPath := fmt.Sprintf("build/TEST-go-k8s-%s.%s", prefix, filepath.Base(pkg.Name)) + resultPkg.Output, err = os.ReadFile(outputPath + ".out") + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("failed to fetched test output at %s.out", outputPath) + } + resultPkg.JSONOutput, err = os.ReadFile(outputPath + ".out.json") + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("failed to fetched test output at %s.out.json", outputPath) + } + resultPkg.XMLOutput, err = os.ReadFile(outputPath + ".xml") + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("failed to fetched test output at %s.xml", outputPath) + } + result = append(result, resultPkg) + } + + return common.OSRunnerResult{ + Packages: result, + }, nil +} + +// Diagnostics gathers any diagnostics from the host. +func (Runner) Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error { + // does nothing for kubernetes + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/kubernetes/supported.go b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/supported.go new file mode 100644 index 000000000000..5ed03bf41020 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/kubernetes/supported.go @@ -0,0 +1,104 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package kubernetes + +import ( + "errors" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" +) + +// ErrUnknownDockerVariant is the error returned when the variant is unknown. +var ErrUnknownDockerVariant = errors.New("unknown docker variant type") + +// arches defines the list of supported architectures of Kubernetes +var arches = []string{define.AMD64, define.ARM64} + +// versions defines the list of supported version of Kubernetes. +var versions = []define.OS{ + // Kubernetes 1.31 + { + Type: define.Kubernetes, + Version: "1.31.0", + }, + // Kubernetes 1.30 + { + Type: define.Kubernetes, + Version: "1.30.2", + }, + // Kubernetes 1.29 + { + Type: define.Kubernetes, + Version: "1.29.4", + }, + // Kubernetes 1.28 + { + Type: define.Kubernetes, + Version: "1.28.9", + }, +} + +// variants defines the list of variants and the image name for that variant. +// +// Note: This cannot be a simple map as the order matters. 
We need the +// one that we want to be the default test to be first. +var variants = []struct { + Name string + Image string +}{ + { + Name: "basic", + Image: "docker.elastic.co/beats/elastic-agent", + }, + { + Name: "ubi", + Image: "docker.elastic.co/beats/elastic-agent-ubi", + }, + { + Name: "wolfi", + Image: "docker.elastic.co/beats/elastic-agent-wolfi", + }, + { + Name: "complete", + Image: "docker.elastic.co/beats/elastic-agent-complete", + }, + { + Name: "complete-wolfi", + Image: "docker.elastic.co/beats/elastic-agent-complete-wolfi", + }, + { + Name: "cloud", + Image: "docker.elastic.co/beats-ci/elastic-agent-cloud", + }, + { + Name: "service", + Image: "docker.elastic.co/beats-ci/elastic-agent-service", + }, +} + +// GetSupported returns the list of supported OS types for Kubernetes. +func GetSupported() []define.OS { + supported := make([]define.OS, 0, len(versions)*len(variants)*2) + for _, a := range arches { + for _, v := range versions { + for _, variant := range variants { + c := v + c.Arch = a + c.DockerVariant = variant.Name + supported = append(supported, c) + } + } + } + return supported +} + +// VariantToImage returns the image name from the variant. +func VariantToImage(variant string) (string, error) { + for _, v := range variants { + if v.Name == variant { + return v.Image, nil + } + } + return "", ErrUnknownDockerVariant +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/linux/debian.go b/dev-tools/mage/target/srvrlesstest/testing/linux/debian.go new file mode 100644 index 000000000000..f70e1a978436 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/linux/debian.go @@ -0,0 +1,206 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package linux + +import ( + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "path" + "path/filepath" + "strings" + "time" +) + +// DebianRunner is a handler for running tests on Linux +type DebianRunner struct{} + +// Prepare the test +func (DebianRunner) Prepare(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, arch string, goVersion string) error { + // prepare build-essential and unzip + // + // apt-get update and install are so terrible that we have to place this in a loop, because in some cases the + // apt-get update says it works, but it actually fails. 
so we add 3 tries here + var err error + for i := 0; i < 3; i++ { + err = func() error { + updateCtx, updateCancel := context.WithTimeout(ctx, 3*time.Minute) + defer updateCancel() + logger.Logf("Running apt-get update") + // `-o APT::Update::Error-Mode=any` ensures that any warning is tried as an error, so the retry + // will occur (without this we get random failures) + stdOut, errOut, err := sshClient.ExecWithRetry(updateCtx, "sudo", []string{"apt-get", "update", "-o APT::Update::Error-Mode=any"}, 15*time.Second) + if err != nil { + return fmt.Errorf("failed to run apt-get update: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + return func() error { + // golang is installed below and not using the package manager, ensures that the exact version + // of golang is used for the running of the test + installCtx, installCancel := context.WithTimeout(ctx, 1*time.Minute) + defer installCancel() + logger.Logf("Install build-essential and unzip") + stdOut, errOut, err = sshClient.ExecWithRetry(installCtx, "sudo", []string{"apt-get", "install", "-y", "build-essential", "unzip"}, 5*time.Second) + if err != nil { + return fmt.Errorf("failed to install build-essential and unzip: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + return nil + }() + }() + if err == nil { + // installation was successful + break + } + logger.Logf("Failed to install build-essential and unzip; will wait 15 seconds and try again") + <-time.After(15 * time.Second) + } + if err != nil { + // seems after 3 tries it still failed + return err + } + + // prepare golang + logger.Logf("Install golang %s (%s)", goVersion, arch) + downloadURL := fmt.Sprintf("https://go.dev/dl/go%s.linux-%s.tar.gz", goVersion, arch) + filename := path.Base(downloadURL) + stdOut, errOut, err := sshClient.Exec(ctx, "curl", []string{"-Ls", downloadURL, "--output", filename}, nil) + if err != nil { + return fmt.Errorf("failed to download go from %s with curl: %w (stdout: %s, stderr: %s)", downloadURL, err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(ctx, "sudo", []string{"tar", "-C", "/usr/local", "-xzf", filename}, nil) + if err != nil { + return fmt.Errorf("failed to extract go to /usr/local with tar: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(ctx, "sudo", []string{"ln", "-s", "/usr/local/go/bin/go", "/usr/bin/go"}, nil) + if err != nil { + return fmt.Errorf("failed to symlink /usr/local/go/bin/go to /usr/bin/go: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(ctx, "sudo", []string{"ln", "-s", "/usr/local/go/bin/gofmt", "/usr/bin/gofmt"}, nil) + if err != nil { + return fmt.Errorf("failed to symlink /usr/local/go/bin/gofmt to /usr/bin/gofmt: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + return nil +} + +// Copy places the required files on the host. 
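The Prepare step above retries apt-get because a reported success can still leave the package set broken. As a rough standalone sketch of that retry-with-wait idea (the helper name, attempt count, and timings here are illustrative, not part of this change):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry runs fn up to attempts times, waiting wait between failures,
// and returns the last error if every attempt fails.
func retry(ctx context.Context, attempts int, wait time.Duration, fn func(context.Context) error) error {
	var err error
	for i := 0; i < attempts; i++ {
		err = fn(ctx)
		if err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(wait):
		}
	}
	return err
}

func main() {
	calls := 0
	err := retry(context.Background(), 3, 10*time.Millisecond, func(ctx context.Context) error {
		calls++
		if calls < 3 {
			return errors.New("apt-get update reported success but actually failed")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}

Passing `-o APT::Update::Error-Mode=any` in the patch serves the same goal: warnings surface as failures, so the retry actually triggers instead of silently carrying on with a broken index.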
+func (DebianRunner) Copy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error { + return linuxCopy(ctx, sshClient, logger, repoArchive, builds) +} + +// Run the test +func (DebianRunner) Run(ctx context.Context, verbose bool, sshClient ssh.SSHClient, logger common.Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (common.OSRunnerResult, error) { + var tests []string + for _, pkg := range batch.Tests { + for _, test := range pkg.Tests { + tests = append(tests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + var sudoTests []string + for _, pkg := range batch.SudoTests { + for _, test := range pkg.Tests { + sudoTests = append(sudoTests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + + logArg := "" + if verbose { + logArg = "-v" + } + var result common.OSRunnerResult + if len(tests) > 0 { + vars := fmt.Sprintf(`GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" AGENT_VERSION="%s" TEST_DEFINE_PREFIX="%s" TEST_DEFINE_TESTS="%s"`, agentVersion, prefix, strings.Join(tests, ",")) + vars = extendVars(vars, env) + + script := fmt.Sprintf(`cd agent && %s ~/go/bin/mage %s integration:testOnRemote`, vars, logArg) + results, err := runTests(ctx, logger, "non-sudo", prefix, script, sshClient, batch.Tests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running non-sudo tests: %w", err) + } + result.Packages = results + } + + if len(sudoTests) > 0 { + prefix := fmt.Sprintf("%s-sudo", prefix) + vars := fmt.Sprintf(`GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" AGENT_VERSION="%s" TEST_DEFINE_PREFIX="%s" TEST_DEFINE_TESTS="%s"`, agentVersion, prefix, strings.Join(sudoTests, ",")) + vars = extendVars(vars, env) + script := fmt.Sprintf(`cd agent && sudo %s ~/go/bin/mage %s integration:testOnRemote`, vars, logArg) + + results, err := runTests(ctx, logger, "sudo", prefix, script, sshClient, batch.SudoTests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running sudo tests: %w", err) + } + result.SudoPackages = results + } + + return result, nil +} + +// Diagnostics gathers any diagnostics from the host. 
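The helpers that follow (runTests and getRunnerPackageResult) pull three report files per package back over SSH once the remote mage run finishes. A small sketch of the naming convention they rely on (the prefix and package name below are made-up values):

package main

import (
	"fmt"
	"path/filepath"
)

// resultFiles lists the three per-package files fetched after a remote run:
// plain output, JSON output, and the JUnit XML report.
func resultFiles(prefix, pkgName string) []string {
	base := fmt.Sprintf("$HOME/agent/build/TEST-go-remote-%s.%s", prefix, filepath.Base(pkgName))
	return []string{base + ".out", base + ".out.json", base + ".xml"}
}

func main() {
	for _, f := range resultFiles("ubuntu-2404-sudo", "github.com/elastic/elastic-agent/testing/integration") {
		fmt.Println(f)
	}
}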
+func (DebianRunner) Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error { + return linuxDiagnostics(ctx, sshClient, logger, destination) +} + +func runTests(ctx context.Context, logger common.Logger, name string, prefix string, script string, sshClient ssh.SSHClient, tests []define.BatchPackageTests) ([]common.OSRunnerPackageResult, error) { + execTest := strings.NewReader(script) + + session, err := sshClient.NewSession() + if err != nil { + return nil, fmt.Errorf("failed to start session: %w", err) + } + + session.Stdout = common.NewPrefixOutput(logger, fmt.Sprintf("Test output (%s) (stdout): ", name)) + session.Stderr = common.NewPrefixOutput(logger, fmt.Sprintf("Test output (%s) (stderr): ", name)) + session.Stdin = execTest + + // allowed to fail because tests might fail + logger.Logf("Running %s tests...", name) + err = session.Run("bash") + if err != nil { + logger.Logf("%s tests failed: %s", name, err) + } + // this seems to always return an error + _ = session.Close() + + var result []common.OSRunnerPackageResult + // fetch the contents for each package + for _, pkg := range tests { + resultPkg, err := getRunnerPackageResult(ctx, sshClient, pkg, prefix) + if err != nil { + return nil, err + } + result = append(result, resultPkg) + } + return result, nil +} + +func getRunnerPackageResult(ctx context.Context, sshClient ssh.SSHClient, pkg define.BatchPackageTests, prefix string) (common.OSRunnerPackageResult, error) { + var err error + var resultPkg common.OSRunnerPackageResult + resultPkg.Name = pkg.Name + outputPath := fmt.Sprintf("$HOME/agent/build/TEST-go-remote-%s.%s", prefix, filepath.Base(pkg.Name)) + resultPkg.Output, err = sshClient.GetFileContents(ctx, outputPath+".out") + if err != nil { + return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.out", outputPath) + } + resultPkg.JSONOutput, err = sshClient.GetFileContents(ctx, outputPath+".out.json") + if err != nil { + return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.out.json", outputPath) + } + resultPkg.XMLOutput, err = sshClient.GetFileContents(ctx, outputPath+".xml") + if err != nil { + return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.xml", outputPath) + } + return resultPkg, nil +} + +func extendVars(vars string, env map[string]string) string { + var envStr []string + for k, v := range env { + envStr = append(envStr, fmt.Sprintf(`%s="%s"`, k, v)) + } + return fmt.Sprintf("%s %s", vars, strings.Join(envStr, " ")) +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/linux/linux.go b/dev-tools/mage/target/srvrlesstest/testing/linux/linux.go new file mode 100644 index 000000000000..d43b024102ee --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/linux/linux.go @@ -0,0 +1,156 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0.
+ +package linux + +import ( + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "os" + "path/filepath" + "strings" +) + +func linuxDiagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error { + // take ownership, as sudo tests will create with root permissions (allow to fail in the case it doesn't exist) + diagnosticDir := "$HOME/agent/build/diagnostics" + _, _, _ = sshClient.Exec(ctx, "sudo", []string{"chown", "-R", "$USER:$USER", diagnosticDir}, nil) + stdOut, _, err := sshClient.Exec(ctx, "ls", []string{"-1", diagnosticDir}, nil) + if err != nil { + //nolint:nilerr // failed to list the directory, probably don't have any diagnostics (do nothing) + return nil + } + eachDiagnostic := strings.Split(string(stdOut), "\n") + for _, filename := range eachDiagnostic { + filename = strings.TrimSpace(filename) + if filename == "" { + continue + } + + // don't use filepath.Join as we need this to work in Windows as well + // this is because if we use `filepath.Join` on a Windows host connected to a Linux host + // it will use a `\` and that will be incorrect for Linux + fp := fmt.Sprintf("%s/%s", diagnosticDir, filename) + // use filepath.Join on this path because it's a path on this specific host platform + dp := filepath.Join(destination, filename) + logger.Logf("Copying diagnostic %s", filename) + out, err := os.Create(dp) + if err != nil { + return fmt.Errorf("failed to create file %s: %w", dp, err) + } + err = sshClient.GetFileContentsOutput(ctx, fp, out) + _ = out.Close() + if err != nil { + return fmt.Errorf("failed to copy file from remote host to %s: %w", dp, err) + } + } + return nil +} + +func linuxCopy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error { + // copy the archive and extract it on the host + logger.Logf("Copying repo") + destRepoName := filepath.Base(repoArchive) + err := sshClient.Copy(repoArchive, destRepoName) + if err != nil { + return fmt.Errorf("failed to SCP repo archive %s: %w", repoArchive, err) + } + + // remove build paths, on cases where the build path is different from agent. + for _, build := range builds { + for _, remoteBuildPath := range []string{build.Path, build.SHA512Path} { + relativeAgentDir := filepath.Join("agent", remoteBuildPath) + _, _, err := sshClient.Exec(ctx, "sudo", []string{"rm", "-rf", relativeAgentDir}, nil) + // doesn't need to be a fatal error. + if err != nil { + logger.Logf("error removing build dir %s: %w", relativeAgentDir, err) + } + } + } + + // ensure that agent directory is removed (possible it already exists if instance already used) + stdout, stderr, err := sshClient.Exec(ctx, + "sudo", []string{"rm", "-rf", "agent"}, nil) + if err != nil { + return fmt.Errorf( + "failed to remove agent directory before unziping new one: %w. 
stdout: %q, stderr: %q", + err, stdout, stderr) + } + + stdOut, errOut, err := sshClient.Exec(ctx, "unzip", []string{destRepoName, "-d", "agent"}, nil) + if err != nil { + return fmt.Errorf("failed to unzip %s to agent directory: %w (stdout: %s, stderr: %s)", destRepoName, err, stdOut, errOut) + } + + // prepare for testing + logger.Logf("Running make mage and prepareOnRemote") + envs := `GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH"` + installMage := strings.NewReader(fmt.Sprintf(`cd agent && %s make mage && %s mage integration:prepareOnRemote`, envs, envs)) + stdOut, errOut, err = sshClient.Exec(ctx, "bash", nil, installMage) + if err != nil { + return fmt.Errorf("failed to perform make mage and prepareOnRemote: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + // determine if the build needs to be replaced on the host + // if it already exists and the SHA512 are the same contents, then + // there is no reason to waste time uploading the build + for _, build := range builds { + copyBuild := true + localSHA512, err := os.ReadFile(build.SHA512Path) + if err != nil { + return fmt.Errorf("failed to read local SHA52 contents %s: %w", build.SHA512Path, err) + } + hostSHA512Path := filepath.Base(build.SHA512Path) + hostSHA512, err := sshClient.GetFileContents(ctx, hostSHA512Path) + if err == nil { + if string(localSHA512) == string(hostSHA512) { + logger.Logf("Skipping copy agent build %s; already the same", filepath.Base(build.Path)) + copyBuild = false + } + } + + if copyBuild { + // ensure the existing copies are removed first + toRemove := filepath.Base(build.Path) + stdOut, errOut, err = sshClient.Exec(ctx, + "sudo", []string{"rm", "-f", toRemove}, nil) + if err != nil { + return fmt.Errorf("failed to remove %q: %w (stdout: %q, stderr: %q)", + toRemove, err, stdOut, errOut) + } + + toRemove = filepath.Base(build.SHA512Path) + stdOut, errOut, err = sshClient.Exec(ctx, + "sudo", []string{"rm", "-f", toRemove}, nil) + if err != nil { + return fmt.Errorf("failed to remove %q: %w (stdout: %q, stderr: %q)", + toRemove, err, stdOut, errOut) + } + + logger.Logf("Copying agent build %s", filepath.Base(build.Path)) + } + + for _, buildPath := range []string{build.Path, build.SHA512Path} { + if copyBuild { + err = sshClient.Copy(buildPath, filepath.Base(buildPath)) + if err != nil { + return fmt.Errorf("failed to SCP build %s: %w", filepath.Base(buildPath), err) + } + } + insideAgentDir := filepath.Join("agent", buildPath) + stdOut, errOut, err = sshClient.Exec(ctx, "mkdir", []string{"-p", filepath.Dir(insideAgentDir)}, nil) + if err != nil { + return fmt.Errorf("failed to create %s directory: %w (stdout: %s, stderr: %s)", filepath.Dir(insideAgentDir), err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(ctx, "ln", []string{filepath.Base(buildPath), insideAgentDir}, nil) + if err != nil { + return fmt.Errorf("failed to hard link %s to %s: %w (stdout: %s, stderr: %s)", filepath.Base(buildPath), insideAgentDir, err, stdOut, errOut) + } + } + } + + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/linux/rhel.go b/dev-tools/mage/target/srvrlesstest/testing/linux/rhel.go new file mode 100644 index 000000000000..f122db0812fd --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/linux/rhel.go @@ -0,0 +1,113 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package linux + +import ( + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "path" + "strings" + "time" +) + +// RhelRunner is a handler for running tests on Red Hat Enterprise Linux (RHEL) +type RhelRunner struct{} + +// Prepare configures the host for running the test +func (RhelRunner) Prepare(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, arch string, goVersion string) error { + logger.Logf("Install development tools") + dnfCtx, dnfCancel := context.WithTimeout(ctx, 20*time.Minute) + defer dnfCancel() + stdOut, errOut, err := sshClient.ExecWithRetry(dnfCtx, "sudo", []string{"dnf", "-y", "-v", "group", "install", "\"Development Tools\""}, 15*time.Second) + if err != nil { + return fmt.Errorf("failed to run 'dnf group install \"Development Tools\"': %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + // install golang + logger.Logf("Install golang %s (%s)", goVersion, arch) + goCtx, goCancel := context.WithTimeout(ctx, 20*time.Minute) + defer goCancel() + downloadURL := fmt.Sprintf("https://go.dev/dl/go%s.linux-%s.tar.gz", goVersion, arch) + filename := path.Base(downloadURL) + stdOut, errOut, err = sshClient.Exec(goCtx, "curl", []string{"-Ls", downloadURL, "--output", filename}, nil) + if err != nil { + return fmt.Errorf("failed to download go from %s with curl: %w (stdout: %s, stderr: %s)", downloadURL, err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(goCtx, "sudo", []string{"tar", "-C", "/usr/local", "-xzf", filename}, nil) + if err != nil { + return fmt.Errorf("failed to extract go to /usr/local with tar: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(goCtx, "sudo", []string{"ln", "-s", "/usr/local/go/bin/go", "/usr/bin/go"}, nil) + if err != nil { + return fmt.Errorf("failed to symlink /usr/local/go/bin/go to /usr/bin/go: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(goCtx, "sudo", []string{"ln", "-s", "/usr/local/go/bin/gofmt", "/usr/bin/gofmt"}, nil) + if err != nil { + return fmt.Errorf("failed to symlink /usr/local/go/bin/gofmt to /usr/bin/gofmt: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + return nil +} + +// Copy places the required files on the host +func (RhelRunner) Copy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error { + return linuxCopy(ctx, sshClient, logger, repoArchive, builds) +} + +// Run the test +func (RhelRunner) Run(ctx context.Context, verbose bool, sshClient ssh.SSHClient, logger common.Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (common.OSRunnerResult, error) { + var tests []string + for _, pkg := range batch.Tests { + for _, test := range pkg.Tests { + tests = append(tests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + var sudoTests []string + for _, pkg := range batch.SudoTests { + for _, test := range pkg.Tests { + sudoTests = append(sudoTests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + + logArg := "" + if verbose { + logArg = "-v" + } + var result common.OSRunnerResult + if len(tests) > 0 { + vars := fmt.Sprintf(`GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH" AGENT_VERSION="%s" TEST_DEFINE_PREFIX="%s" TEST_DEFINE_TESTS="%s"`, agentVersion, prefix, strings.Join(tests,
",")) + vars = extendVars(vars, env) + + script := fmt.Sprintf(`cd agent && %s ~/go/bin/mage %s integration:testOnRemote`, vars, logArg) + results, err := runTests(ctx, logger, "non-sudo", prefix, script, sshClient, batch.Tests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running non-sudo tests: %w", err) + } + result.Packages = results + } + + if len(sudoTests) > 0 { + prefix := fmt.Sprintf("%s-sudo", prefix) + vars := fmt.Sprintf(`GOPATH="$HOME/go" PATH="$HOME/go/bin:$PATH:/usr/sbin" AGENT_VERSION="%s" TEST_DEFINE_PREFIX="%s" TEST_DEFINE_TESTS="%s"`, agentVersion, prefix, strings.Join(sudoTests, ",")) + vars = extendVars(vars, env) + script := fmt.Sprintf(`cd agent && sudo %s ~/go/bin/mage %s integration:testOnRemote`, vars, logArg) + + results, err := runTests(ctx, logger, "sudo", prefix, script, sshClient, batch.SudoTests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running sudo tests: %w", err) + } + result.SudoPackages = results + } + + return result, nil +} + +// Diagnostics gathers any diagnostics from the host. +func (RhelRunner) Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error { + return linuxDiagnostics(ctx, sshClient, logger, destination) +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/log.go b/dev-tools/mage/target/srvrlesstest/testing/log.go new file mode 100644 index 000000000000..4815ea1b18f9 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/log.go @@ -0,0 +1,144 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package testing + +import ( + "bytes" + "encoding/json" + "errors" + "strings" + + "github.com/elastic/elastic-agent-libs/logp" +) + +// Logger is log interface that matches *testing.T. +type Logger interface { + // Log logs the arguments. + Log(args ...any) + // Logf logs the formatted arguments. + Logf(format string, args ...any) +} + +// logWatcher is an `io.Writer` that processes the log lines outputted from the spawned Elastic Agent. +// +// `Write` handles parsing lines as either ndjson or plain text. +type logWatcher struct { + remainder []byte + replicate Logger + alert chan error +} + +func newLogWatcher(replicate Logger) *logWatcher { + return &logWatcher{ + replicate: replicate, + alert: make(chan error), + } +} + +// Watch returns the channel that will get an error when an error is identified from the log. +func (r *logWatcher) Watch() <-chan error { + return r.alert +} + +// Write implements the `io.Writer` interface. +func (r *logWatcher) Write(p []byte) (int, error) { + if len(p) == 0 { + // nothing to do + return 0, nil + } + offset := 0 + for { + idx := bytes.IndexByte(p[offset:], '\n') + if idx < 0 { + // not all used add to remainder to be used on next call + r.remainder = append(r.remainder, p[offset:]...) + return len(p), nil + } + + var line []byte + if r.remainder != nil { + line = r.remainder + r.remainder = nil + line = append(line, p[offset:offset+idx]...) + } else { + line = append(line, p[offset:offset+idx]...) 
+ } + offset += idx + 1 + // drop '\r' from line (needed for Windows) + if len(line) > 0 && line[len(line)-1] == '\r' { + line = line[0 : len(line)-1] + } + if len(line) == 0 { + // empty line + continue + } + str := strings.TrimSpace(string(line)) + // try to parse line as JSON + if str[0] == '{' && r.handleJSON(str) { + // handled as JSON + continue + } + // considered standard text being it's not JSON, just replicate + if r.replicate != nil { + r.replicate.Log(str) + } + } +} + +func (r *logWatcher) handleJSON(line string) bool { + var evt map[string]interface{} + if err := json.Unmarshal([]byte(line), &evt); err != nil { + return false + } + if r.replicate != nil { + r.replicate.Log(line) + } + lvl := getLevel(evt, "log.level") + msg := getMessage(evt, "message") + if lvl == logp.ErrorLevel { + r.alert <- errors.New(msg) + } + return true +} + +func getLevel(evt map[string]interface{}, key string) logp.Level { + lvl := logp.InfoLevel + err := unmarshalLevel(&lvl, getStrVal(evt, key)) + if err == nil { + delete(evt, key) + } + return lvl +} + +func unmarshalLevel(lvl *logp.Level, val string) error { + if val == "" { + return errors.New("empty val") + } else if val == "trace" { + // logp doesn't handle trace level we cast to debug + *lvl = logp.DebugLevel + return nil + } + return lvl.Unpack(val) +} + +func getMessage(evt map[string]interface{}, key string) string { + msg := getStrVal(evt, key) + if msg != "" { + delete(evt, key) + } + return msg +} + +func getStrVal(evt map[string]interface{}, key string) string { + raw, ok := evt[key] + if !ok { + return "" + } + str, ok := raw.(string) + if !ok { + return "" + } + return str +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/multipas/provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/multipas/provisioner.go new file mode 100644 index 000000000000..41a12bea0b50 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/multipas/provisioner.go @@ -0,0 +1,317 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package multipass + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/core/process" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/runner" + "os" + "os/exec" + "path/filepath" + "runtime" + "time" + + "gopkg.in/yaml.v2" +) + +const ( + Ubuntu = "ubuntu" + Name = "multipass" +) + +type provisioner struct { + logger common.Logger +} + +// NewProvisioner creates the multipass provisioner +func NewProvisioner() common.InstanceProvisioner { + return &provisioner{} +} + +func (p *provisioner) Name() string { + return Name +} + +func (p *provisioner) SetLogger(l common.Logger) { + p.logger = l +} + +func (p *provisioner) Type() common.ProvisionerType { + return common.ProvisionerTypeVM +} + +// Supported returns true if multipass supports this OS. +// +// multipass only supports Ubuntu on the same architecture as the running host. 
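The Supported check that follows narrows multipass to Ubuntu LTS images on the host's own architecture. A self-contained sketch that mirrors that filter (the osSpec type and string constants stand in for define.OS and are illustrative only):

package main

import (
	"fmt"
	"runtime"
)

// osSpec is a stand-in for define.OS, only to illustrate the filter.
type osSpec struct {
	Type, Distro, Version, Arch string
}

// multipassSupported mirrors the check below: Ubuntu LTS releases only,
// and only on the same architecture as the running host.
func multipassSupported(os osSpec) bool {
	if os.Type != "linux" || os.Distro != "ubuntu" {
		return false
	}
	if os.Version != "20.04" && os.Version != "22.04" && os.Version != "24.04" {
		return false
	}
	return os.Arch == runtime.GOARCH
}

func main() {
	fmt.Println(multipassSupported(osSpec{"linux", "ubuntu", "24.04", runtime.GOARCH})) // true
	fmt.Println(multipassSupported(osSpec{"linux", "rhel", "8", runtime.GOARCH}))       // false
}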
+func (p *provisioner) Supported(os define.OS) bool { + if os.Type != define.Linux { + return false + } + if os.Distro != Ubuntu { + return false + } + if os.Version != "20.04" && os.Version != "22.04" && os.Version != "24.04" { + return false + } + // multipass only supports the same architecture of the host + if os.Arch != runtime.GOARCH { + return false + } + return true +} + +func (p *provisioner) Provision(ctx context.Context, cfg common.Config, batches []common.OSBatch) ([]common.Instance, error) { + // this doesn't provision the instances in parallel on purpose + // multipass cannot handle it, it either results in instances sharing the same IP address + // or some instances stuck in Starting state + for _, batch := range batches { + err := func(batch common.OSBatch) error { + launchCtx, launchCancel := context.WithTimeout(ctx, 5*time.Minute) + defer launchCancel() + err := p.launch(launchCtx, cfg, batch) + if err != nil { + return fmt.Errorf("instance %s failed: %w", batch.ID, err) + } + return nil + }(batch) + if err != nil { + return nil, err + } + } + + var results []common.Instance + instances, err := p.list(ctx) + if err != nil { + return nil, err + } + for _, batch := range batches { + mi, ok := instances[batch.ID] + if !ok { + return nil, fmt.Errorf("failed to find %s in multipass list output", batch.ID) + } + if mi.State != "Running" { + return nil, fmt.Errorf("instance %s is not marked as running", batch.ID) + } + results = append(results, common.Instance{ + ID: batch.ID, + Provisioner: Name, + Name: batch.ID, + IP: mi.IPv4[0], + Username: "ubuntu", + RemotePath: "/home/ubuntu/agent", + Internal: nil, + }) + } + return results, nil +} + +// Clean cleans up all provisioned resources. +func (p *provisioner) Clean(ctx context.Context, _ common.Config, instances []common.Instance) error { + // doesn't execute in parallel for the same reasons in Provision + // multipass just cannot handle it + for _, instance := range instances { + func(instance common.Instance) { + deleteCtx, deleteCancel := context.WithTimeout(ctx, 5*time.Minute) + defer deleteCancel() + err := p.delete(deleteCtx, instance) + if err != nil { + // prevent a failure from stopping the other instances and clean + p.logger.Logf("Delete instance %s failed: %s", instance.Name, err) + } + }(instance) + } + return nil +} + +// launch creates an instance. +func (p *provisioner) launch(ctx context.Context, cfg common.Config, batch common.OSBatch) error { + // check if instance already exists + err := p.ensureInstanceNotExist(ctx, batch) + if err != nil { + p.logger.Logf( + "could not check multipass instance %q does not exists, moving on anyway. 
Err: %v", err) + } + args := []string{ + "launch", + "-c", "2", + "-d", "50G", // need decent size for all the tests + "-m", "4G", + "-n", batch.ID, + "--cloud-init", "-", + batch.OS.Version, + } + + publicKeyPath := filepath.Join(cfg.StateDir, "id_rsa.pub") + publicKey, err := os.ReadFile(publicKeyPath) + if err != nil { + return fmt.Errorf("failed to read SSH key to send to multipass instance at %s: %w", publicKeyPath, err) + } + + var cloudCfg cloudinitConfig + cloudCfg.SSHAuthorizedKeys = []string{string(publicKey)} + cloudCfgData, err := yaml.Marshal(&cloudCfg) + if err != nil { + return fmt.Errorf("failed to marshal cloud-init configuration: %w", err) + } + + var output bytes.Buffer + p.logger.Logf("Launching multipass image %s", batch.ID) + proc, err := process.Start("multipass", process.WithContext(ctx), process.WithArgs(args), process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return fmt.Errorf("failed to run multipass launch: %w", err) + } + _, err = proc.Stdin.Write([]byte(fmt.Sprintf("#cloud-config\n%s", cloudCfgData))) + if err != nil { + _ = proc.Stdin.Close() + _ = proc.Kill() + <-proc.Wait() + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to write cloudinit to stdin: %w", err) + } + _ = proc.Stdin.Close() + ps := <-proc.Wait() + if !ps.Success() { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run multipass launch: exited with code: %d", ps.ExitCode()) + } + return nil +} + +func (p *provisioner) ensureInstanceNotExist(ctx context.Context, batch common.OSBatch) error { + var output bytes.Buffer + var stdErr bytes.Buffer + proc, err := process.Start("multipass", + process.WithContext(ctx), + process.WithArgs([]string{"list", "--format", "json"}), + process.WithCmdOptions( + runner.AttachOut(&output), + runner.AttachErr(&stdErr))) + if err != nil { + return fmt.Errorf("multipass list failed to run: %w", err) + } + + state := <-proc.Wait() + if !state.Success() { + msg := fmt.Sprintf("multipass list exited with non-zero status: %s", + state.String()) + p.logger.Logf(msg) + p.logger.Logf("output: %s", output.String()) + p.logger.Logf("stderr: %s", stdErr.String()) + return errors.New(msg) + } + list := struct { + List []struct { + Ipv4 []string `json:"ipv4"` + Name string `json:"name"` + Release string `json:"release"` + State string `json:"state"` + } `json:"list"` + }{} + err = json.NewDecoder(&output).Decode(&list) + if err != nil { + return fmt.Errorf("could not decode mutipass list output: %w", err) + } + + for _, i := range list.List { + if i.Name == batch.ID { + p.logger.Logf("multipass trying to delete instance %s", batch.ID) + + output.Reset() + stdErr.Reset() + proc, err = process.Start("multipass", + process.WithContext(ctx), + process.WithArgs([]string{"delete", "--purge", batch.ID}), + process.WithCmdOptions( + runner.AttachOut(&output), + runner.AttachErr(&stdErr))) + if err != nil { + return fmt.Errorf( + "multipass instance %q already exist, state %q. 
Could not delete it: %w", + batch.ID, i.State, err) + } + state = <-proc.Wait() + if !state.Success() { + msg := fmt.Sprintf("failed to delete and purge multipass instance %s: %s", + batch.ID, + state.String()) + p.logger.Logf(msg) + p.logger.Logf("output: %s", output.String()) + p.logger.Logf("stderr: %s", stdErr.String()) + return errors.New(msg) + } + + break + } + } + + return nil +} + +// delete deletes an instance. +func (p *provisioner) delete(ctx context.Context, instance common.Instance) error { + args := []string{ + "delete", + "-p", + instance.ID, + } + + var output bytes.Buffer + p.logger.Logf("Deleting instance %s", instance.Name) + proc, err := process.Start("multipass", process.WithContext(ctx), process.WithArgs(args), process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run multipass delete: %w", err) + } + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run multipass delete: exited with code: %d", ps.ExitCode()) + } + return nil +} + +// list all the instances. +func (p *provisioner) list(ctx context.Context) (map[string]instance, error) { + cmd := exec.CommandContext(ctx, "multipass", "list", "--format", "yaml") + result, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("failed to run multipass list: %w", err) + } + + // yaml output from multipass gives a list of instances for each instance name, + // even though there is only ever 1 entry in the list + var instancesMulti map[string][]instance + err = yaml.Unmarshal(result, &instancesMulti) + if err != nil { + return nil, fmt.Errorf("failed to parse multipass list output: %w", err) + } + instances := map[string]instance{} + for name, multi := range instancesMulti { + instances[name] = multi[0] + } + + return instances, nil +} + +type instance struct { + State string `yaml:"state"` + IPv4 []string `yaml:"ipv4"` + Release string `yaml:"release"` +} + +type cloudinitConfig struct { + SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"` +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ogc/api.go b/dev-tools/mage/target/srvrlesstest/testing/ogc/api.go new file mode 100644 index 000000000000..1cf19622a320 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ogc/api.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ogc + +import "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + +// Layout definition for `ogc layout import`. +type Layout struct { + Name string `yaml:"name"` + Provider string `yaml:"provider"` + InstanceSize string `yaml:"instance_size"` + RunsOn string `yaml:"runs_on"` + RemotePath string `yaml:"remote_path"` + Scale int `yaml:"scale"` + Username string `yaml:"username"` + SSHPrivateKey string `yaml:"ssh_private_key"` + SSHPublicKey string `yaml:"ssh_public_key"` + Ports []string `yaml:"ports"` + Tags []string `yaml:"tags"` + Labels map[string]string `yaml:"labels"` + Scripts string `yaml:"scripts"` +} + +// Machine definition returned by `ogc up`. 
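The Layout type above describes one entry of the YAML document that is later piped into `ogc layout import`. A trimmed, standalone sketch of what that document looks like when marshalled (the field values are invented for illustration):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// layout is a trimmed stand-in for the Layout type above, just to show the
// shape of the YAML document that `ogc layout import` consumes.
type layout struct {
	Name         string   `yaml:"name"`
	Provider     string   `yaml:"provider"`
	InstanceSize string   `yaml:"instance_size"`
	RunsOn       string   `yaml:"runs_on"`
	Scale        int      `yaml:"scale"`
	Username     string   `yaml:"username"`
	Ports        []string `yaml:"ports"`
	Tags         []string `yaml:"tags"`
}

func main() {
	doc := struct {
		Layouts []layout `yaml:"layouts"`
	}{
		Layouts: []layout{{
			Name:         "ubuntu-2404-amd64-1",
			Provider:     "google",
			InstanceSize: "e2-standard-2",
			RunsOn:       "ubuntu-2404-lts-amd64",
			Scale:        1,
			Username:     "ubuntu",
			Ports:        []string{"22:22"},
			Tags:         []string{"agent-integration", "linux", "amd64"},
		}},
	}
	out, err := yaml.Marshal(&doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}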
+type Machine struct { + ID int `yaml:"id"` + InstanceID string `yaml:"instance_id"` + InstanceName string `yaml:"instance_name"` + InstanceState string `yaml:"instance_state"` + PrivateIP string `yaml:"private_ip"` + PublicIP string `yaml:"public_ip"` + Layout Layout `yaml:"layout"` + Create string `yaml:"created"` +} + +// LayoutOS defines the minimal information for a mapping of an OS to the +// provider, instance size, and runs on for that OS. +type LayoutOS struct { + OS define.OS + Provider string + InstanceSize string + RunsOn string + Username string + RemotePath string +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ogc/config.go b/dev-tools/mage/target/srvrlesstest/testing/ogc/config.go new file mode 100644 index 000000000000..7a74db8fd195 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ogc/config.go @@ -0,0 +1,87 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ogc + +import ( + "encoding/json" + "errors" + "fmt" + "os" +) + +// Config is the configuration for using OGC. +type Config struct { + ServiceTokenPath string + Datacenter string + + content *serviceTokenContent +} + +// Validate returns an error if the information is invalid. +func (c *Config) Validate() error { + if c.ServiceTokenPath == "" { + return errors.New("field ServiceTokenPath must be set") + } + if c.Datacenter == "" { + return errors.New("field Datacenter must be set") + } + return c.ensureParsed() +} + +// ProjectID returns the project ID from the service token. +func (c *Config) ProjectID() (string, error) { + err := c.ensureParsed() + if err != nil { + return "", err + } + return c.content.ProjectID, nil +} + +// ClientEmail returns the client email from the service token. +func (c *Config) ClientEmail() (string, error) { + err := c.ensureParsed() + if err != nil { + return "", err + } + return c.content.ClientEmail, nil +} + +func (c *Config) ensureParsed() error { + if c.content != nil { + // already parsed + return nil + } + content, err := c.parse() + if err != nil { + return err + } + c.content = content + return nil +} + +func (c *Config) parse() (*serviceTokenContent, error) { + var content serviceTokenContent + raw, err := os.ReadFile(c.ServiceTokenPath) + if err != nil { + return nil, fmt.Errorf("failed to read contents of %s: %w", c.ServiceTokenPath, err) + } + err = json.Unmarshal(raw, &content) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal JSON contents of %s: %w", c.ServiceTokenPath, err) + } + if content.Type != "service_account" { + return nil, fmt.Errorf("not a service account token at %s; type != service_account", c.ServiceTokenPath) + } + return &content, nil +} + +// serviceTokenContent is parsed content from a service token file. +type serviceTokenContent struct { + Type string `json:"type"` + ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` + + // more fields exists but we only need the provided information +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ogc/provisioner.go b/dev-tools/mage/target/srvrlesstest/testing/ogc/provisioner.go new file mode 100644 index 000000000000..002a3d4c4cfd --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ogc/provisioner.go @@ -0,0 +1,341 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ogc + +import ( + "bytes" + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/core/process" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/runner" + "os" + "path/filepath" + "strings" + "time" + + "gopkg.in/yaml.v2" +) + +const ( + // LayoutIntegrationTag is the tag added to all layouts for the integration testing framework. + LayoutIntegrationTag = "agent-integration" + Name = "ogc" +) + +type provisioner struct { + logger common.Logger + cfg Config +} + +// NewProvisioner creates the OGC provisioner +func NewProvisioner(cfg Config) (common.InstanceProvisioner, error) { + err := cfg.Validate() + if err != nil { + return nil, err + } + return &provisioner{ + cfg: cfg, + }, nil +} + +func (p *provisioner) Name() string { + return Name +} + +func (p *provisioner) SetLogger(l common.Logger) { + p.logger = l +} + +func (p *provisioner) Type() common.ProvisionerType { + return common.ProvisionerTypeVM +} + +// Supported returns true when we support this OS for OGC. +func (p *provisioner) Supported(os define.OS) bool { + _, ok := findOSLayout(os) + return ok +} + +func (p *provisioner) Provision(ctx context.Context, cfg common.Config, batches []common.OSBatch) ([]common.Instance, error) { + // ensure the latest version + pullCtx, pullCancel := context.WithTimeout(ctx, 5*time.Minute) + defer pullCancel() + err := p.ogcPull(pullCtx) + if err != nil { + return nil, err + } + + // import the calculated layouts + importCtx, importCancel := context.WithTimeout(ctx, 30*time.Second) + defer importCancel() + err = p.ogcImport(importCtx, cfg, batches) + if err != nil { + return nil, err + } + + // bring up all the instances + upCtx, upCancel := context.WithTimeout(ctx, 30*time.Minute) + defer upCancel() + upOutput, err := p.ogcUp(upCtx) + if err != nil { + return nil, fmt.Errorf("ogc up failed: %w", err) + } + + // fetch the machines and run the batches on the machine + machines, err := p.ogcMachines(ctx) + if err != nil { + return nil, err + } + if len(machines) == 0 { + // Print the output so its clear what went wrong. + // Without this it's unclear where OGC went wrong, it + // doesn't do a great job of reporting a clean error + fmt.Fprintf(os.Stdout, "%s\n", upOutput) + return nil, fmt.Errorf("ogc didn't create any machines") + } + + // map the machines to instances + var instances []common.Instance + for _, b := range batches { + machine, ok := findMachine(machines, b.ID) + if !ok { + // print the output so its clear what went wrong. + // Without this it's unclear where OGC went wrong, it + // doesn't do a great job of reporting a clean error + fmt.Fprintf(os.Stdout, "%s\n", upOutput) + return nil, fmt.Errorf("failed to find machine for batch ID: %s", b.ID) + } + instances = append(instances, common.Instance{ + ID: b.ID, + Provisioner: Name, + Name: machine.InstanceName, + IP: machine.PublicIP, + Username: machine.Layout.Username, + RemotePath: machine.Layout.RemotePath, + Internal: map[string]interface{}{ + "instance_id": machine.InstanceID, + }, + }) + } + return instances, nil +} + +// Clean cleans up all provisioned resources. 
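Provision above matches each batch ID against the machines reported by `ogc ls --as-yaml` using the layout name. A standalone sketch of that matching step (the machine type is trimmed and the sample YAML is fabricated for illustration):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// machine is a trimmed stand-in for the Machine type defined earlier.
type machine struct {
	InstanceName string `yaml:"instance_name"`
	PublicIP     string `yaml:"public_ip"`
	Layout       struct {
		Name string `yaml:"name"`
	} `yaml:"layout"`
}

const sample = `
- instance_name: ogc-ubuntu-2404-1
  public_ip: 203.0.113.10
  layout:
    name: ubuntu-2404-amd64-1
`

func main() {
	var machines []machine
	if err := yaml.Unmarshal([]byte(sample), &machines); err != nil {
		panic(err)
	}
	// Provision matches each batch ID against Layout.Name, as findMachine does.
	for _, m := range machines {
		if m.Layout.Name == "ubuntu-2404-amd64-1" {
			fmt.Printf("batch %s -> %s (%s)\n", m.Layout.Name, m.InstanceName, m.PublicIP)
		}
	}
}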
+func (p *provisioner) Clean(ctx context.Context, cfg common.Config, _ []common.Instance) error { + return p.ogcDown(ctx) +} + +// ogcPull pulls the latest ogc version. +func (p *provisioner) ogcPull(ctx context.Context) error { + args := []string{ + "pull", + "docker.elastic.co/observability-ci/ogc:5.0.1", + } + var output bytes.Buffer + p.logger.Logf("Pulling latest ogc image") + proc, err := process.Start("docker", process.WithContext(ctx), process.WithArgs(args), process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return fmt.Errorf("failed to run docker ogcPull: %w", err) + } + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run ogc pull: docker run exited with code: %d", ps.ExitCode()) + } + return nil +} + +// ogcImport imports all the required batches into OGC. +func (p *provisioner) ogcImport(ctx context.Context, cfg common.Config, batches []common.OSBatch) error { + var layouts []Layout + for _, ob := range batches { + layouts = append(layouts, osBatchToOGC(cfg.StateDir, ob)) + } + layoutData, err := yaml.Marshal(struct { + Layouts []Layout `yaml:"layouts"` + }{ + Layouts: layouts, + }) + if err != nil { + return fmt.Errorf("failed to marshal layouts YAML: %w", err) + } + + var output bytes.Buffer + p.logger.Logf("Import layouts into ogc") + proc, err := p.ogcRun(ctx, []string{"layout", "import"}, true, process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return fmt.Errorf("failed to run ogc import: %w", err) + } + _, err = proc.Stdin.Write(layoutData) + if err != nil { + _ = proc.Stdin.Close() + _ = proc.Kill() + <-proc.Wait() + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to write layouts to stdin: %w", err) + } + _ = proc.Stdin.Close() + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run ogc import: docker run exited with code: %d", ps.ExitCode()) + } + return nil +} + +// ogcUp brings up all the instances. +func (p *provisioner) ogcUp(ctx context.Context) ([]byte, error) { + p.logger.Logf("Bring up instances through ogc") + var output bytes.Buffer + proc, err := p.ogcRun(ctx, []string{"up", LayoutIntegrationTag}, false, process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return nil, fmt.Errorf("failed to run ogc up: %w", err) + } + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return nil, fmt.Errorf("failed to run ogc up: docker run exited with code: %d", ps.ExitCode()) + } + return output.Bytes(), nil +} + +// ogcDown brings down all the instances. 
+func (p *provisioner) ogcDown(ctx context.Context) error { + p.logger.Logf("Bring down instances through ogc") + var output bytes.Buffer + proc, err := p.ogcRun(ctx, []string{"down", LayoutIntegrationTag}, false, process.WithCmdOptions(runner.AttachOut(&output), runner.AttachErr(&output))) + if err != nil { + return fmt.Errorf("failed to run ogc down: %w", err) + } + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + // print the output so its clear what went wrong + fmt.Fprintf(os.Stdout, "%s\n", output.Bytes()) + return fmt.Errorf("failed to run ogc down: docker run exited with code: %d", ps.ExitCode()) + } + return nil +} + +// ogcMachines lists all the instances. +func (p *provisioner) ogcMachines(ctx context.Context) ([]Machine, error) { + var out bytes.Buffer + proc, err := p.ogcRun(ctx, []string{"ls", "--as-yaml"}, false, process.WithCmdOptions(runner.AttachOut(&out))) + if err != nil { + return nil, fmt.Errorf("failed to run ogc ls: %w", err) + } + ps := <-proc.Wait() + if ps.ExitCode() != 0 { + return nil, fmt.Errorf("failed to run ogc ls: docker run exited with code: %d", ps.ExitCode()) + } + var machines []Machine + err = yaml.Unmarshal(out.Bytes(), &machines) + if err != nil { + return nil, fmt.Errorf("failed to parse ogc ls output: %w", err) + } + return machines, nil +} + +func (p *provisioner) ogcRun(ctx context.Context, args []string, interactive bool, processOpts ...process.StartOption) (*process.Info, error) { + wd, err := runner.WorkDir() + if err != nil { + return nil, err + } + tokenName := filepath.Base(p.cfg.ServiceTokenPath) + clientEmail, err := p.cfg.ClientEmail() + if err != nil { + return nil, err + } + projectID, err := p.cfg.ProjectID() + if err != nil { + return nil, err + } + runArgs := []string{"run"} + if interactive { + runArgs = append(runArgs, "-i") + } + runArgs = append(runArgs, + "--rm", + "-e", + fmt.Sprintf("GOOGLE_APPLICATION_SERVICE_ACCOUNT=%s", clientEmail), + "-e", + fmt.Sprintf("GOOGLE_APPLICATION_CREDENTIALS=/root/%s", tokenName), + "-e", + fmt.Sprintf("GOOGLE_PROJECT=%s", projectID), + "-e", + fmt.Sprintf("GOOGLE_DATACENTER=%s", p.cfg.Datacenter), + "-v", + fmt.Sprintf("%s:/root/%s", p.cfg.ServiceTokenPath, tokenName), + "-v", + fmt.Sprintf("%s:%s", wd, wd), + "-w", + wd, + "docker.elastic.co/observability-ci/ogc:5.0.1", + "--", + "ogc", + "-v", + ) + runArgs = append(runArgs, args...) + opts := []process.StartOption{process.WithContext(ctx), process.WithArgs(runArgs)} + opts = append(opts, processOpts...) + return process.Start("docker", opts...) 
+} + +func osBatchToOGC(cacheDir string, batch common.OSBatch) Layout { + tags := []string{ + LayoutIntegrationTag, + batch.OS.Type, + batch.OS.Arch, + } + if batch.OS.Type == define.Linux { + tags = append(tags, strings.ToLower(fmt.Sprintf("%s-%s", batch.OS.Distro, strings.Replace(batch.OS.Version, ".", "-", -1)))) + } else { + tags = append(tags, strings.ToLower(fmt.Sprintf("%s-%s", batch.OS.Type, strings.Replace(batch.OS.Version, ".", "-", -1)))) + } + los, _ := findOSLayout(batch.OS.OS) + return Layout{ + Name: batch.ID, + Provider: los.Provider, + InstanceSize: los.InstanceSize, + RunsOn: los.RunsOn, + RemotePath: los.RemotePath, + Scale: 1, + Username: los.Username, + SSHPrivateKey: cacheDir + "/id_rsa", + SSHPublicKey: cacheDir + "/id_rsa.pub", + Ports: []string{"22:22"}, + Tags: tags, + Labels: map[string]string{ + "division": "engineering", + "org": "ingest", + "team": "elastic-agent-control-plane", + "project": "elastic-agent", + }, + Scripts: "path", // not used; but required by OGC + } +} + +func findOSLayout(os define.OS) (LayoutOS, bool) { + for _, s := range ogcSupported { + if s.OS == os { + return s, true + } + } + return LayoutOS{}, false +} + +func findMachine(machines []Machine, name string) (Machine, bool) { + for _, m := range machines { + if m.Layout.Name == name { + return m, true + } + } + return Machine{}, false +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ogc/supported.go b/dev-tools/mage/target/srvrlesstest/testing/ogc/supported.go new file mode 100644 index 000000000000..f19bdabb4ade --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ogc/supported.go @@ -0,0 +1,189 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ogc + +import ( + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/supported" +) + +const ( + // Google is for the Google Cloud Platform (GCP) + Google = "google" +) + +// ogcSupported defines the set of supported OS's the OGC provisioner currently supports. +// +// In the case that a batch is not specific on the version and/or distro the first +// one in this list will be picked. So it's best to place the one that we want the +// most testing at the top. 
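Because an under-specified request resolves to the earliest matching entry, the ordering of ogcSupported below matters. A simplified standalone sketch of that first-match behaviour (the real resolution lives in the supported package; this helper only illustrates the ordering point):

package main

import "fmt"

// spec is a stand-in for the OS fields that matter for matching here.
type spec struct {
	Type, Arch, Distro, Version string
}

// firstMatch returns the first entry whose populated request fields match,
// so an under-specified request resolves to the earliest (preferred) entry.
func firstMatch(supported []spec, want spec) (spec, bool) {
	for _, s := range supported {
		if want.Type != "" && want.Type != s.Type {
			continue
		}
		if want.Arch != "" && want.Arch != s.Arch {
			continue
		}
		if want.Distro != "" && want.Distro != s.Distro {
			continue
		}
		if want.Version != "" && want.Version != s.Version {
			continue
		}
		return s, true
	}
	return spec{}, false
}

func main() {
	supported := []spec{
		{"linux", "amd64", "ubuntu", "24.04"},
		{"linux", "amd64", "ubuntu", "22.04"},
		{"linux", "amd64", "rhel", "8"},
	}
	got, _ := firstMatch(supported, spec{Type: "linux", Distro: "ubuntu"})
	fmt.Println(got.Version) // 24.04: the first Ubuntu entry wins
}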
+var ogcSupported = []LayoutOS{ + { + OS: define.OS{ + Type: define.Linux, + Arch: define.AMD64, + Distro: supported.Ubuntu, + Version: "24.04", + }, + Provider: Google, + InstanceSize: "e2-standard-2", // 2 amd64 cpus, 8 GB RAM + RunsOn: "ubuntu-2404-lts-amd64", + Username: "ubuntu", + RemotePath: "/home/ubuntu/agent", + }, + { + OS: define.OS{ + Type: define.Linux, + Arch: define.AMD64, + Distro: supported.Ubuntu, + Version: "22.04", + }, + Provider: Google, + InstanceSize: "e2-standard-2", // 2 amd64 cpus, 8 GB RAM + RunsOn: "ubuntu-2204-lts", + Username: "ubuntu", + RemotePath: "/home/ubuntu/agent", + }, + { + OS: define.OS{ + Type: define.Linux, + Arch: define.AMD64, + Distro: supported.Ubuntu, + Version: "20.04", + }, + Provider: Google, + InstanceSize: "e2-standard-2", // 2 amd64 cpus, 8 GB RAM + RunsOn: "ubuntu-2004-lts", + Username: "ubuntu", + RemotePath: "/home/ubuntu/agent", + }, + // These instance types are experimental on Google Cloud and very unstable + // We will wait until Google introduces new ARM instance types + // https://cloud.google.com/blog/products/compute/introducing-googles-new-arm-based-cpu + // { + // OS: define.OS{ + // Type: define.Linux, + // Arch: define.ARM64, + // Distro: runner.Ubuntu, + // Version: "24.04", + // }, + // Provider: Google, + // InstanceSize: "t2a-standard-4", // 4 arm64 cpus, 16 GB RAM + // RunsOn: "ubuntu-2404-lts-arm64", + // Username: "ubuntu", + // RemotePath: "/home/ubuntu/agent", + // }, + // { + // OS: define.OS{ + // Type: define.Linux, + // Arch: define.ARM64, + // Distro: runner.Ubuntu, + // Version: "22.04", + // }, + // Provider: Google, + // InstanceSize: "t2a-standard-4", // 4 arm64 cpus, 16 GB RAM + // RunsOn: "ubuntu-2204-lts-arm64", + // Username: "ubuntu", + // RemotePath: "/home/ubuntu/agent", + // }, + // { + // OS: define.OS{ + // Type: define.Linux, + // Arch: define.ARM64, + // Distro: runner.Ubuntu, + // Version: "20.04", + // }, + // Provider: Google, + // InstanceSize: "t2a-standard-4", // 4 arm64 cpus, 16 GB RAM + // RunsOn: "ubuntu-2004-lts-arm64", + // Username: "ubuntu", + // RemotePath: "/home/ubuntu/agent", + // }, + { + OS: define.OS{ + Type: define.Linux, + Arch: define.AMD64, + Distro: supported.Rhel, + Version: "8", + }, + Provider: Google, + InstanceSize: "e2-standard-2", // 2 amd64 cpus, 8 GB RAM + RunsOn: "rhel-8", + Username: "rhel", + RemotePath: "/home/rhel/agent", + }, + { + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2022", + }, + Provider: Google, + InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM + RunsOn: "windows-2022", + Username: "windows", + RemotePath: "C:\\Users\\windows\\agent", + }, + { + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2022-core", + }, + Provider: Google, + InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM + RunsOn: "windows-2022-core", + Username: "windows", + RemotePath: "C:\\Users\\windows\\agent", + }, + { + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2019", + }, + Provider: Google, + InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM + RunsOn: "windows-2019", + Username: "windows", + RemotePath: "C:\\Users\\windows\\agent", + }, + { + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2019-core", + }, + Provider: Google, + InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM + RunsOn: "windows-2019-core", + Username: "windows", + RemotePath: "C:\\Users\\windows\\agent", + }, + { + OS: define.OS{ + Type: define.Windows, + Arch: 
define.AMD64, + Version: "2016", + }, + Provider: Google, + InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM + RunsOn: "windows-2016", + Username: "windows", + RemotePath: "C:\\Users\\windows\\agent", + }, + { + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2016-core", + }, + Provider: Google, + InstanceSize: "e2-standard-4", // 4 amd64 cpus, 16 GB RAM + RunsOn: "windows-2016-core", + Username: "windows", + RemotePath: "C:\\Users\\windows\\agent", + }, +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/archiver.go b/dev-tools/mage/target/srvrlesstest/testing/runner/archiver.go new file mode 100644 index 000000000000..4b121afa0ae6 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/runner/archiver.go @@ -0,0 +1,112 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package runner + +import ( + "archive/zip" + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" +) + +func createRepoZipArchive(ctx context.Context, dir string, dest string) error { + absDir, err := filepath.Abs(dir) + if err != nil { + return fmt.Errorf("failed to get absolute path to %s: %w", dir, err) + } + + projectFilesOutput, err := cmdBufferedOutput(exec.Command("git", "ls-files", "-z"), dir) + if err != nil { + return err + } + + // Add files that are not yet tracked in git. Prevents a footcannon where someone writes code to a new file, then tests it before they add to git + untrackedOutput, err := cmdBufferedOutput(exec.Command("git", "ls-files", "--exclude-standard", "-o", "-z"), dir) + if err != nil { + return err + } + + _, err = io.Copy(&projectFilesOutput, &untrackedOutput) + if err != nil { + return fmt.Errorf("failed to read stdout of git ls-files -o: %w", err) + } + + archive, err := os.Create(dest) + if err != nil { + return fmt.Errorf("failed to create file %s: %w", dest, err) + } + defer archive.Close() + + zw := zip.NewWriter(archive) + defer zw.Close() + + s := bufio.NewScanner(&projectFilesOutput) + s.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if i := strings.IndexRune(string(data), '\x00'); i >= 0 { + return i + 1, data[0:i], nil + } + if !atEOF { + return 0, nil, nil + } + return len(data), data, bufio.ErrFinalToken + }) + for s.Scan() { + if ctx.Err() != nil { + // incomplete close and delete + _ = archive.Close() + _ = os.Remove(dest) + return ctx.Err() + } + err := func(line string) error { + if line == "" { + return nil + } + fullPath := filepath.Join(absDir, line) + s, err := os.Stat(fullPath) + if err != nil { + return fmt.Errorf("failed to stat file %s: %w", fullPath, err) + } + if s.IsDir() { + // skip directories + return nil + } + f, err := os.Open(fullPath) + if err != nil { + return fmt.Errorf("failed to open file %s: %w", fullPath, err) + } + defer f.Close() + w, err := zw.Create(line) + if err != nil { + return fmt.Errorf("failed to create zip entry %s: %w", line, err) + } + _, err = io.Copy(w, f) + if err != nil { + return fmt.Errorf("failed to copy zip entry %s: %w", line, err) + } + return nil + }(s.Text()) + if err != nil { + return fmt.Errorf("error adding files: %w", err) + } + } + return nil +} + +func cmdBufferedOutput(cmd *exec.Cmd, workDir string) (bytes.Buffer, error) { + var stdoutBuf bytes.Buffer + cmd.Dir = workDir + cmd.Stdout = &stdoutBuf + 
err := cmd.Run() + if err != nil { + return *bytes.NewBufferString(""), fmt.Errorf("failed to run cmd %s: %w", strings.Join(cmd.Args, " "), err) + } + return stdoutBuf, nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/json.go b/dev-tools/mage/target/srvrlesstest/testing/runner/json.go new file mode 100644 index 000000000000..b8d48663946c --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/runner/json.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package runner + +import ( + "bufio" + "bytes" + "encoding/json" +) + +type JSONTestEntry struct { + Time string `json:"Time"` + Action string `json:"Action"` + Package string `json:"Package"` + Test string `json:"Test"` + Output string `json:"Output"` +} + +func suffixJSONResults(content []byte, suffix string) ([]byte, error) { + var result bytes.Buffer + sc := bufio.NewScanner(bytes.NewReader(content)) + for sc.Scan() { + var entry JSONTestEntry + err := json.Unmarshal([]byte(sc.Text()), &entry) + if err != nil { + return nil, err + } + if entry.Package != "" { + entry.Package += suffix + } + raw, err := json.Marshal(&entry) + if err != nil { + return nil, err + } + _, err = result.Write(raw) + if err != nil { + return nil, err + } + _, err = result.Write([]byte("\n")) + if err != nil { + return nil, err + } + } + return result.Bytes(), nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/junit.go b/dev-tools/mage/target/srvrlesstest/testing/runner/junit.go new file mode 100644 index 000000000000..d1d38af94cb8 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/runner/junit.go @@ -0,0 +1,86 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package runner + +import ( + "encoding/xml" + "io" +) + +// JUnitTestSuites is a collection of JUnit test suites. +type JUnitTestSuites struct { + XMLName xml.Name `xml:"testsuites"` + Name string `xml:"name,attr,omitempty"` + Tests int `xml:"tests,attr,omitempty"` + Failures int `xml:"failures,attr,omitempty"` + Errors int `xml:"errors,attr,omitempty"` + Time string `xml:"time,attr,omitempty"` + Suites []JUnitTestSuite `xml:"testsuite"` +} + +// JUnitTestSuite is a single JUnit test suite which may contain many +// testcases. +type JUnitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + Tests int `xml:"tests,attr"` + Failures int `xml:"failures,attr"` + Time string `xml:"time,attr"` + Name string `xml:"name,attr"` + Properties []JUnitProperty `xml:"properties>property,omitempty"` + TestCases []JUnitTestCase `xml:"testcase"` + Timestamp string `xml:"timestamp,attr"` +} + +// JUnitTestCase is a single test case with its result. +type JUnitTestCase struct { + XMLName xml.Name `xml:"testcase"` + Classname string `xml:"classname,attr"` + Name string `xml:"name,attr"` + Time string `xml:"time,attr"` + SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"` + Failure *JUnitFailure `xml:"failure,omitempty"` +} + +// JUnitSkipMessage contains the reason why a testcase was skipped. +type JUnitSkipMessage struct { + Message string `xml:"message,attr"` +} + +// JUnitProperty represents a key/value pair used to define properties. 
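suffixJSONResults above rewrites each `go test -json` line so the Package field carries a per-batch suffix and merged results stay distinct. A self-contained sketch of the same transformation (the suffix value is illustrative):

package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
)

// entry mirrors JSONTestEntry: one line of `go test -json` output.
type entry struct {
	Time    string `json:"Time"`
	Action  string `json:"Action"`
	Package string `json:"Package"`
	Test    string `json:"Test"`
	Output  string `json:"Output"`
}

// addSuffix appends suffix to the Package of every line, the same idea as
// suffixJSONResults: it keeps per-OS results distinct when merged.
func addSuffix(content []byte, suffix string) ([]byte, error) {
	var out bytes.Buffer
	sc := bufio.NewScanner(bytes.NewReader(content))
	for sc.Scan() {
		var e entry
		if err := json.Unmarshal(sc.Bytes(), &e); err != nil {
			return nil, err
		}
		if e.Package != "" {
			e.Package += suffix
		}
		raw, err := json.Marshal(&e)
		if err != nil {
			return nil, err
		}
		out.Write(raw)
		out.WriteByte('\n')
	}
	return out.Bytes(), sc.Err()
}

func main() {
	in := []byte(`{"Action":"pass","Package":"github.com/example/pkg","Test":"TestOne"}` + "\n")
	out, err := addSuffix(in, "(ubuntu-2404)")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}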
+type JUnitProperty struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +// JUnitFailure contains data related to a failed test. +type JUnitFailure struct { + Message string `xml:"message,attr"` + Type string `xml:"type,attr"` + Contents string `xml:",chardata"` +} + +// parseJUnit parses contents into a JUnit structure. +func parseJUnit(contents []byte) (JUnitTestSuites, error) { + var suites JUnitTestSuites + err := xml.Unmarshal(contents, &suites) + if err != nil { + return JUnitTestSuites{}, err + } + return suites, nil +} + +// writeJUnit writes the suites to the out writer. +func writeJUnit(out io.Writer, suites JUnitTestSuites) error { + doc, err := xml.MarshalIndent(suites, "", "\t") + if err != nil { + return err + } + _, err = out.Write([]byte(xml.Header)) + if err != nil { + return err + } + _, err = out.Write(doc) + return err +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/runner.go b/dev-tools/mage/target/srvrlesstest/testing/runner/runner.go new file mode 100644 index 000000000000..9a4fbfa966b5 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/runner/runner.go @@ -0,0 +1,955 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package runner + +import ( + "bytes" + "context" + "errors" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing" + "io" + "os" + "path/filepath" + "slices" + "strings" + "sync" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/sync/errgroup" + "gopkg.in/yaml.v2" + + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + tssh "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/supported" +) + +// Result is the complete result from the runner. +type Result struct { + // Tests is the number of tests ran. + Tests int + // Failures is the number of tests that failed. + Failures int + // Output is the raw test output. + Output []byte + // XMLOutput is the XML Junit output. + XMLOutput []byte + // JSONOutput is the JSON output. + JSONOutput []byte +} + +// State represents the state storage of what has been provisioned. +type State struct { + // Instances stores provisioned and prepared instances. + Instances []StateInstance `yaml:"instances"` + + // Stacks store provisioned stacks. + Stacks []common.Stack `yaml:"stacks"` +} + +// StateInstance is an instance stored in the state. +type StateInstance struct { + common.Instance + + // Prepared set to true when the instance is prepared. + Prepared bool `yaml:"prepared"` +} + +// Runner runs the tests on remote instances. +type Runner struct { + cfg common.Config + logger common.Logger + ip common.InstanceProvisioner + sp common.StackProvisioner + + batches []common.OSBatch + + batchToStack map[string]stackRes + batchToStackCh map[string]chan stackRes + batchToStackMx sync.Mutex + + stateMx sync.Mutex + state State +} + +// NewRunner creates a new runner based on the provided batches. 
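+//
+// The config is validated, the requested batches are filtered down to the
+// platforms supported by the instance provisioner, and previously provisioned
+// state is loaded from the state directory so existing instances and stacks
+// can be reused.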
+func NewRunner(cfg common.Config, ip common.InstanceProvisioner, sp common.StackProvisioner, batches ...define.Batch) (*Runner, error) { + err := cfg.Validate() + if err != nil { + return nil, err + } + platforms, err := cfg.GetPlatforms() + if err != nil { + return nil, err + } + + osBatches, err := supported.CreateBatches(batches, platforms, cfg.Groups, cfg.Matrix, cfg.SingleTest) + if err != nil { + return nil, err + } + osBatches = filterSupportedOS(osBatches, ip) + + logger := &runnerLogger{ + writer: os.Stdout, + timestamp: cfg.Timestamp, + } + ip.SetLogger(logger) + sp.SetLogger(logger) + + r := &Runner{ + cfg: cfg, + logger: logger, + ip: ip, + sp: sp, + batches: osBatches, + batchToStack: make(map[string]stackRes), + batchToStackCh: make(map[string]chan stackRes), + } + + err = r.loadState() + if err != nil { + return nil, err + } + return r, nil +} + +// Logger returns the logger used by the runner. +func (r *Runner) Logger() common.Logger { + return r.logger +} + +// Run runs all the tests. +func (r *Runner) Run(ctx context.Context) (Result, error) { + // validate tests can even be performed + err := r.validate() + if err != nil { + return Result{}, err + } + + // prepare + prepareCtx, prepareCancel := context.WithTimeout(ctx, 10*time.Minute) + defer prepareCancel() + sshAuth, repoArchive, err := r.prepare(prepareCtx) + if err != nil { + return Result{}, err + } + + // start the needed stacks + err = r.startStacks(ctx) + if err != nil { + return Result{}, err + } + + // only send to the provisioner the batches that need to be created + var instances []StateInstance + var batches []common.OSBatch + for _, b := range r.batches { + if !b.Skip { + i, ok := r.findInstance(b.ID) + if ok { + instances = append(instances, i) + } else { + batches = append(batches, b) + } + } + } + if len(batches) > 0 { + provisionedInstances, err := r.ip.Provision(ctx, r.cfg, batches) + if err != nil { + return Result{}, err + } + for _, i := range provisionedInstances { + instances = append(instances, StateInstance{ + Instance: i, + Prepared: false, + }) + } + } + + var results map[string]common.OSRunnerResult + switch r.ip.Type() { + case common.ProvisionerTypeVM: + // use SSH to perform all the required work on the instances + results, err = r.runInstances(ctx, sshAuth, repoArchive, instances) + if err != nil { + return Result{}, err + } + case common.ProvisionerTypeK8SCluster: + results, err = r.runK8sInstances(ctx, instances) + if err != nil { + return Result{}, err + } + + default: + return Result{}, fmt.Errorf("invalid provisioner type %d", r.ip.Type()) + } + + // merge the results + return r.mergeResults(results) +} + +// Clean performs a cleanup to ensure anything that could have been left running is removed. 
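+//
+// Instances and stacks recorded in the state are removed from the state file
+// first, then deleted concurrently, each deletion bounded by a 10 minute
+// timeout.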
+func (r *Runner) Clean() error {
+	r.stateMx.Lock()
+	defer r.stateMx.Unlock()
+
+	var instances []common.Instance
+	for _, i := range r.state.Instances {
+		instances = append(instances, i.Instance)
+	}
+	r.state.Instances = nil
+	stacks := make([]common.Stack, len(r.state.Stacks))
+	copy(stacks, r.state.Stacks)
+	r.state.Stacks = nil
+	err := r.writeState()
+	if err != nil {
+		return err
+	}
+
+	var g errgroup.Group
+	g.Go(func() error {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+		defer cancel()
+		return r.ip.Clean(ctx, r.cfg, instances)
+	})
+	for _, stack := range stacks {
+		g.Go(func(stack common.Stack) func() error {
+			return func() error {
+				ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+				defer cancel()
+				return r.sp.Delete(ctx, stack)
+			}
+		}(stack))
+	}
+	return g.Wait()
+}
+
+func (r *Runner) runK8sInstances(ctx context.Context, instances []StateInstance) (map[string]common.OSRunnerResult, error) {
+	results := make(map[string]common.OSRunnerResult)
+	var resultsMx sync.Mutex
+	var err error
+	for _, instance := range instances {
+		batch, ok := findBatchByID(instance.ID, r.batches)
+		if !ok {
+			err = fmt.Errorf("unable to find batch with ID: %s", instance.ID)
+			continue
+		}
+
+		logger := &batchLogger{wrapped: r.logger, prefix: instance.ID}
+		// start with the ExtraEnv first preventing the other environment flags below
+		// from being overwritten
+		env := map[string]string{}
+		for k, v := range r.cfg.ExtraEnv {
+			env[k] = v
+		}
+
+		// ensure that we have all the requirements for the stack if required
+		if batch.Batch.Stack != nil {
+			// wait for the stack to be ready before continuing
+			logger.Logf("Waiting for stack to be ready...")
+			stack, stackErr := r.getStackForBatchID(batch.ID)
+			if stackErr != nil {
+				err = stackErr
+				continue
+			}
+			env["ELASTICSEARCH_HOST"] = stack.Elasticsearch
+			env["ELASTICSEARCH_USERNAME"] = stack.Username
+			env["ELASTICSEARCH_PASSWORD"] = stack.Password
+			env["KIBANA_HOST"] = stack.Kibana
+			env["KIBANA_USERNAME"] = stack.Username
+			env["KIBANA_PASSWORD"] = stack.Password
+			logger.Logf("Using Stack with Kibana host %s, credentials available under .integration-cache", stack.Kibana)
+		}
+
+		// set the go test flags
+		env["GOTEST_FLAGS"] = r.cfg.TestFlags
+		env["KUBECONFIG"] = instance.Instance.Internal["config"].(string)
+		env["TEST_BINARY_NAME"] = r.cfg.BinaryName
+		env["K8S_VERSION"] = instance.Instance.Internal["version"].(string)
+		env["AGENT_IMAGE"] = instance.Instance.Internal["agent_image"].(string)
+
+		prefix := fmt.Sprintf("%s-%s", instance.Instance.Internal["version"].(string), batch.ID)
+
+		// run the actual tests on the host
+		result, runErr := batch.OS.Runner.Run(ctx, r.cfg.VerboseMode, nil, logger, r.cfg.AgentVersion, prefix, batch.Batch, env)
+		if runErr != nil {
+			logger.Logf("Failed to execute tests on instance: %s", runErr)
+			err = fmt.Errorf("failed to execute tests on instance %s: %w", instance.Name, runErr)
+		}
+		resultsMx.Lock()
+		results[batch.ID] = result
+		resultsMx.Unlock()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+// runInstances runs the batch on each instance in parallel.
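+//
+// Each instance is handled in its own errgroup goroutine and the results are
+// collected into a map keyed by batch ID.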
+func (r *Runner) runInstances(ctx context.Context, sshAuth ssh.AuthMethod, repoArchive string, instances []StateInstance) (map[string]common.OSRunnerResult, error) { + g, ctx := errgroup.WithContext(ctx) + results := make(map[string]common.OSRunnerResult) + var resultsMx sync.Mutex + for _, i := range instances { + func(i StateInstance) { + g.Go(func() error { + batch, ok := findBatchByID(i.ID, r.batches) + if !ok { + return fmt.Errorf("unable to find batch with ID: %s", i.ID) + } + logger := &batchLogger{wrapped: r.logger, prefix: i.ID} + result, err := r.runInstance(ctx, sshAuth, logger, repoArchive, batch, i) + if err != nil { + logger.Logf("Failed for instance %s (@ %s): %s\n", i.ID, i.IP, err) + return err + } + resultsMx.Lock() + results[batch.ID] = result + resultsMx.Unlock() + return nil + }) + }(i) + } + err := g.Wait() + if err != nil { + return nil, err + } + return results, nil +} + +// runInstance runs the batch on the machine. +func (r *Runner) runInstance(ctx context.Context, sshAuth ssh.AuthMethod, logger common.Logger, repoArchive string, batch common.OSBatch, instance StateInstance) (common.OSRunnerResult, error) { + sshPrivateKeyPath, err := filepath.Abs(filepath.Join(r.cfg.StateDir, "id_rsa")) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("failed to determine OGC SSH private key path: %w", err) + } + + logger.Logf("Starting SSH; connect with `ssh -i %s %s@%s`", sshPrivateKeyPath, instance.Username, instance.IP) + client := tssh.NewClient(instance.IP, instance.Username, sshAuth, logger) + connectCtx, connectCancel := context.WithTimeout(ctx, 10*time.Minute) + defer connectCancel() + err = client.Connect(connectCtx) + if err != nil { + logger.Logf("Failed to connect to instance %s: %s", instance.IP, err) + return common.OSRunnerResult{}, fmt.Errorf("failed to connect to instance %s: %w", instance.Name, err) + } + defer client.Close() + logger.Logf("Connected over SSH") + + if !instance.Prepared { + // prepare the host to run the tests + logger.Logf("Preparing instance") + err = batch.OS.Runner.Prepare(ctx, client, logger, batch.OS.Arch, r.cfg.GOVersion) + if err != nil { + logger.Logf("Failed to prepare instance: %s", err) + return common.OSRunnerResult{}, fmt.Errorf("failed to prepare instance %s: %w", instance.Name, err) + } + + // now its prepared, add to state + instance.Prepared = true + err = r.addOrUpdateInstance(instance) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("failed to save instance state %s: %w", instance.Name, err) + } + } + + // copy the required files (done every run) + err = batch.OS.Runner.Copy(ctx, client, logger, repoArchive, r.getBuilds(batch)) + if err != nil { + logger.Logf("Failed to copy files instance: %s", err) + return common.OSRunnerResult{}, fmt.Errorf("failed to copy files to instance %s: %w", instance.Name, err) + } + // start with the ExtraEnv first preventing the other environment flags below + // from being overwritten + env := map[string]string{} + for k, v := range r.cfg.ExtraEnv { + env[k] = v + } + + // ensure that we have all the requirements for the stack if required + if batch.Batch.Stack != nil { + // wait for the stack to be ready before continuing + logger.Logf("Waiting for stack to be ready...") + stack, err := r.getStackForBatchID(batch.ID) + if err != nil { + return common.OSRunnerResult{}, err + } + env["ELASTICSEARCH_HOST"] = stack.Elasticsearch + env["ELASTICSEARCH_USERNAME"] = stack.Username + env["ELASTICSEARCH_PASSWORD"] = stack.Password + env["KIBANA_HOST"] = stack.Kibana + 
env["KIBANA_USERNAME"] = stack.Username + env["KIBANA_PASSWORD"] = stack.Password + logger.Logf("Using Stack with Kibana host %s, credentials available under .integration-cache", stack.Kibana) + } + + // set the go test flags + env["GOTEST_FLAGS"] = r.cfg.TestFlags + env["TEST_BINARY_NAME"] = r.cfg.BinaryName + + // run the actual tests on the host + result, err := batch.OS.Runner.Run(ctx, r.cfg.VerboseMode, client, logger, r.cfg.AgentVersion, batch.ID, batch.Batch, env) + if err != nil { + logger.Logf("Failed to execute tests on instance: %s", err) + return common.OSRunnerResult{}, fmt.Errorf("failed to execute tests on instance %s: %w", instance.Name, err) + } + + // fetch any diagnostics + if r.cfg.DiagnosticsDir != "" { + err = batch.OS.Runner.Diagnostics(ctx, client, logger, r.cfg.DiagnosticsDir) + if err != nil { + logger.Logf("Failed to fetch diagnostics: %s", err) + } + } else { + logger.Logf("Skipping diagnostics fetch as DiagnosticsDir was not set") + } + + return result, nil +} + +// validate ensures that required builds of Elastic Agent exist +func (r *Runner) validate() error { + var requiredFiles []string + for _, b := range r.batches { + if !b.Skip { + for _, build := range r.getBuilds(b) { + if !slices.Contains(requiredFiles, build.Path) { + requiredFiles = append(requiredFiles, build.Path) + } + if !slices.Contains(requiredFiles, build.SHA512Path) { + requiredFiles = append(requiredFiles, build.SHA512Path) + } + } + } + } + var missingFiles []string + for _, file := range requiredFiles { + _, err := os.Stat(file) + if os.IsNotExist(err) { + missingFiles = append(missingFiles, file) + } else if err != nil { + return err + } + } + if len(missingFiles) > 0 { + return fmt.Errorf("missing required Elastic Agent package builds for integration runner to execute: %s", strings.Join(missingFiles, ", ")) + } + return nil +} + +// getBuilds returns the build for the batch. +func (r *Runner) getBuilds(b common.OSBatch) []common.Build { + var builds []common.Build + formats := []string{"targz", "zip", "rpm", "deb"} + binaryName := "elastic-agent" + + var packages []string + for _, p := range r.cfg.Packages { + if slices.Contains(formats, p) { + packages = append(packages, p) + } + } + if len(packages) == 0 { + packages = formats + } + + // This is for testing beats in serverless environment + if strings.HasSuffix(r.cfg.BinaryName, "beat") { + var serverlessPackages []string + for _, p := range packages { + if slices.Contains([]string{"targz", "zip"}, p) { + serverlessPackages = append(serverlessPackages, p) + } + } + packages = serverlessPackages + } + + if r.cfg.BinaryName != "" { + binaryName = r.cfg.BinaryName + } + + for _, f := range packages { + arch := b.OS.Arch + if arch == define.AMD64 { + arch = "x86_64" + } + suffix, err := testing.GetPackageSuffix(b.OS.Type, b.OS.Arch, f) + if err != nil { + // Means that OS type & Arch doesn't support that package format + continue + } + packageName := filepath.Join(r.cfg.BuildDir, fmt.Sprintf("%s-%s-%s", binaryName, r.cfg.AgentVersion, suffix)) + build := common.Build{ + Version: r.cfg.ReleaseVersion, + Type: b.OS.Type, + Arch: arch, + Path: packageName, + SHA512Path: packageName + ".sha512", + } + + builds = append(builds, build) + } + return builds +} + +// prepare prepares for the runner to run. +// +// Creates the SSH keys to use, creates the archive of the repo and pulls the latest container for OGC. 
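+//
+// Key creation and repo archiving run concurrently; the returned values are
+// the SSH auth method and the path of the zipped repo archive.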
+func (r *Runner) prepare(ctx context.Context) (ssh.AuthMethod, string, error) { + wd, err := WorkDir() + if err != nil { + return nil, "", err + } + cacheDir := filepath.Join(wd, r.cfg.StateDir) + _, err = os.Stat(cacheDir) + if errors.Is(err, os.ErrNotExist) { + err = os.Mkdir(cacheDir, 0755) + if err != nil { + return nil, "", fmt.Errorf("failed to create %q: %w", cacheDir, err) + } + } else if err != nil { + // unknown error + return nil, "", err + } + + var auth ssh.AuthMethod + var repoArchive string + g, gCtx := errgroup.WithContext(ctx) + g.Go(func() error { + a, err := r.createSSHKey(cacheDir) + if err != nil { + return err + } + auth = a + return nil + }) + g.Go(func() error { + repo, err := r.createRepoArchive(gCtx, r.cfg.RepoDir, cacheDir) + if err != nil { + return err + } + repoArchive = repo + return nil + }) + err = g.Wait() + if err != nil { + return nil, "", err + } + return auth, repoArchive, err +} + +// createSSHKey creates the required SSH keys +func (r *Runner) createSSHKey(dir string) (ssh.AuthMethod, error) { + privateKey := filepath.Join(dir, "id_rsa") + _, priErr := os.Stat(privateKey) + publicKey := filepath.Join(dir, "id_rsa.pub") + _, pubErr := os.Stat(publicKey) + var signer ssh.Signer + if errors.Is(priErr, os.ErrNotExist) || errors.Is(pubErr, os.ErrNotExist) { + // either is missing (re-create) + r.logger.Logf("Create SSH keys to use for SSH") + _ = os.Remove(privateKey) + _ = os.Remove(publicKey) + pri, err := tssh.NewPrivateKey() + if err != nil { + return nil, fmt.Errorf("failed to create ssh private key: %w", err) + } + pubBytes, err := tssh.NewPublicKey(&pri.PublicKey) + if err != nil { + return nil, fmt.Errorf("failed to create ssh public key: %w", err) + } + priBytes := tssh.EncodeToPEM(pri) + err = os.WriteFile(privateKey, priBytes, 0600) + if err != nil { + return nil, fmt.Errorf("failed to write ssh private key: %w", err) + } + err = os.WriteFile(publicKey, pubBytes, 0644) + if err != nil { + return nil, fmt.Errorf("failed to write ssh public key: %w", err) + } + signer, err = ssh.ParsePrivateKey(priBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse ssh private key: %w", err) + } + } else if priErr != nil { + // unknown error + return nil, priErr + } else if pubErr != nil { + // unknown error + return nil, pubErr + } else { + // read from existing private key + priBytes, err := os.ReadFile(privateKey) + if err != nil { + return nil, fmt.Errorf("failed to read ssh private key %s: %w", privateKey, err) + } + signer, err = ssh.ParsePrivateKey(priBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse ssh private key: %w", err) + } + } + return ssh.PublicKeys(signer), nil +} + +func (r *Runner) createRepoArchive(ctx context.Context, repoDir string, dir string) (string, error) { + zipPath := filepath.Join(dir, "agent-repo.zip") + _ = os.Remove(zipPath) // start fresh + r.logger.Logf("Creating zip archive of repo to send to remote hosts") + err := createRepoZipArchive(ctx, repoDir, zipPath) + if err != nil { + return "", fmt.Errorf("failed to create zip archive of repo: %w", err) + } + return zipPath, nil +} + +// startStacks starts the stacks required for the tests to run +func (r *Runner) startStacks(ctx context.Context) error { + var versions []string + batchToVersion := make(map[string]string) + for _, lb := range r.batches { + if !lb.Skip && lb.Batch.Stack != nil { + if lb.Batch.Stack.Version == "" { + // no version defined on the stack; set it to the defined stack version + lb.Batch.Stack.Version = r.cfg.StackVersion + } 
+ if !slices.Contains(versions, lb.Batch.Stack.Version) { + versions = append(versions, lb.Batch.Stack.Version) + } + batchToVersion[lb.ID] = lb.Batch.Stack.Version + } + } + + var requests []stackReq + for _, version := range versions { + id := strings.Replace(version, ".", "", -1) + requests = append(requests, stackReq{ + request: common.StackRequest{ID: id, Version: version}, + stack: r.findStack(id), + }) + } + + reportResult := func(version string, stack common.Stack, err error) { + r.batchToStackMx.Lock() + defer r.batchToStackMx.Unlock() + res := stackRes{ + stack: stack, + err: err, + } + for batchID, batchVersion := range batchToVersion { + if batchVersion == version { + r.batchToStack[batchID] = res + ch, ok := r.batchToStackCh[batchID] + if ok { + ch <- res + } + } + } + } + + // start goroutines to provision the needed stacks + for _, request := range requests { + go func(ctx context.Context, req stackReq) { + var err error + var stack common.Stack + if req.stack != nil { + stack = *req.stack + } else { + stack, err = r.sp.Create(ctx, req.request) + if err != nil { + reportResult(req.request.Version, stack, err) + return + } + err = r.addOrUpdateStack(stack) + if err != nil { + reportResult(stack.Version, stack, err) + return + } + } + + if stack.Ready { + reportResult(stack.Version, stack, nil) + return + } + + stack, err = r.sp.WaitForReady(ctx, stack) + if err != nil { + reportResult(stack.Version, stack, err) + return + } + + err = r.addOrUpdateStack(stack) + if err != nil { + reportResult(stack.Version, stack, err) + return + } + + reportResult(stack.Version, stack, nil) + }(ctx, request) + } + + return nil +} + +func (r *Runner) getStackForBatchID(id string) (common.Stack, error) { + r.batchToStackMx.Lock() + res, ok := r.batchToStack[id] + if ok { + r.batchToStackMx.Unlock() + return res.stack, res.err + } + _, ok = r.batchToStackCh[id] + if ok { + return common.Stack{}, fmt.Errorf("getStackForBatchID called twice; this is not allowed") + } + ch := make(chan stackRes, 1) + r.batchToStackCh[id] = ch + r.batchToStackMx.Unlock() + + // 12 minutes is because the stack should have been ready after 10 minutes or returned an error + // this only exists to ensure that if that code is not blocking that this doesn't block forever + t := time.NewTimer(12 * time.Minute) + defer t.Stop() + select { + case <-t.C: + return common.Stack{}, fmt.Errorf("failed waiting for a response after 12 minutes") + case res = <-ch: + return res.stack, res.err + } +} + +func (r *Runner) findInstance(id string) (StateInstance, bool) { + r.stateMx.Lock() + defer r.stateMx.Unlock() + for _, existing := range r.state.Instances { + if existing.Same(StateInstance{ + Instance: common.Instance{ID: id, Provisioner: r.ip.Name()}}) { + return existing, true + } + } + return StateInstance{}, false +} + +func (r *Runner) addOrUpdateInstance(instance StateInstance) error { + r.stateMx.Lock() + defer r.stateMx.Unlock() + + state := r.state + found := false + for idx, existing := range state.Instances { + if existing.Same(instance) { + state.Instances[idx] = instance + found = true + break + } + } + if !found { + state.Instances = append(state.Instances, instance) + } + r.state = state + return r.writeState() +} + +func (r *Runner) findStack(id string) *common.Stack { + r.stateMx.Lock() + defer r.stateMx.Unlock() + for _, existing := range r.state.Stacks { + if existing.Same(common.Stack{ID: id, Provisioner: r.sp.Name()}) { + return &existing + } + } + return nil +} + +func (r *Runner) addOrUpdateStack(stack 
common.Stack) error { + r.stateMx.Lock() + defer r.stateMx.Unlock() + + state := r.state + found := false + for idx, existing := range state.Stacks { + if existing.Same(stack) { + state.Stacks[idx] = stack + found = true + break + } + } + if !found { + state.Stacks = append(state.Stacks, stack) + } + r.state = state + return r.writeState() +} + +func (r *Runner) loadState() error { + data, err := os.ReadFile(r.getStatePath()) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to read state file %s: %w", r.getStatePath(), err) + } + var state State + err = yaml.Unmarshal(data, &state) + if err != nil { + return fmt.Errorf("failed unmarshal state file %s: %w", r.getStatePath(), err) + } + r.state = state + return nil +} + +func (r *Runner) writeState() error { + data, err := yaml.Marshal(&r.state) + if err != nil { + return fmt.Errorf("failed to marshal state: %w", err) + } + err = os.WriteFile(r.getStatePath(), data, 0644) + if err != nil { + return fmt.Errorf("failed to write state file %s: %w", r.getStatePath(), err) + } + return nil +} + +func (r *Runner) getStatePath() string { + return filepath.Join(r.cfg.StateDir, "state.yml") +} + +func (r *Runner) mergeResults(results map[string]common.OSRunnerResult) (Result, error) { + var rawOutput bytes.Buffer + var jsonOutput bytes.Buffer + var suites JUnitTestSuites + for id, res := range results { + for _, pkg := range res.Packages { + err := mergePackageResult(pkg, id, false, &rawOutput, &jsonOutput, &suites) + if err != nil { + return Result{}, err + } + } + for _, pkg := range res.SudoPackages { + err := mergePackageResult(pkg, id, true, &rawOutput, &jsonOutput, &suites) + if err != nil { + return Result{}, err + } + } + } + var junitBytes bytes.Buffer + err := writeJUnit(&junitBytes, suites) + if err != nil { + return Result{}, fmt.Errorf("failed to marshal junit: %w", err) + } + + var complete Result + for _, suite := range suites.Suites { + complete.Tests += suite.Tests + complete.Failures += suite.Failures + } + complete.Output = rawOutput.Bytes() + complete.JSONOutput = jsonOutput.Bytes() + complete.XMLOutput = junitBytes.Bytes() + return complete, nil +} + +// Same returns true if other is the same instance as this one. +// Two instances are considered the same if their provider and ID are the same. 
+func (s StateInstance) Same(other StateInstance) bool { + return s.Provisioner == other.Provisioner && + s.ID == other.ID +} + +func mergePackageResult(pkg common.OSRunnerPackageResult, batchName string, sudo bool, rawOutput io.Writer, jsonOutput io.Writer, suites *JUnitTestSuites) error { + suffix := "" + sudoStr := "false" + if sudo { + suffix = "(sudo)" + sudoStr = "true" + } + if pkg.Output != nil { + rawLogger := &runnerLogger{writer: rawOutput, timestamp: false} + pkgWriter := common.NewPrefixOutput(rawLogger, fmt.Sprintf("%s(%s)%s: ", pkg.Name, batchName, suffix)) + _, err := pkgWriter.Write(pkg.Output) + if err != nil { + return fmt.Errorf("failed to write raw output from %s %s: %w", batchName, pkg.Name, err) + } + } + if pkg.JSONOutput != nil { + jsonSuffix, err := suffixJSONResults(pkg.JSONOutput, fmt.Sprintf("(%s)%s", batchName, suffix)) + if err != nil { + return fmt.Errorf("failed to suffix json output from %s %s: %w", batchName, pkg.Name, err) + } + _, err = jsonOutput.Write(jsonSuffix) + if err != nil { + return fmt.Errorf("failed to write json output from %s %s: %w", batchName, pkg.Name, err) + } + } + if pkg.XMLOutput != nil { + pkgSuites, err := parseJUnit(pkg.XMLOutput) + if err != nil { + return fmt.Errorf("failed to parse junit from %s %s: %w", batchName, pkg.Name, err) + } + for _, pkgSuite := range pkgSuites.Suites { + // append the batch information to the suite name + pkgSuite.Name = fmt.Sprintf("%s(%s)%s", pkgSuite.Name, batchName, suffix) + pkgSuite.Properties = append(pkgSuite.Properties, JUnitProperty{ + Name: "batch", + Value: batchName, + }, JUnitProperty{ + Name: "sudo", + Value: sudoStr, + }) + suites.Suites = append(suites.Suites, pkgSuite) + } + } + return nil +} + +func findBatchByID(id string, batches []common.OSBatch) (common.OSBatch, bool) { + for _, batch := range batches { + if batch.ID == id { + return batch, true + } + } + return common.OSBatch{}, false +} + +type runnerLogger struct { + writer io.Writer + timestamp bool +} + +func (l *runnerLogger) Logf(format string, args ...any) { + if l.timestamp { + _, _ = fmt.Fprintf(l.writer, "[%s] >>> %s\n", time.Now().Format(time.StampMilli), fmt.Sprintf(format, args...)) + } else { + _, _ = fmt.Fprintf(l.writer, ">>> %s\n", fmt.Sprintf(format, args...)) + } +} + +type batchLogger struct { + wrapped common.Logger + prefix string +} + +func filterSupportedOS(batches []common.OSBatch, provisioner common.InstanceProvisioner) []common.OSBatch { + var filtered []common.OSBatch + for _, batch := range batches { + if ok := provisioner.Supported(batch.OS.OS); ok { + filtered = append(filtered, batch) + } + } + return filtered +} + +func (b *batchLogger) Logf(format string, args ...any) { + b.wrapped.Logf("(%s) %s", b.prefix, fmt.Sprintf(format, args...)) +} + +type stackRes struct { + stack common.Stack + err error +} + +type stackReq struct { + request common.StackRequest + stack *common.Stack +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/runner/utils.go b/dev-tools/mage/target/srvrlesstest/testing/runner/utils.go new file mode 100644 index 000000000000..4b81e086b5ec --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/runner/utils.go @@ -0,0 +1,41 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package runner + +import ( + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/core/process" + "io" + "os" + "os/exec" + "path/filepath" +) + +// WorkDir returns the current absolute working directory. +func WorkDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("failed to get work directory: %w", err) + } + wd, err = filepath.Abs(wd) + if err != nil { + return "", fmt.Errorf("failed to get absolute path to work directory: %w", err) + } + return wd, nil +} + +func AttachOut(w io.Writer) process.CmdOption { + return func(c *exec.Cmd) error { + c.Stdout = w + return nil + } +} + +func AttachErr(w io.Writer) process.CmdOption { + return func(c *exec.Cmd) error { + c.Stderr = w + return nil + } +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ssh/client.go b/dev-tools/mage/target/srvrlesstest/testing/ssh/client.go new file mode 100644 index 000000000000..831d325ba8c7 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ssh/client.go @@ -0,0 +1,288 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ssh + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "os" + "strings" + "time" + + "golang.org/x/crypto/ssh" +) + +type logger interface { + // Logf logs the message for this runner. + Logf(format string, args ...any) +} + +type sshClient struct { + ip string + username string + auth ssh.AuthMethod + logger logger + c *ssh.Client +} + +// NewClient creates a new SSH client connection to the host. +func NewClient(ip string, username string, sshAuth ssh.AuthMethod, logger logger) SSHClient { + return &sshClient{ + ip: ip, + username: username, + auth: sshAuth, + logger: logger, + } +} + +// Connect connects to the host. +func (s *sshClient) Connect(ctx context.Context) error { + var lastErr error + config := &ssh.ClientConfig{ + User: s.username, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), //nolint:gosec // it's the tests framework test + Auth: []ssh.AuthMethod{s.auth}, + Timeout: 30 * time.Second, + } + addr := net.JoinHostPort(s.ip, "22") + + tcpAddr, err := net.ResolveTCPAddr("tcp", addr) + if err != nil { + return fmt.Errorf("unable to resolve ssh address %q :%w", addr, err) + } + delay := 1 * time.Second + for { + if ctx.Err() != nil { + if lastErr == nil { + return ctx.Err() + } + return lastErr + } + if lastErr != nil { + s.logger.Logf("ssh connect error: %q, will try again in %s", lastErr, delay) + time.Sleep(delay) + delay = 2 * delay + + } + conn, err := net.DialTCP("tcp", nil, tcpAddr) + if err != nil { + lastErr = fmt.Errorf("error dialing tcp address %q :%w", addr, err) + continue + } + err = conn.SetKeepAlive(true) + if err != nil { + _ = conn.Close() + lastErr = fmt.Errorf("error setting TCP keepalive for ssh to %q :%w", addr, err) + continue + } + err = conn.SetKeepAlivePeriod(config.Timeout) + if err != nil { + _ = conn.Close() + lastErr = fmt.Errorf("error setting TCP keepalive period for ssh to %q :%w", addr, err) + continue + } + sshConn, chans, reqs, err := ssh.NewClientConn(conn, addr, config) + if err != nil { + _ = conn.Close() + lastErr = fmt.Errorf("error NewClientConn for ssh to %q :%w", addr, err) + continue + } + s.c = ssh.NewClient(sshConn, chans, reqs) + return nil + } +} + +// ConnectWithTimeout connects to the host with a timeout. 
+func (s *sshClient) ConnectWithTimeout(ctx context.Context, timeout time.Duration) error {
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+	return s.Connect(ctx)
+}
+
+// Close closes the client.
+func (s *sshClient) Close() error {
+	if s.c != nil {
+		err := s.c.Close()
+		s.c = nil
+		return err
+	}
+	return nil
+}
+
+// Reconnect disconnects and reconnects to the host.
+func (s *sshClient) Reconnect(ctx context.Context) error {
+	_ = s.Close()
+	return s.Connect(ctx)
+}
+
+// ReconnectWithTimeout disconnects and reconnects to the host with a timeout.
+func (s *sshClient) ReconnectWithTimeout(ctx context.Context, timeout time.Duration) error {
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+	return s.Reconnect(ctx)
+}
+
+// NewSession opens a new Session for this host.
+func (s *sshClient) NewSession() (*ssh.Session, error) {
+	return s.c.NewSession()
+}
+
+// Exec runs a command on the host.
+func (s *sshClient) Exec(ctx context.Context, cmd string, args []string, stdin io.Reader) ([]byte, []byte, error) {
+	if ctx.Err() != nil {
+		return nil, nil, ctx.Err()
+	}
+
+	var session *ssh.Session
+	cmdArgs := []string{cmd}
+	cmdArgs = append(cmdArgs, args...)
+	cmdStr := strings.Join(cmdArgs, " ")
+	session, err := s.NewSession()
+	if err != nil {
+		s.logger.Logf("new session failed: %q, trying reconnect", err)
+		lErr := s.Reconnect(ctx)
+		if lErr != nil {
+			return nil, nil, fmt.Errorf("ssh reconnect failed: %w, after new session failed: %w", lErr, err)
+		}
+		session, lErr = s.NewSession()
+		if lErr != nil {
+			return nil, nil, fmt.Errorf("new session failed after reconnect: %w, original new session failure was: %w", lErr, err)
+		}
+	}
+	defer session.Close()
+
+	var stdout bytes.Buffer
+	var stderr bytes.Buffer
+	session.Stdout = &stdout
+	session.Stderr = &stderr
+	if stdin != nil {
+		session.Stdin = stdin
+	}
+	err = session.Run(cmdStr)
+	if err != nil {
+		return stdout.Bytes(), stderr.Bytes(), fmt.Errorf("could not run %q through SSH: %w",
+			cmdStr, err)
+	}
+	return stdout.Bytes(), stderr.Bytes(), err
+}
+
+// ExecWithRetry runs the command in a loop, waiting the interval between calls.
+func (s *sshClient) ExecWithRetry(ctx context.Context, cmd string, args []string, interval time.Duration) ([]byte, []byte, error) {
+	var lastErr error
+	var lastStdout []byte
+	var lastStderr []byte
+	for {
+		// the length of time for running the command is not blocked on the interval
+		// don't create a new context with the interval as its timeout
+		stdout, stderr, err := s.Exec(ctx, cmd, args, nil)
+		if err == nil {
+			return stdout, stderr, nil
+		}
+		s.logger.Logf("ssh exec error: %q, will try again in %s", err, interval)
+		lastErr = err
+		lastStdout = stdout
+		lastStderr = stderr
+
+		// wait for the interval or ctx to be cancelled
+		select {
+		case <-ctx.Done():
+			if lastErr != nil {
+				return lastStdout, lastStderr, lastErr
+			}
+			return nil, nil, ctx.Err()
+		case <-time.After(interval):
+		}
+	}
+}
+
+// Copy copies the filePath to the host at dest.
+func (s *sshClient) Copy(filePath string, dest string) error { + f, err := os.Open(filePath) + if err != nil { + return err + } + defer f.Close() + fs, err := f.Stat() + if err != nil { + return err + } + + session, err := s.NewSession() + if err != nil { + return err + } + defer session.Close() + + w, err := session.StdinPipe() + if err != nil { + return err + } + + cmd := fmt.Sprintf("scp -t %s", dest) + if err := session.Start(cmd); err != nil { + _ = w.Close() + return err + } + + errCh := make(chan error) + go func() { + errCh <- session.Wait() + }() + + _, err = fmt.Fprintf(w, "C%#o %d %s\n", fs.Mode().Perm(), fs.Size(), dest) + if err != nil { + _ = w.Close() + <-errCh + return err + } + _, err = io.Copy(w, f) + if err != nil { + _ = w.Close() + <-errCh + return err + } + _, _ = fmt.Fprint(w, "\x00") + _ = w.Close() + return <-errCh +} + +// GetFileContents returns the file content. +func (s *sshClient) GetFileContents(ctx context.Context, filename string, opts ...FileContentsOpt) ([]byte, error) { + var stdout bytes.Buffer + err := s.GetFileContentsOutput(ctx, filename, &stdout, opts...) + if err != nil { + return nil, err + } + return stdout.Bytes(), nil +} + +// GetFileContentsOutput returns the file content writing into output. +func (s *sshClient) GetFileContentsOutput(ctx context.Context, filename string, output io.Writer, opts ...FileContentsOpt) error { + if ctx.Err() != nil { + return ctx.Err() + } + + var fco fileContentsOpts + fco.command = "cat" + for _, opt := range opts { + opt(&fco) + } + + session, err := s.NewSession() + if err != nil { + return err + } + defer session.Close() + + session.Stdout = output + err = session.Run(fmt.Sprintf("%s %s", fco.command, filename)) + if err != nil { + return err + } + return nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ssh/file.go b/dev-tools/mage/target/srvrlesstest/testing/ssh/file.go new file mode 100644 index 000000000000..f40d050c75fe --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ssh/file.go @@ -0,0 +1,19 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ssh + +type fileContentsOpts struct { + command string +} + +// FileContentsOpt provides an option to modify how fetching files from the remote host work. +type FileContentsOpt func(opts *fileContentsOpts) + +// WithContentFetchCommand changes the command to use for fetching the file contents. +func WithContentFetchCommand(command string) FileContentsOpt { + return func(opts *fileContentsOpts) { + opts.command = command + } +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ssh/interface.go b/dev-tools/mage/target/srvrlesstest/testing/ssh/interface.go new file mode 100644 index 000000000000..00eccbe5c7ac --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ssh/interface.go @@ -0,0 +1,49 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ssh + +import ( + "context" + "io" + "time" + + "golang.org/x/crypto/ssh" +) + +// SSHClient is a *ssh.Client that provides a nice interface to work with. +type SSHClient interface { + // Connect connects to the host. 
+ Connect(ctx context.Context) error + + // ConnectWithTimeout connects to the host with a timeout. + ConnectWithTimeout(ctx context.Context, timeout time.Duration) error + + // Close closes the client. + Close() error + + // Reconnect disconnects and reconnected to the host. + Reconnect(ctx context.Context) error + + // ReconnectWithTimeout disconnects and reconnected to the host with a timeout. + ReconnectWithTimeout(ctx context.Context, timeout time.Duration) error + + // NewSession opens a new Session for this host. + NewSession() (*ssh.Session, error) + + // Exec runs a command on the host. + Exec(ctx context.Context, cmd string, args []string, stdin io.Reader) ([]byte, []byte, error) + + // ExecWithRetry runs the command on loop waiting the interval between calls + ExecWithRetry(ctx context.Context, cmd string, args []string, interval time.Duration) ([]byte, []byte, error) + + // Copy copies the filePath to the host at dest. + Copy(filePath string, dest string) error + + // GetFileContents returns the file content. + GetFileContents(ctx context.Context, filename string, opts ...FileContentsOpt) ([]byte, error) + + // GetFileContentsOutput returns the file content writing to output. + GetFileContentsOutput(ctx context.Context, filename string, output io.Writer, opts ...FileContentsOpt) error +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/ssh/keys.go b/dev-tools/mage/target/srvrlesstest/testing/ssh/keys.go new file mode 100644 index 000000000000..5f53a88a0ed3 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/ssh/keys.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package ssh + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + + "golang.org/x/crypto/ssh" +) + +// NewPrivateKey creates RSA private key +func NewPrivateKey() (*rsa.PrivateKey, error) { + pk, err := rsa.GenerateKey(rand.Reader, 2056) + if err != nil { + return nil, err + } + err = pk.Validate() + if err != nil { + return nil, err + } + return pk, nil +} + +// EncodeToPEM encodes private key to PEM format +func EncodeToPEM(privateKey *rsa.PrivateKey) []byte { + der := x509.MarshalPKCS1PrivateKey(privateKey) + privBlock := pem.Block{ + Type: "RSA PRIVATE KEY", + Headers: nil, + Bytes: der, + } + return pem.EncodeToMemory(&privBlock) +} + +// NewPublicKey returns bytes for writing to .pub file +func NewPublicKey(pk *rsa.PublicKey) ([]byte, error) { + pub, err := ssh.NewPublicKey(pk) + if err != nil { + return nil, err + } + return ssh.MarshalAuthorizedKey(pub), nil +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/supported/batch.go b/dev-tools/mage/target/srvrlesstest/testing/supported/batch.go new file mode 100644 index 000000000000..f11de8fbac42 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/supported/batch.go @@ -0,0 +1,182 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package supported + +import ( + "crypto/sha512" + "errors" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "strings" + "unicode/utf8" +) + +// CreateBatches creates the OSBatch set based on the defined supported OS's. +func CreateBatches(batches []define.Batch, platforms []define.OS, groups []string, matrix bool, singleTest string) ([]common.OSBatch, error) { + var err error + var osBatches []common.OSBatch + for _, b := range batches { + lbs, err := createBatchesFromBatch(b, platforms, groups, matrix) + if err != nil { + return nil, err + } + if lbs != nil { + osBatches = append(osBatches, lbs...) + } + } + if singleTest != "" { + osBatches, err = filterSingleTest(osBatches, singleTest) + if err != nil { + return nil, err + } + } + + return osBatches, nil +} + +func createBatchesFromBatch(batch define.Batch, platforms []define.OS, groups []string, matrix bool) ([]common.OSBatch, error) { + var batches []common.OSBatch + if len(groups) > 0 && !batchInGroups(batch, groups) { + return nil, nil + } + specifics, err := getSupported(batch.OS, platforms) + if errors.Is(err, ErrOSNotSupported) { + var s common.SupportedOS + s.OS.Type = batch.OS.Type + s.OS.Arch = batch.OS.Arch + s.OS.Distro = batch.OS.Distro + if s.OS.Distro == "" { + s.OS.Distro = "unknown" + } + if s.OS.Version == "" { + s.OS.Version = "unknown" + } + b := common.OSBatch{ + OS: s, + Batch: batch, + Skip: true, + } + b.ID = createBatchID(b) + batches = append(batches, b) + return batches, nil + } else if err != nil { + return nil, err + } + if matrix { + for _, s := range specifics { + b := common.OSBatch{ + OS: s, + Batch: batch, + Skip: false, + } + b.ID = createBatchID(b) + batches = append(batches, b) + } + } else { + b := common.OSBatch{ + OS: specifics[0], + Batch: batch, + Skip: false, + } + b.ID = createBatchID(b) + batches = append(batches, b) + } + return batches, nil +} + +func batchInGroups(batch define.Batch, groups []string) bool { + for _, g := range groups { + if batch.Group == g { + return true + } + } + return false +} + +func filterSingleTest(batches []common.OSBatch, singleTest string) ([]common.OSBatch, error) { + var filtered []common.OSBatch + for _, batch := range batches { + batch, ok := filterSingleTestBatch(batch, singleTest) + if ok { + filtered = append(filtered, batch) + } + } + if len(filtered) == 0 { + return nil, fmt.Errorf("test not found: %s", singleTest) + } + return filtered, nil +} + +func filterSingleTestBatch(batch common.OSBatch, testName string) (common.OSBatch, bool) { + for _, pt := range batch.Batch.Tests { + for _, t := range pt.Tests { + if t.Name == testName { + // filter batch to only run one test + batch.Batch.Tests = []define.BatchPackageTests{ + { + Name: pt.Name, + Tests: []define.BatchPackageTest{t}, + }, + } + batch.Batch.SudoTests = nil + // remove stack requirement when the test doesn't need a stack + if !t.Stack { + batch.Batch.Stack = nil + } + return batch, true + } + } + } + for _, pt := range batch.Batch.SudoTests { + for _, t := range pt.Tests { + if t.Name == testName { + // filter batch to only run one test + batch.Batch.SudoTests = []define.BatchPackageTests{ + { + Name: pt.Name, + Tests: []define.BatchPackageTest{t}, + }, + } + batch.Batch.Tests = nil + // remove stack requirement when the test doesn't need a stack + if !t.Stack { + batch.Batch.Stack = nil + } + return batch, true + } + } + } + return batch, false +} + +// createBatchID 
creates a consistent/unique ID for the batch +// +// ID needs to be consistent so each execution of the runner always +// selects the same ID for each batch. +func createBatchID(batch common.OSBatch) string { + id := batch.OS.Type + "-" + batch.OS.Arch + if batch.OS.Type == define.Linux { + id += "-" + batch.OS.Distro + } + if batch.OS.Version != "" { + id += "-" + strings.Replace(batch.OS.Version, ".", "", -1) + } + if batch.OS.Type == define.Kubernetes && batch.OS.DockerVariant != "" { + id += "-" + batch.OS.DockerVariant + } + id += "-" + strings.Replace(batch.Batch.Group, ".", "", -1) + + // The batchID needs to be at most 63 characters long otherwise + // OGC will fail to instantiate the VM. + maxIDLen := 63 + if len(id) > maxIDLen { + hash := fmt.Sprintf("%x", sha512.Sum384([]byte(id))) + hashLen := utf8.RuneCountInString(hash) + id = id[:maxIDLen-hashLen-1] + "-" + hash + } + + return strings.ToLower(id) +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/supported/supported.go b/dev-tools/mage/target/srvrlesstest/testing/supported/supported.go new file mode 100644 index 000000000000..9973c2a08e50 --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/supported/supported.go @@ -0,0 +1,274 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package supported + +import ( + "errors" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/kubernetes" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/linux" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/windows" +) + +const ( + Rhel = "rhel" + // Ubuntu is a Linux distro. + Ubuntu = "ubuntu" +) + +var ( + // ErrOSNotSupported returned when it's an unsupported OS. 
+ ErrOSNotSupported = errors.New("os/arch not currently supported") +) + +var ( + // UbuntuAMD64_2404 - Ubuntu (amd64) 24.04 + UbuntuAMD64_2404 = common.SupportedOS{ + OS: define.OS{ + Type: define.Linux, + Arch: define.AMD64, + Distro: Ubuntu, + Version: "24.04", + }, + Runner: linux.DebianRunner{}, + } + // UbuntuAMD64_2204 - Ubuntu (amd64) 22.04 + UbuntuAMD64_2204 = common.SupportedOS{ + OS: define.OS{ + Type: define.Linux, + Arch: define.AMD64, + Distro: Ubuntu, + Version: "22.04", + }, + Runner: linux.DebianRunner{}, + } + // UbuntuAMD64_2004 - Ubuntu (amd64) 20.04 + UbuntuAMD64_2004 = common.SupportedOS{ + OS: define.OS{ + Type: define.Linux, + Arch: define.AMD64, + Distro: Ubuntu, + Version: "20.04", + }, + Runner: linux.DebianRunner{}, + } + // UbuntuARM64_2404 - Ubuntu (arm64) 24.04 + UbuntuARM64_2404 = common.SupportedOS{ + OS: define.OS{ + Type: define.Linux, + Arch: define.ARM64, + Distro: Ubuntu, + Version: "24.04", + }, + Runner: linux.DebianRunner{}, + } + // UbuntuARM64_2204 - Ubuntu (arm64) 22.04 + UbuntuARM64_2204 = common.SupportedOS{ + OS: define.OS{ + Type: define.Linux, + Arch: define.ARM64, + Distro: Ubuntu, + Version: "22.04", + }, + Runner: linux.DebianRunner{}, + } + // UbuntuARM64_2004 - Ubuntu (arm64) 20.04 + UbuntuARM64_2004 = common.SupportedOS{ + OS: define.OS{ + Type: define.Linux, + Arch: define.ARM64, + Distro: Ubuntu, + Version: "20.04", + }, + Runner: linux.DebianRunner{}, + } + // RhelAMD64_8 - RedHat Enterprise Linux (amd64) 8 + RhelAMD64_8 = common.SupportedOS{ + OS: define.OS{ + Type: define.Linux, + Arch: define.AMD64, + Distro: Rhel, + Version: "8", + }, + Runner: linux.RhelRunner{}, + } + // WindowsAMD64_2022 - Windows (amd64) Server 2022 + WindowsAMD64_2022 = common.SupportedOS{ + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2022", + }, + Runner: windows.WindowsRunner{}, + } + // WindowsAMD64_2022_Core - Windows (amd64) Server 2022 Core + WindowsAMD64_2022_Core = common.SupportedOS{ + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2022-core", + }, + Runner: windows.WindowsRunner{}, + } + // WindowsAMD64_2019 - Windows (amd64) Server 2019 + WindowsAMD64_2019 = common.SupportedOS{ + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2019", + }, + Runner: windows.WindowsRunner{}, + } + // WindowsAMD64_2019_Core - Windows (amd64) Server 2019 Core + WindowsAMD64_2019_Core = common.SupportedOS{ + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2019-core", + }, + Runner: windows.WindowsRunner{}, + } + // WindowsAMD64_2016 - Windows (amd64) Server 2016 + WindowsAMD64_2016 = common.SupportedOS{ + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2016", + }, + Runner: windows.WindowsRunner{}, + } + // WindowsAMD64_2016_Core - Windows (amd64) Server 2016 Core + WindowsAMD64_2016_Core = common.SupportedOS{ + OS: define.OS{ + Type: define.Windows, + Arch: define.AMD64, + Version: "2016-core", + }, + Runner: windows.WindowsRunner{}, + } +) + +// supported defines the set of supported OS's. +// +// A provisioner might support a lesser number of this OS's, but the following +// are known to be supported by out OS runner logic. +// +// In the case that a batch is not specific on the version and/or distro the first +// one in this list will be picked. So it's best to place the one that we want the +// most testing at the top. 
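+//
+// For example, a batch that only requires define.OS{Type: define.Linux,
+// Arch: define.AMD64} resolves to UbuntuAMD64_2404, the first matching entry
+// below.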
+var supported = []common.SupportedOS{ + UbuntuAMD64_2404, + UbuntuAMD64_2204, + UbuntuAMD64_2004, + UbuntuARM64_2404, + UbuntuARM64_2204, + UbuntuARM64_2004, + RhelAMD64_8, + WindowsAMD64_2022, + WindowsAMD64_2022_Core, + WindowsAMD64_2019, + WindowsAMD64_2019_Core, + // https://github.com/elastic/ingest-dev/issues/3484 + // WindowsAMD64_2016, + // WindowsAMD64_2016_Core, +} + +// init injects the kubernetes support list into the support list above +func init() { + for _, k8sSupport := range kubernetes.GetSupported() { + supported = append(supported, common.SupportedOS{ + OS: k8sSupport, + Runner: kubernetes.Runner{}, + }) + } +} + +// osMatch returns true when the specific OS is a match for a non-specific OS. +func osMatch(specific define.OS, notSpecific define.OS) bool { + if specific.Type != notSpecific.Type || specific.Arch != notSpecific.Arch { + return false + } + if notSpecific.Distro != "" && specific.Distro != notSpecific.Distro { + return false + } + if notSpecific.Version != "" && specific.Version != notSpecific.Version { + return false + } + if notSpecific.DockerVariant != "" && specific.DockerVariant != notSpecific.DockerVariant { + return false + } + return true +} + +// getSupported returns all the supported based on the provided OS profile while using +// the provided platforms as a filter. +func getSupported(os define.OS, platforms []define.OS) ([]common.SupportedOS, error) { + var match []common.SupportedOS + for _, s := range supported { + if osMatch(s.OS, os) && allowedByPlatforms(s.OS, platforms) { + match = append(match, s) + } + } + if len(match) > 0 { + return match, nil + } + return nil, fmt.Errorf("%w: %s/%s", ErrOSNotSupported, os.Type, os.Arch) +} + +// allowedByPlatforms determines if the os is in the allowed list of platforms. +func allowedByPlatforms(os define.OS, platforms []define.OS) bool { + if len(platforms) == 0 { + return true + } + for _, platform := range platforms { + if ok := allowedByPlatform(os, platform); ok { + return true + } + } + return false +} + +// allowedByPlatform determines if the platform allows this os. +func allowedByPlatform(os define.OS, platform define.OS) bool { + if os.Type != platform.Type { + return false + } + if platform.Arch == "" { + // not specific on arch + return true + } + if os.Arch != platform.Arch { + return false + } + if platform.Type == define.Linux { + // on linux distro is supported + if platform.Distro == "" { + // not specific on distro + return true + } + if os.Distro != platform.Distro { + return false + } + } + if platform.Version == "" { + // not specific on version + return true + } + if os.Version != platform.Version { + return false + } + if platform.Type == define.Kubernetes { + // on kubernetes docker variant is supported + if platform.DockerVariant == "" { + return true + } + if os.DockerVariant != platform.DockerVariant { + return false + } + } + return true +} diff --git a/dev-tools/mage/target/srvrlesstest/testing/windows/windows.go b/dev-tools/mage/target/srvrlesstest/testing/windows/windows.go new file mode 100644 index 000000000000..77d677e5edca --- /dev/null +++ b/dev-tools/mage/target/srvrlesstest/testing/windows/windows.go @@ -0,0 +1,329 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package windows + +import ( + "context" + "fmt" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/common" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/define" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/testing/ssh" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +// WindowsRunner is a handler for running tests on Windows +type WindowsRunner struct{} + +// Prepare the test +func (WindowsRunner) Prepare(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, arch string, goVersion string) error { + // install chocolatey + logger.Logf("Installing chocolatey") + chocoInstall := `"[System.Net.ServicePointManager]::SecurityProtocol = 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))"` + updateCtx, updateCancel := context.WithTimeout(ctx, 3*time.Minute) + defer updateCancel() + stdOut, errOut, err := sshRunPowershell(updateCtx, sshClient, chocoInstall) + if err != nil { + return fmt.Errorf("failed to install chocolatey: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + // reconnect to get updated environment variables (1 minute as it should be quick to reconnect) + err = sshClient.ReconnectWithTimeout(ctx, 1*time.Minute) + if err != nil { + return fmt.Errorf("failed to reconnect: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + // install curl + logger.Logf("Installing curl") + stdOut, errOut, err = sshClient.Exec(ctx, "choco", []string{"install", "-y", "curl"}, nil) + if err != nil { + return fmt.Errorf("failed to install curl: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + // install make + logger.Logf("Installing make") + stdOut, errOut, err = sshClient.Exec(ctx, "choco", []string{"install", "-y", "make"}, nil) + if err != nil { + return fmt.Errorf("failed to install make: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + // install golang (doesn't use choco, because sometimes it doesn't have the required version) + logger.Logf("Installing golang %s (%s)", goVersion, arch) + downloadURL := fmt.Sprintf("https://go.dev/dl/go%s.windows-%s.msi", goVersion, arch) + filename := path.Base(downloadURL) + stdOut, errOut, err = sshClient.Exec(ctx, "curl", []string{"-Ls", downloadURL, "--output", filename}, nil) + if err != nil { + return fmt.Errorf("failed to download go from %s with curl: %w (stdout: %s, stderr: %s)", downloadURL, err, stdOut, errOut) + } + stdOut, errOut, err = sshClient.Exec(ctx, "msiexec", []string{"/i", filename, "/qn"}, nil) + if err != nil { + return fmt.Errorf("failed to install go: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + // reconnect to get updated environment variables (1 minute as it should be quick to reconnect) + err = sshClient.ReconnectWithTimeout(ctx, 1*time.Minute) + if err != nil { + return fmt.Errorf("failed to reconnect: %w (stdout: %s, stderr: %s)", err, stdOut, errOut) + } + + return nil +} + +// Copy places the required files on the host. 
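+//
+// The repo archive is copied over SCP and extracted with tar into the agent
+// directory. Agent build packages are only re-uploaded when the local .sha512
+// contents differ from the copy already present on the host.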
+func (WindowsRunner) Copy(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, repoArchive string, builds []common.Build) error {
+	// copy the archive and extract it on the host (tar exists and can extract zip on windows)
+	logger.Logf("Copying repo")
+	destRepoName := filepath.Base(repoArchive)
+	err := sshClient.Copy(repoArchive, destRepoName)
+	if err != nil {
+		return fmt.Errorf("failed to SCP repo archive %s: %w", repoArchive, err)
+	}
+
+	// ensure that the agent directory is removed (possible it already exists if the instance was already used)
+	// Windows errors if the directory doesn't exist; that's fine, so any error here is ignored
+	_, _, _ = sshClient.Exec(ctx, "rmdir", []string{"agent", "/s", "/q"}, nil)
+
+	stdOut, errOut, err := sshClient.Exec(ctx, "mkdir", []string{"agent"}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to mkdir agent: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
+	stdOut, errOut, err = sshClient.Exec(ctx, "tar", []string{"-xf", destRepoName, "-C", "agent"}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to unzip %s to agent directory: %w (stdout: %s, stderr: %s)", destRepoName, err, stdOut, errOut)
+	}
+
+	// install mage and prepare for testing
+	logger.Logf("Running make mage and prepareOnRemote")
+	stdOut, errOut, err = sshClient.Exec(ctx, "cd", []string{"agent", "&&", "make", "mage", "&&", "mage", "integration:prepareOnRemote"}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to perform make mage and prepareOnRemote: %w (stdout: %s, stderr: %s)", err, stdOut, errOut)
+	}
+
+	// determine if the build needs to be replaced on the host
+	// if it already exists and the SHA512 contents are the same, then
+	// there is no reason to waste time uploading the build
+	for _, build := range builds {
+		copyBuild := true
+		localSHA512, err := os.ReadFile(build.SHA512Path)
+		if err != nil {
+			return fmt.Errorf("failed to read local SHA512 contents %s: %w", build.SHA512Path, err)
+		}
+		hostSHA512Path := filepath.Base(build.SHA512Path)
+		hostSHA512, err := sshClient.GetFileContents(ctx, hostSHA512Path, ssh.WithContentFetchCommand("type"))
+		if err == nil {
+			if string(localSHA512) == string(hostSHA512) {
+				logger.Logf("Skipping copy agent build %s; already the same", filepath.Base(build.Path))
+				copyBuild = false
+			}
+		}
+
+		if copyBuild {
+			// ensure the existing copies are removed first
+			toRemove := filepath.Base(build.Path)
+			stdOut, errOut, err = sshClient.Exec(ctx,
+				"del", []string{toRemove, "/f", "/q"}, nil)
+			if err != nil {
+				return fmt.Errorf("failed to remove %q: %w (stdout: %q, stderr: %q)",
+					toRemove, err, stdOut, errOut)
+			}
+
+			toRemove = filepath.Base(build.SHA512Path)
+			stdOut, errOut, err = sshClient.Exec(ctx,
+				"del", []string{toRemove, "/f", "/q"}, nil)
+			if err != nil {
+				return fmt.Errorf("failed to remove %q: %w (stdout: %q, stderr: %q)",
+					toRemove, err, stdOut, errOut)
+			}
+
+			logger.Logf("Copying agent build %s", filepath.Base(build.Path))
+		}
+
+		for _, buildPath := range []string{build.Path, build.SHA512Path} {
+			if copyBuild {
+				err = sshClient.Copy(buildPath, filepath.Base(buildPath))
+				if err != nil {
+					return fmt.Errorf("failed to SCP build %s: %w", filepath.Base(buildPath), err)
+				}
+			}
+			insideAgentDir := filepath.Join("agent", buildPath)
+			// possible the build path already exists; 'mkdir' on windows fails if it does,
+			// so the error from this call is ignored
+			_, _, _ = sshClient.Exec(ctx, "mkdir", []string{toWindowsPath(filepath.Dir(insideAgentDir))}, nil)
+			stdOut, errOut,
err = sshClient.Exec(ctx, "mklink", []string{"/h", toWindowsPath(insideAgentDir), filepath.Base(buildPath)}, nil) + if err != nil { + return fmt.Errorf("failed to hard link %s to %s: %w (stdout: %s, stderr: %s)", filepath.Base(buildPath), toWindowsPath(insideAgentDir), err, stdOut, errOut) + } + } + } + + return nil +} + +// Run the test +func (WindowsRunner) Run(ctx context.Context, verbose bool, c ssh.SSHClient, logger common.Logger, agentVersion string, prefix string, batch define.Batch, env map[string]string) (common.OSRunnerResult, error) { + var tests []string + for _, pkg := range batch.Tests { + for _, test := range pkg.Tests { + tests = append(tests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + var sudoTests []string + for _, pkg := range batch.SudoTests { + for _, test := range pkg.Tests { + sudoTests = append(sudoTests, fmt.Sprintf("%s:%s", pkg.Name, test.Name)) + } + } + + var result common.OSRunnerResult + if len(tests) > 0 { + script := toPowershellScript(agentVersion, prefix, verbose, tests, env) + + results, err := runTestsOnWindows(ctx, logger, "non-sudo", prefix, script, c, batch.SudoTests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running non-sudo tests: %w", err) + } + result.Packages = results + } + + if len(sudoTests) > 0 { + prefix := fmt.Sprintf("%s-sudo", prefix) + script := toPowershellScript(agentVersion, prefix, verbose, sudoTests, env) + + results, err := runTestsOnWindows(ctx, logger, "sudo", prefix, script, c, batch.SudoTests) + if err != nil { + return common.OSRunnerResult{}, fmt.Errorf("error running sudo tests: %w", err) + } + result.SudoPackages = results + + } + return result, nil +} + +// Diagnostics gathers any diagnostics from the host. +func (WindowsRunner) Diagnostics(ctx context.Context, sshClient ssh.SSHClient, logger common.Logger, destination string) error { + diagnosticDir := "agent\\build\\diagnostics" + stdOut, _, err := sshClient.Exec(ctx, "dir", []string{diagnosticDir, "/b"}, nil) + if err != nil { + //nolint:nilerr // failed to list the directory, probably don't have any diagnostics (do nothing) + return nil + } + eachDiagnostic := strings.Split(string(stdOut), "\n") + for _, filename := range eachDiagnostic { + filename = strings.TrimSpace(filename) + if filename == "" { + continue + } + + // don't use filepath.Join as we need this to work in Linux/Darwin as well + // this is because if we use `filepath.Join` on a Linux/Darwin host connected to a Windows host + // it will use a `/` and that will be incorrect for Windows + fp := fmt.Sprintf("%s\\%s", diagnosticDir, filename) + // use filepath.Join on this path because it's a path on this specific host platform + dp := filepath.Join(destination, filename) + logger.Logf("Copying diagnostic %s", filename) + out, err := os.Create(dp) + if err != nil { + return fmt.Errorf("failed to create file %s: %w", dp, err) + } + err = sshClient.GetFileContentsOutput(ctx, fp, out, ssh.WithContentFetchCommand("type")) + _ = out.Close() + if err != nil { + return fmt.Errorf("failed to copy file from remote host to %s: %w", dp, err) + } + } + return nil +} + +func sshRunPowershell(ctx context.Context, sshClient ssh.SSHClient, cmd string) ([]byte, []byte, error) { + return sshClient.ExecWithRetry(ctx, "powershell", []string{ + "-NoProfile", + "-InputFormat", "None", + "-ExecutionPolicy", "Bypass", + "-Command", cmd, + }, 15*time.Second) +} + +func toPowershellScript(agentVersion string, prefix string, verbose bool, tests []string, env map[string]string) string { + var sb 
strings.Builder
+	for k, v := range env {
+		sb.WriteString("$env:")
+		sb.WriteString(k)
+		sb.WriteString("=\"")
+		sb.WriteString(v)
+		sb.WriteString("\"\n")
+	}
+	sb.WriteString("$env:AGENT_VERSION=\"")
+	sb.WriteString(agentVersion)
+	sb.WriteString("\"\n")
+	sb.WriteString("$env:TEST_DEFINE_PREFIX=\"")
+	sb.WriteString(prefix)
+	sb.WriteString("\"\n")
+	sb.WriteString("$env:TEST_DEFINE_TESTS=\"")
+	sb.WriteString(strings.Join(tests, ","))
+	sb.WriteString("\"\n")
+	sb.WriteString("cd agent\n")
+	sb.WriteString("mage ")
+	if verbose {
+		sb.WriteString("-v ")
+	}
+	sb.WriteString("integration:testOnRemote\n")
+	return sb.String()
+}
+
+func runTestsOnWindows(ctx context.Context, logger common.Logger, name string, prefix string, script string, sshClient ssh.SSHClient, tests []define.BatchPackageTests) ([]common.OSRunnerPackageResult, error) {
+	execTest := strings.NewReader(script)
+
+	session, err := sshClient.NewSession()
+	if err != nil {
+		return nil, fmt.Errorf("failed to start session: %w", err)
+	}
+
+	session.Stdout = common.NewPrefixOutput(logger, fmt.Sprintf("Test output (%s) (stdout): ", name))
+	session.Stderr = common.NewPrefixOutput(logger, fmt.Sprintf("Test output (%s) (stderr): ", name))
+	session.Stdin = execTest
+	// allowed to fail because tests might fail
+	logger.Logf("Running %s tests...", name)
+	err = session.Run("powershell -noprofile -noninteractive -")
+	if err != nil {
+		logger.Logf("%s tests failed: %s", name, err)
+	}
+	// this seems to always return an error
+	_ = session.Close()
+
+	var result []common.OSRunnerPackageResult
+	// fetch the contents for each package
+	for _, pkg := range tests {
+		resultPkg, err := getWindowsRunnerPackageResult(ctx, sshClient, pkg, prefix)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, resultPkg)
+	}
+	return result, nil
+}
+
+func toWindowsPath(path string) string {
+	return strings.ReplaceAll(path, "/", "\\")
+}
+
+func getWindowsRunnerPackageResult(ctx context.Context, sshClient ssh.SSHClient, pkg define.BatchPackageTests, prefix string) (common.OSRunnerPackageResult, error) {
+	var err error
+	var resultPkg common.OSRunnerPackageResult
+	resultPkg.Name = pkg.Name
+	outputPath := fmt.Sprintf("%%home%%\\agent\\build\\TEST-go-remote-%s.%s", prefix, filepath.Base(pkg.Name))
+	resultPkg.Output, err = sshClient.GetFileContents(ctx, outputPath+".out", ssh.WithContentFetchCommand("type"))
+	if err != nil {
+		return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.out", outputPath)
+	}
+	resultPkg.JSONOutput, err = sshClient.GetFileContents(ctx, outputPath+".out.json", ssh.WithContentFetchCommand("type"))
+	if err != nil {
+		return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.out.json", outputPath)
+	}
+	resultPkg.XMLOutput, err = sshClient.GetFileContents(ctx, outputPath+".xml", ssh.WithContentFetchCommand("type"))
+	if err != nil {
+		return common.OSRunnerPackageResult{}, fmt.Errorf("failed to fetch test output at %s.xml", outputPath)
+	}
+	return resultPkg, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/utils/root_unix.go b/dev-tools/mage/target/srvrlesstest/utils/root_unix.go
new file mode 100644
index 000000000000..a575ce57f1e4
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/utils/root_unix.go
@@ -0,0 +1,20 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+//go:build !windows
+
+package utils
+
+import "os"
+
+const (
+	// PermissionUser is the permission level the user needs to run as.
+	PermissionUser = "root"
+)
+
+// HasRoot returns true if the user has root permissions.
+// The extra nil error keeps the signature consistent with the Windows implementation of HasRoot, which can return an error.
+func HasRoot() (bool, error) {
+	return os.Geteuid() == 0, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/utils/root_windows.go b/dev-tools/mage/target/srvrlesstest/utils/root_windows.go
new file mode 100644
index 000000000000..1c9849fa6723
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/utils/root_windows.go
@@ -0,0 +1,46 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+//go:build windows
+
+package utils
+
+import (
+	"fmt"
+
+	"golang.org/x/sys/windows"
+)
+
+const (
+	// PermissionUser is the permission level the user needs to run as.
+	PermissionUser = "Administrator"
+)
+
+// HasRoot returns true if the user has Administrator/SYSTEM permissions.
+func HasRoot() (bool, error) {
+	var sid *windows.SID
+	// See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-checktokenmembership for more on the api
+	err := windows.AllocateAndInitializeSid(
+		&windows.SECURITY_NT_AUTHORITY,
+		2,
+		windows.SECURITY_BUILTIN_DOMAIN_RID,
+		windows.DOMAIN_ALIAS_RID_ADMINS,
+		0, 0, 0, 0, 0, 0,
+		&sid)
+	if err != nil {
+		return false, fmt.Errorf("allocate sid error: %w", err)
+	}
+	defer func() {
+		_ = windows.FreeSid(sid)
+	}()
+
+	token := windows.Token(0)
+
+	member, err := token.IsMember(sid)
+	if err != nil {
+		return false, fmt.Errorf("token membership error: %w", err)
+	}
+
+	return member, nil
+}
diff --git a/dev-tools/mage/target/srvrlesstest/utils/root_windows_test.go b/dev-tools/mage/target/srvrlesstest/utils/root_windows_test.go
new file mode 100644
index 000000000000..1e7b6820d060
--- /dev/null
+++ b/dev-tools/mage/target/srvrlesstest/utils/root_windows_test.go
@@ -0,0 +1,20 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
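Both implementations of HasRoot above return (bool, error) so callers have a single signature to check; only the Windows token lookup can realistically fail. A minimal usage sketch follows, assuming it is built inside this repository so the utils import path resolves; the gating logic and messages are illustrative, not part of the patch.

package main

import (
	"fmt"
	"log"

	"github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest/utils"
)

func main() {
	// HasRoot reports whether the current user is root (Unix) or a member
	// of the local Administrators group (Windows).
	root, err := utils.HasRoot()
	if err != nil {
		log.Fatalf("could not determine privilege level: %v", err)
	}
	if !root {
		log.Fatalf("re-run as %s before executing the sudo test batches", utils.PermissionUser)
	}
	fmt.Println("running with elevated privileges")
}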
+ +//go:build windows + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHasRoot(t *testing.T) { + t.Run("check if user is admin", func(t *testing.T) { + _, err := HasRoot() + assert.NoError(t, err) + }) +} diff --git a/go.mod b/go.mod index 3e2fe304b676..e49e30e30c5c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/elastic/beats/v7 -go 1.22.0 +go 1.22.3 + +toolchain go1.22.7 require ( cloud.google.com/go/bigquery v1.62.0 @@ -155,9 +157,9 @@ require ( gopkg.in/yaml.v2 v2.4.0 gotest.tools/gotestsum v1.7.0 howett.net/plist v1.0.1 - k8s.io/api v0.29.5 - k8s.io/apimachinery v0.29.5 - k8s.io/client-go v0.29.5 + k8s.io/api v0.31.1 + k8s.io/apimachinery v0.31.1 + k8s.io/client-go v0.31.1 kernel.org/pub/linux/libs/security/libcap/cap v1.2.57 ) @@ -204,7 +206,7 @@ require ( github.com/go-ldap/ldap/v3 v3.4.6 github.com/gofrs/uuid/v5 v5.2.0 github.com/golang-jwt/jwt/v5 v5.2.1 - github.com/google/cel-go v0.19.0 + github.com/google/cel-go v0.20.1 github.com/googleapis/gax-go/v2 v2.13.0 github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 @@ -290,8 +292,10 @@ require ( github.com/elazarl/goproxy/ext v0.0.0-20240909085733-6741dbfc16a1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fearful-symmetry/gomsr v0.0.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -349,7 +353,7 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/iochan v1.0.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/spdystream v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -370,6 +374,7 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect @@ -392,12 +397,14 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect kernel.org/pub/linux/libs/security/libcap/psx v1.2.57 // indirect mvdan.cc/garble v0.12.1 // indirect + sigs.k8s.io/controller-runtime v0.19.0 // indirect + sigs.k8s.io/e2e-framework v0.5.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) require ( diff --git a/go.sum b/go.sum index e27981da519d..9e47ac355978 100644 --- a/go.sum +++ b/go.sum @@ -409,6 +409,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= @@ -427,6 +429,8 @@ github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-faker/faker/v4 v4.2.0 h1:dGebOupKwssrODV51E0zbMrv5e2gO9VWSLNC1WDCpWg= @@ -524,6 +528,7 @@ github.com/gomodule/redigo v1.8.3 h1:HR0kYDX2RJZvAup8CsiJwxB4dTCSC0AaUq6S4SiLwUc github.com/gomodule/redigo v1.8.3/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/cel-go v0.19.0 h1:vVgaZoHPBDd1lXCYGQOh5A06L4EtuIfmqQ/qnSXSKiU= github.com/google/cel-go v0.19.0/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= +github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -731,6 +736,8 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= @@ -892,6 +899,8 @@ github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvV github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.39.0 h1:soLZ08Q2zvjRSinNup8xVlw0KDDCJPPA1rIDmBhi7As= github.com/vmware/govmomi v0.39.0/go.mod h1:oHzAQ1r6152zYDGcUqeK+EO8LhKo5wjtvWZBGHws2Hc= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= 
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= @@ -1269,16 +1278,24 @@ howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM= howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.29.5 h1:levS+umUigHCfI3riD36pMY1vQEbrzh4r1ivVWAhHaI= k8s.io/api v0.29.5/go.mod h1:7b18TtPcJzdjk7w5zWyIHgoAtpGeRvGGASxlS7UZXdQ= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= k8s.io/apimachinery v0.29.5 h1:Hofa2BmPfpoT+IyDTlcPdCHSnHtEQMoJYGVoQpRTfv4= k8s.io/apimachinery v0.29.5/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.29.5 h1:nlASXmPQy190qTteaVP31g3c/wi2kycznkTP7Sv1zPc= k8s.io/client-go v0.29.5/go.mod h1:aY5CnqUUvXYccJhm47XHoPcRyX6vouHdIBHaKZGTbK4= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= kernel.org/pub/linux/libs/security/libcap/cap v1.2.57 h1:2nmqI+aw7EQZuelYktkQHBE4jESD2tOR+lOJEnv/Apo= kernel.org/pub/linux/libs/security/libcap/cap v1.2.57/go.mod h1:uI99C3r4SXvJeuqoEtx/eWt7UbmfqqZ80H8q+9t/A7I= kernel.org/pub/linux/libs/security/libcap/psx v1.2.57 h1:NOFATXSf5z/cMR3HIwQ3Xrd3nwnWl5xThmNr5U/F0pI= @@ -1287,9 +1304,15 @@ mvdan.cc/garble v0.12.1 h1:GyKeyqr4FKhWz12ZD9kKT9VnDqFILVYxgmAE8RKd3x8= mvdan.cc/garble v0.12.1/go.mod h1:rJ4GvtUEuVCRAYQkpd1iG6bolz9NEnkk0iu6gdTwWqA= nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/e2e-framework v0.5.0 h1:YLhk8R7EHuTFQAe6Fxy5eBzn5Vb+yamR5u8MH1Rq3cE= +sigs.k8s.io/e2e-framework v0.5.0/go.mod h1:jJSH8u2RNmruekUZgHAtmRjb5Wj67GErli9UjLSY7Zc= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 
h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/x-pack/agentbeat/magefile.go b/x-pack/agentbeat/magefile.go index bd72a558ba39..52f7581ba69f 100644 --- a/x-pack/agentbeat/magefile.go +++ b/x-pack/agentbeat/magefile.go @@ -9,11 +9,9 @@ package main import ( "context" "fmt" - "log" + "github.com/elastic/beats/v7/dev-tools/mage/target/srvrlesstest" "os" - "os/exec" "path/filepath" - "strings" "time" "github.com/magefile/mage/sh" @@ -217,78 +215,8 @@ func PythonIntegTest(ctx context.Context) error { return devtools.PythonIntegTestFromHost(devtools.DefaultPythonTestIntegrationFromHostArgs()) } -// TestWithSpec executes unique commands from agentbeat.spec.yml and validates that app haven't exited with non-zero -func TestWithSpec(ctx context.Context) { - specPath := os.Getenv("AGENTBEAT_SPEC") - if specPath == "" { - log.Fatal("AGENTBEAT_SPEC is not defined\n") - } - - platform := os.Getenv("PLATFORM") - if platform == "" { - log.Fatal("PLATFORM is not defined\n") - } - - var commands = devtools.SpecCommands(specPath, platform) - - agentbeatPath := os.Getenv("AGENTBEAT_PATH") - - cmdResults := make(map[string]bool) - - for _, command := range commands { - cmdResults[command] = runCmd(agentbeatPath, strings.Split(command, " ")) - } - - hasFailures := false - for cmd, res := range cmdResults { - if res { - fmt.Printf("--- :large_green_circle: Succeeded: [%s.10s...]\n", cmd) - } else { - fmt.Printf("--- :bangbang: Failed: [%s.10s...]\n", cmd) - hasFailures = true - } - } - - if hasFailures { - fmt.Printf("Some inputs failed. Exiting with error\n") - os.Exit(1) - } -} - -func runCmd(agentbeatPath string, command []string) bool { - cmd := exec.Command(agentbeatPath, command...) - fmt.Printf("Executing: %s\n", cmd.String()) - - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Stdin = os.Stdin - - if err := cmd.Start(); err != nil { - fmt.Printf("failed to start command: %v\n", err) - } - - defer func() { - if err := cmd.Process.Kill(); err != nil { - fmt.Printf("failed to kill process: %v\n", err) - } else { - fmt.Print("command process killed\n") - } - }() - - done := make(chan error, 1) - go func() { - done <- cmd.Wait() - }() - timeout := 2 * time.Second - deadline := time.After(timeout) - - select { - case err := <-done: - fmt.Printf("command exited before %s: %v\n", timeout.String(), err) - return false - - case <-deadline: - fmt.Printf("%s\n", cmd.Stdout) - return true - } +// ServerlessTest starts serverless integration tests +func ServerlessTest(ctx context.Context) error { + mg.Deps(devtools.TestBeatServerless) + return srvrlesstest.IntegRunner(ctx, false, "TestBeatsServerless") } diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index c00099c36670..cefe6c23d602 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -1,1513 +1,1513 @@ -######################## Filebeat Configuration ############################ - -# This file is a full configuration example documenting all non-deprecated -# options in comments. For a shorter configuration example, that contains only -# the most common options, please see filebeat.yml in the same directory. 
-# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/filebeat/index.html - +######################## Filebeat Configuration ############################ + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see filebeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + #========================== Modules configuration ============================= filebeat.modules: #-------------------------------- System Module -------------------------------- -#- module: system - # Syslog - #syslog: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # Input configuration (advanced). - # Any input configuration option - # can be added under this section. - #input: - - # Authorization logs - #auth: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # A list of tags to include in events. Including 'forwarded' - # indicates that the events did not originate on this host and - # causes host.name to not be added to events. Include - # 'preserve_orginal_event' causes the pipeline to retain the raw log - # in event.original. Defaults to []. - #var.tags: [] - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: +#- module: system + # Syslog + #syslog: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + + # Input configuration (advanced). + # Any input configuration option + # can be added under this section. + #input: + + # Authorization logs + #auth: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Force using journald to collect system logs + #var.use_journald: true|false + + # Force using log files to collect system logs + #var.use_files: true|false + + # If use_journald and use_files are false, then + # Filebeat will autodetect whether use to journald + # to collect system logs. + + # A list of tags to include in events. Including 'forwarded' + # indicates that the events did not originate on this host and + # causes host.name to not be added to events. 
Include + # 'preserve_orginal_event' causes the pipeline to retain the raw log + # in event.original. Defaults to []. + #var.tags: [] + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: #------------------------------- ActiveMQ Module ------------------------------- -- module: activemq - # Audit logs - audit: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Application logs - log: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: +- module: activemq + # Audit logs + audit: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Application logs + log: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: #-------------------------------- Apache Module -------------------------------- -#- module: apache - # Access logs - #access: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - - # Error logs - #error: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: +#- module: apache + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: #-------------------------------- Auditd Module -------------------------------- -#- module: auditd - #log: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: +#- module: auditd + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. 
+ #input: #--------------------------------- AWS Module --------------------------------- -- module: aws - cloudtrail: - enabled: false - - # AWS SQS queue url - #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue - - # AWS S3 bucket arn - #var.bucket_arn: 'arn:aws:s3:::mybucket' - - # AWS S3 list prefix - #var.bucket_list_prefix: 'prefix' - - # Bucket list interval on S3 bucket - #var.bucket_list_interval: 300s - - # Number of workers on S3 bucket or SQS queue - #var.number_of_workers: 5 - - # Process CloudTrail logs - # default is true, set to false to skip Cloudtrail logs - # var.process_cloudtrail_logs: false - - # Process CloudTrail Digest logs - # default true, set to false to skip CloudTrail Digest logs - # var.process_digest_logs: false - - # Process CloudTrail Insight logs - # default true, set to false to skip CloudTrail Insight logs - # var.process_insight_logs: false - - # Filename of AWS credential file - # If not set "$HOME/.aws/credentials" is used on Linux/Mac - # "%UserProfile%\.aws\credentials" is used on Windows - #var.shared_credential_file: /etc/filebeat/aws_credentials +- module: aws + cloudtrail: + enabled: false + + # AWS SQS queue url + #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue + + # AWS S3 bucket arn + #var.bucket_arn: 'arn:aws:s3:::mybucket' + + # AWS S3 list prefix + #var.bucket_list_prefix: 'prefix' + + # Bucket list interval on S3 bucket + #var.bucket_list_interval: 300s + + # Number of workers on S3 bucket or SQS queue + #var.number_of_workers: 5 + + # Process CloudTrail logs + # default is true, set to false to skip Cloudtrail logs + # var.process_cloudtrail_logs: false + + # Process CloudTrail Digest logs + # default true, set to false to skip CloudTrail Digest logs + # var.process_digest_logs: false + + # Process CloudTrail Insight logs + # default true, set to false to skip CloudTrail Insight logs + # var.process_insight_logs: false + + # Filename of AWS credential file + # If not set "$HOME/.aws/credentials" is used on Linux/Mac + # "%UserProfile%\.aws\credentials" is used on Windows + #var.shared_credential_file: /etc/filebeat/aws_credentials + + # Profile name for aws credential + # If not set the default profile is used + #var.credential_profile_name: fb-aws + + # Use access_key_id, secret_access_key and/or session_token instead of shared credential file + #var.access_key_id: access_key_id + #var.secret_access_key: secret_access_key + #var.session_token: session_token + + # The duration that the received messages are hidden from ReceiveMessage request + # Default to be 300s + #var.visibility_timeout: 300s + + # Maximum duration before AWS API request will be interrupted + # Default to be 120s + #var.api_timeout: 120s + + # Custom endpoint used to access AWS APIs + #var.endpoint: amazonaws.com + + # Default region to query if no other region is set + #var.default_region: us-east-1 + + # AWS IAM Role to assume + #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... 
+ #var.ssl: + + cloudwatch: + enabled: false + + # AWS SQS queue url + #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue + + # AWS S3 bucket arn + #var.bucket_arn: 'arn:aws:s3:::mybucket' + + # AWS S3 list prefix + #var.bucket_list_prefix: 'prefix' + + # Bucket list interval on S3 bucket + #var.bucket_list_interval: 300s + + # Number of workers on S3 bucket or SQS queue + #var.number_of_workers: 5 + + # Filename of AWS credential file + # If not set "$HOME/.aws/credentials" is used on Linux/Mac + # "%UserProfile%\.aws\credentials" is used on Windows + #var.shared_credential_file: /etc/filebeat/aws_credentials + + # Profile name for aws credential + # If not set the default profile is used + #var.credential_profile_name: fb-aws + + # Use access_key_id, secret_access_key and/or session_token instead of shared credential file + #var.access_key_id: access_key_id + #var.secret_access_key: secret_access_key + #var.session_token: session_token + + # The duration that the received messages are hidden from ReceiveMessage request + # Default to be 300s + #var.visibility_timeout: 300s + + # Maximum duration before AWS API request will be interrupted + # Default to be 120s + #var.api_timeout: 120s + + # Custom endpoint used to access AWS APIs + #var.endpoint: amazonaws.com + + # Default region to query if no other region is set + #var.default_region: us-east-1 + + # AWS IAM Role to assume + #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + + ec2: + enabled: false + + # AWS SQS queue url + #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue + + # AWS S3 bucket arn + #var.bucket_arn: 'arn:aws:s3:::mybucket' + + # AWS S3 list prefix + #var.bucket_list_prefix: 'prefix' + + # Bucket list interval on S3 bucket + #var.bucket_list_interval: 300s + + # Number of workers on S3 bucket or SQS queue + #var.number_of_workers: 5 + + # Filename of AWS credential file + # If not set "$HOME/.aws/credentials" is used on Linux/Mac + # "%UserProfile%\.aws\credentials" is used on Windows + #var.shared_credential_file: /etc/filebeat/aws_credentials + + # Profile name for aws credential + # If not set the default profile is used + #var.credential_profile_name: fb-aws + + # Use access_key_id, secret_access_key and/or session_token instead of shared credential file + #var.access_key_id: access_key_id + #var.secret_access_key: secret_access_key + #var.session_token: session_token + + # The duration that the received messages are hidden from ReceiveMessage request + # Default to be 300s + #var.visibility_timeout: 300s + + # Maximum duration before AWS API request will be interrupted + # Default to be 120s + #var.api_timeout: 120s + + # Custom endpoint used to access AWS APIs + #var.endpoint: amazonaws.com + + # Default region to query if no other region is set + #var.default_region: us-east-1 + + # AWS IAM Role to assume + #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. 
set trusted CAs, ignore certificate verification.... + #var.ssl: + + elb: + enabled: false + + # AWS SQS queue url + #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue + + # AWS S3 bucket arn + #var.bucket_arn: 'arn:aws:s3:::mybucket' + + # AWS S3 list prefix + #var.bucket_list_prefix: 'prefix' + + # Bucket list interval on S3 bucket + #var.bucket_list_interval: 300s + + # Number of workers on S3 bucket or SQS queue + #var.number_of_workers: 5 + + # Filename of AWS credential file + # If not set "$HOME/.aws/credentials" is used on Linux/Mac + # "%UserProfile%\.aws\credentials" is used on Windows + #var.shared_credential_file: /etc/filebeat/aws_credentials + + # Profile name for aws credential + # If not set the default profile is used + #var.credential_profile_name: fb-aws + + # Use access_key_id, secret_access_key and/or session_token instead of shared credential file + #var.access_key_id: access_key_id + #var.secret_access_key: secret_access_key + #var.session_token: session_token + + # The duration that the received messages are hidden from ReceiveMessage request + # Default to be 300s + #var.visibility_timeout: 300s + + # Maximum duration before AWS API request will be interrupted + # Default to be 120s + #var.api_timeout: 120s + + # Custom endpoint used to access AWS APIs + #var.endpoint: amazonaws.com + + # Default region to query if no other region is set + #var.default_region: us-east-1 + + # AWS IAM Role to assume + #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + + s3access: + enabled: false + + # AWS SQS queue url + #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue + + # AWS S3 bucket arn + #var.bucket_arn: 'arn:aws:s3:::mybucket' + + # AWS S3 list prefix + #var.bucket_list_prefix: 'prefix' + + # Bucket list interval on S3 bucket + #var.bucket_list_interval: 300s + + # Number of workers on S3 bucket or SQS queue + #var.number_of_workers: 5 + + # Filename of AWS credential file + # If not set "$HOME/.aws/credentials" is used on Linux/Mac + # "%UserProfile%\.aws\credentials" is used on Windows + #var.shared_credential_file: /etc/filebeat/aws_credentials + + # Profile name for aws credential + # If not set the default profile is used + #var.credential_profile_name: fb-aws + + # Use access_key_id, secret_access_key and/or session_token instead of shared credential file + #var.access_key_id: access_key_id + #var.secret_access_key: secret_access_key + #var.session_token: session_token + + # The duration that the received messages are hidden from ReceiveMessage request + # Default to be 300s + #var.visibility_timeout: 300s + + # Maximum duration before AWS API request will be interrupted + # Default to be 120s + #var.api_timeout: 120s + + # Custom endpoint used to access AWS APIs + #var.endpoint: amazonaws.com + + # Default region to query if no other region is set + #var.default_region: us-east-1 + + # AWS IAM Role to assume + #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. 
+ #var.fips_enabled: false + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + + vpcflow: + enabled: false + + # AWS SQS queue url + #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue + + # AWS S3 bucket arn + #var.bucket_arn: 'arn:aws:s3:::mybucket' + + # AWS S3 list prefix + #var.bucket_list_prefix: 'prefix' + + # Bucket list interval on S3 bucket + #var.bucket_list_interval: 300s + + # Number of workers on S3 bucket or SQS queue + #var.number_of_workers: 5 + + # Filename of AWS credential file + # If not set "$HOME/.aws/credentials" is used on Linux/Mac + # "%UserProfile%\.aws\credentials" is used on Windows + #var.shared_credential_file: /etc/filebeat/aws_credentials + + # Profile name for aws credential + # If not set the default profile is used + #var.credential_profile_name: fb-aws + + # Use access_key_id, secret_access_key and/or session_token instead of shared credential file + #var.access_key_id: access_key_id + #var.secret_access_key: secret_access_key + #var.session_token: session_token + + # The duration that the received messages are hidden from ReceiveMessage request + # Default to be 300s + #var.visibility_timeout: 300s + + # Maximum duration before AWS API request will be interrupted + # Default to be 120s + #var.api_timeout: 120s + + # Custom endpoint used to access AWS APIs + #var.endpoint: amazonaws.com + + # Default region to query if no other region is set + #var.default_region: us-east-1 + + # AWS IAM Role to assume + #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + + # Specify a custom VPC flow log format. 
+ #var.format: - # Profile name for aws credential - # If not set the default profile is used - #var.credential_profile_name: fb-aws +#----------------------------- AWS Fargate Module ----------------------------- +- module: awsfargate + log: + enabled: false + + # Filename of AWS credential file + # If not set "$HOME/.aws/credentials" is used on Linux/Mac + # "%UserProfile%\.aws\credentials" is used on Windows + #var.shared_credential_file: /etc/filebeat/aws_credentials + + # Profile name for aws credential + # If not set the default profile is used + #var.credential_profile_name: fb-aws + + # Use access_key_id, secret_access_key and/or session_token instead of shared credential file + #var.access_key_id: access_key_id + #var.secret_access_key: secret_access_key + #var.session_token: session_token + + # Maximum duration before AWS API request will be interrupted + # Default to be 120s + #var.api_timeout: 120s + + # Custom endpoint used to access AWS APIs + #var.endpoint: amazonaws.com + + # AWS IAM Role to assume + #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # ARN of the log group to collect logs from + #var.log_group_arn: arn:aws:logs:us-east-1:123456789012:log-group:test:* + + # Name of the log group to collect logs from + #var.log_group_name: /ecs/test-log-group + + # Region that the specified log group belongs to + #var.region_name: us-east-1 + + # A list of strings of log streams names that Filebeat collect log events from + #var.log_streams: ["/ecs/test-log-group/test-log-stream"] + + # A string to filter the results to include only log events from log streams that have names starting with this prefix + #var.log_stream_prefix: /ecs/test-log-group/ + + # Specify if Filebeat should read log files from the beginning or from the end + # Default start_position is beginning + #var.start_position: beginning + + # How often Filebeat checks for new log events from the specified log group + # Default scan_frequency is 1 minute + #var.scan_frequency: 1m + + # Time used to sleep between AWS FilterLogEvents API calls inside the same collection period + # Default api_sleep is 200 ms + #var.api_sleep: 200ms + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: - # Use access_key_id, secret_access_key and/or session_token instead of shared credential file - #var.access_key_id: access_key_id - #var.secret_access_key: secret_access_key - #var.session_token: session_token +#-------------------------------- Azure Module -------------------------------- +- module: azure + # All logs + activitylogs: + enabled: false + var: + # eventhub name containing the activity logs, overwrite he default value if the logs are exported in a different eventhub + eventhub: "insights-operational-logs" + # consumer group name that has access to the event hub, we advise creating a dedicated consumer group for the azure module + consumer_group: "$Default" + # the connection string required to communicate with Event Hubs, steps to generate one here https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string + connection_string: "" + # the name of the storage account the state/offsets will be stored and updated + storage_account: "" + # the name of the storage account container you would like to store the offset information in. 
+ storage_account_container: "" + # the storage account key, this key will be used to authorize access to data in your storage account + storage_account_key: "" + + platformlogs: + enabled: false + # var: + # eventhub: "" + # consumer_group: "$Default" + # connection_string: "" + # storage_account: "" + # storage_account_key: "" + + + auditlogs: + enabled: false + # var: + # eventhub: "insights-logs-auditlogs" + # consumer_group: "$Default" + # connection_string: "" + # storage_account: "" + # storage_account_key: "" + signinlogs: + enabled: false + # var: + # eventhub: "insights-logs-signinlogs" + # consumer_group: "$Default" + # connection_string: "" + # storage_account: "" + # storage_account_key: "" - # The duration that the received messages are hidden from ReceiveMessage request - # Default to be 300s - #var.visibility_timeout: 300s +#--------------------------------- CEF Module --------------------------------- +- module: cef + log: + enabled: false + var: + syslog_host: localhost + syslog_port: 9003 + + # Set internal security zones. used to override parsed network.direction + # based on zone egress and ingress + #var.internal_zones: [ "Internal" ] + + # Set external security zones. used to override parsed network.direction + # based on zone egress and ingress + #var.external_zones: [ "External" ] - # Maximum duration before AWS API request will be interrupted - # Default to be 120s - #var.api_timeout: 120s +#------------------------------ Checkpoint Module ------------------------------ +- module: checkpoint + firewall: + enabled: false + + # Set which input to use between syslog (default) or file. + #var.input: syslog + + # The interface to listen to UDP based syslog traffic. Defaults to + # localhost. Set to 0.0.0.0 to bind to all available interfaces. + #var.syslog_host: localhost + + # The UDP port to listen for syslog traffic. Defaults to 9001. + #var.syslog_port: 9001 + + # Set internal security zones. used to override parsed network.direction + # based on zone egress and ingress + #var.internal_zones: [ "Internal" ] + + # Set external security zones. used to override parsed network.direction + # based on zone egress and ingress + #var.external_zones: [ "External" ] + + # IANA time zone or time offset (e.g. `+0200`) to use when interpreting syslog + # timestamps without a time zone. + #var.timezone_offset: UTC - # Custom endpoint used to access AWS APIs - #var.endpoint: amazonaws.com +#-------------------------------- Cisco Module -------------------------------- +- module: cisco + asa: + enabled: false + + # Set which input to use between udp (default), tcp or file. + #var.input: udp + + # The interface to listen to udp or tcp syslog traffic. Defaults to + # localhost. Set to 0.0.0.0 to bind to all available interfaces. + #var.syslog_host: localhost + + # The port to listen for udp or tcp syslog traffic. Defaults to 9001. + #var.syslog_port: 9001 + + # With tcp input, set the optional tls configuration: + #var.ssl: + # enabled: true + # certificate: /path/to/cert.pem + # key: /path/to/privatekey.pem + # key_passphrase: 'password for my key' + + # Set the log level from 1 (alerts only) to 7 (include all messages). + # Messages with a log level higher than the specified will be dropped. + # See https://www.cisco.com/c/en/us/td/docs/security/asa/syslog/b_syslog/syslogs-sev-level.html + #var.log_level: 7 + + # Set internal security zones. 
used to override parsed network.direction + # based on zone egress and ingress + #var.internal_zones: [ "Internal" ] + + # Set external security zones. used to override parsed network.direction + # based on zone egress and ingress + #var.external_zones: [ "External" ] + + # IANA time zone or time offset (e.g. `+0200`) to use when interpreting syslog + # timestamps without a time zone. + #var.timezone_offset: UTC + + ftd: + enabled: false + + # Set which input to use between udp (default), tcp or file. + #var.input: udp + + # The interface to listen to tcp or udp syslog traffic. Defaults to + # localhost. Set to 0.0.0.0 to bind to all available interfaces. + #var.syslog_host: localhost + + # The UDP port to listen for tcp or udp syslog traffic. Defaults to 9003. + #var.syslog_port: 9003 + + # With tcp input, set the optional tls configuration: + #var.ssl: + # enabled: true + # certificate: /path/to/cert.pem + # key: /path/to/privatekey.pem + # key_passphrase: 'password for my key' + + # Set the log level from 1 (alerts only) to 7 (include all messages). + # Messages with a log level higher than the specified will be dropped. + # See https://www.cisco.com/c/en/us/td/docs/security/firepower/Syslogs/b_fptd_syslog_guide/syslogs-sev-level.html + #var.log_level: 7 + + # Set internal security zones. used to override parsed network.direction + # based on zone egress and ingress + #var.internal_zones: [ "Internal" ] + + # Set external security zones. used to override parsed network.direction + # based on zone egress and ingress + #var.external_zones: [ "External" ] + + # IANA time zone or time offset (e.g. `+0200`) to use when interpreting syslog + # timestamps without a time zone. + #var.timezone_offset: UTC + + ios: + enabled: false + + # Set which input to use between syslog (default) or file. + #var.input: syslog + + # The interface to listen to syslog traffic. Defaults to + # localhost. Set to 0.0.0.0 to bind to all available interfaces. + #var.syslog_host: localhost + + # The port to listen on for syslog traffic. Defaults to 9002. + #var.syslog_port: 9002 + + # Set which protocol to use between udp (default) or tcp. + #var.syslog_protocol: udp + + # Set custom paths for the log files when using file input. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + umbrella: + enabled: false + + #var.input: aws-s3 + # AWS SQS queue url + #var.queue_url: https://sqs.us-east-1.amazonaws.com/ID/CiscoQueue + # Access ID to authenticate with the S3 input + #var.access_key_id: 123456 + # Access key to authenticate with the S3 input + #var.secret_access_key: PASSWORD + # The duration that the received messages are hidden from ReceiveMessage request + #var.visibility_timeout: 300s + # Maximum duration before AWS API request will be interrupted + #var.api_timeout: 120s + + amp: + enabled: false + + # Set which input to use between httpjson (default) or file. + #var.input: httpjson + + # The API URL + #var.url: https://api.amp.cisco.com/v1/events + # The client ID used as a username for the API requests. + #var.client_id: + # The API key related to the client ID. + #var.api_key: + # How far to look back the first time the module is started. Expects an amount of hours. + #var.first_interval: 24h + # Overriding the default request timeout, optional. 
+ #var.request_timeout: 60s - # Default region to query if no other region is set - #var.default_region: us-east-1 +#------------------------------- Coredns Module ------------------------------- +- module: coredns + # Fileset for native deployment + log: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: - # AWS IAM Role to assume - #var.role_arn: arn:aws:iam::123456789012:role/test-mb +#----------------------------- Crowdstrike Module ----------------------------- +- module: crowdstrike + + falcon: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: - # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. - #var.fips_enabled: false +#----------------------------- CyberArk PAS Module ----------------------------- +- module: cyberarkpas + audit: + enabled: false + + # Set which input to use between tcp (default), udp, or file. + # + # var.input: tcp + + # var.syslog_host: localhost + # var.syslog_port: 9301 + + # With tcp input, set the optional tls configuration: + #var.ssl: + # enabled: true + # certificate: /path/to/cert.pem + # key: /path/to/privatekey.pem + # key_passphrase: 'password for my key' + + # Uncoment to keep the original syslog event under event.original. + # var.preserve_original_event: true + + # Set paths for the log files when file input is used. + # var.paths: + - # URL to proxy AWS API calls - #var.proxy_url: http://proxy:3128 - - # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... - #var.ssl: - - cloudwatch: - enabled: false - - # AWS SQS queue url - #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue - - # AWS S3 bucket arn - #var.bucket_arn: 'arn:aws:s3:::mybucket' - - # AWS S3 list prefix - #var.bucket_list_prefix: 'prefix' - - # Bucket list interval on S3 bucket - #var.bucket_list_interval: 300s - - # Number of workers on S3 bucket or SQS queue - #var.number_of_workers: 5 - - # Filename of AWS credential file - # If not set "$HOME/.aws/credentials" is used on Linux/Mac - # "%UserProfile%\.aws\credentials" is used on Windows - #var.shared_credential_file: /etc/filebeat/aws_credentials - - # Profile name for aws credential - # If not set the default profile is used - #var.credential_profile_name: fb-aws - - # Use access_key_id, secret_access_key and/or session_token instead of shared credential file - #var.access_key_id: access_key_id - #var.secret_access_key: secret_access_key - #var.session_token: session_token - - # The duration that the received messages are hidden from ReceiveMessage request - # Default to be 300s - #var.visibility_timeout: 300s - - # Maximum duration before AWS API request will be interrupted - # Default to be 120s - #var.api_timeout: 120s - - # Custom endpoint used to access AWS APIs - #var.endpoint: amazonaws.com - - # Default region to query if no other region is set - #var.default_region: us-east-1 - - # AWS IAM Role to assume - #var.role_arn: arn:aws:iam::123456789012:role/test-mb - - # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. - #var.fips_enabled: false - - # URL to proxy AWS API calls - #var.proxy_url: http://proxy:3128 - - # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... 
- #var.ssl: - - ec2: - enabled: false - - # AWS SQS queue url - #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue - - # AWS S3 bucket arn - #var.bucket_arn: 'arn:aws:s3:::mybucket' - - # AWS S3 list prefix - #var.bucket_list_prefix: 'prefix' - - # Bucket list interval on S3 bucket - #var.bucket_list_interval: 300s - - # Number of workers on S3 bucket or SQS queue - #var.number_of_workers: 5 - - # Filename of AWS credential file - # If not set "$HOME/.aws/credentials" is used on Linux/Mac - # "%UserProfile%\.aws\credentials" is used on Windows - #var.shared_credential_file: /etc/filebeat/aws_credentials - - # Profile name for aws credential - # If not set the default profile is used - #var.credential_profile_name: fb-aws - - # Use access_key_id, secret_access_key and/or session_token instead of shared credential file - #var.access_key_id: access_key_id - #var.secret_access_key: secret_access_key - #var.session_token: session_token - - # The duration that the received messages are hidden from ReceiveMessage request - # Default to be 300s - #var.visibility_timeout: 300s - - # Maximum duration before AWS API request will be interrupted - # Default to be 120s - #var.api_timeout: 120s - - # Custom endpoint used to access AWS APIs - #var.endpoint: amazonaws.com - - # Default region to query if no other region is set - #var.default_region: us-east-1 - - # AWS IAM Role to assume - #var.role_arn: arn:aws:iam::123456789012:role/test-mb - - # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. - #var.fips_enabled: false - - # URL to proxy AWS API calls - #var.proxy_url: http://proxy:3128 - - # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... - #var.ssl: - - elb: - enabled: false - - # AWS SQS queue url - #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue - - # AWS S3 bucket arn - #var.bucket_arn: 'arn:aws:s3:::mybucket' - - # AWS S3 list prefix - #var.bucket_list_prefix: 'prefix' - - # Bucket list interval on S3 bucket - #var.bucket_list_interval: 300s - - # Number of workers on S3 bucket or SQS queue - #var.number_of_workers: 5 - - # Filename of AWS credential file - # If not set "$HOME/.aws/credentials" is used on Linux/Mac - # "%UserProfile%\.aws\credentials" is used on Windows - #var.shared_credential_file: /etc/filebeat/aws_credentials - - # Profile name for aws credential - # If not set the default profile is used - #var.credential_profile_name: fb-aws - - # Use access_key_id, secret_access_key and/or session_token instead of shared credential file - #var.access_key_id: access_key_id - #var.secret_access_key: secret_access_key - #var.session_token: session_token - - # The duration that the received messages are hidden from ReceiveMessage request - # Default to be 300s - #var.visibility_timeout: 300s - - # Maximum duration before AWS API request will be interrupted - # Default to be 120s - #var.api_timeout: 120s - - # Custom endpoint used to access AWS APIs - #var.endpoint: amazonaws.com - - # Default region to query if no other region is set - #var.default_region: us-east-1 - - # AWS IAM Role to assume - #var.role_arn: arn:aws:iam::123456789012:role/test-mb - - # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. - #var.fips_enabled: false - - # URL to proxy AWS API calls - #var.proxy_url: http://proxy:3128 - - # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... 
- #var.ssl: - - s3access: - enabled: false - - # AWS SQS queue url - #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue - - # AWS S3 bucket arn - #var.bucket_arn: 'arn:aws:s3:::mybucket' - - # AWS S3 list prefix - #var.bucket_list_prefix: 'prefix' - - # Bucket list interval on S3 bucket - #var.bucket_list_interval: 300s - - # Number of workers on S3 bucket or SQS queue - #var.number_of_workers: 5 - - # Filename of AWS credential file - # If not set "$HOME/.aws/credentials" is used on Linux/Mac - # "%UserProfile%\.aws\credentials" is used on Windows - #var.shared_credential_file: /etc/filebeat/aws_credentials - - # Profile name for aws credential - # If not set the default profile is used - #var.credential_profile_name: fb-aws - - # Use access_key_id, secret_access_key and/or session_token instead of shared credential file - #var.access_key_id: access_key_id - #var.secret_access_key: secret_access_key - #var.session_token: session_token - - # The duration that the received messages are hidden from ReceiveMessage request - # Default to be 300s - #var.visibility_timeout: 300s - - # Maximum duration before AWS API request will be interrupted - # Default to be 120s - #var.api_timeout: 120s - - # Custom endpoint used to access AWS APIs - #var.endpoint: amazonaws.com - - # Default region to query if no other region is set - #var.default_region: us-east-1 - - # AWS IAM Role to assume - #var.role_arn: arn:aws:iam::123456789012:role/test-mb - - # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. - #var.fips_enabled: false - - # URL to proxy AWS API calls - #var.proxy_url: http://proxy:3128 - - # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... - #var.ssl: - - vpcflow: - enabled: false - - # AWS SQS queue url - #var.queue_url: https://sqs.myregion.amazonaws.com/123456/myqueue - - # AWS S3 bucket arn - #var.bucket_arn: 'arn:aws:s3:::mybucket' - - # AWS S3 list prefix - #var.bucket_list_prefix: 'prefix' - - # Bucket list interval on S3 bucket - #var.bucket_list_interval: 300s - - # Number of workers on S3 bucket or SQS queue - #var.number_of_workers: 5 - - # Filename of AWS credential file - # If not set "$HOME/.aws/credentials" is used on Linux/Mac - # "%UserProfile%\.aws\credentials" is used on Windows - #var.shared_credential_file: /etc/filebeat/aws_credentials +#---------------------------- Elasticsearch Module ---------------------------- +- module: elasticsearch + # Server log + server: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + gc: + enabled: false + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + audit: + enabled: false + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + slowlog: + enabled: false + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + deprecation: + enabled: false + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. 
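For orientation, a minimal sketch that enables two of the Elasticsearch filesets above; the explicit path is a hypothetical example, and leaving var.paths unset lets Filebeat pick the OS default:

- module: elasticsearch
  server:
    enabled: true
    # Hypothetical path; omit var.paths to use the OS default location.
    var.paths: ["/var/log/elasticsearch/*_server.json"]
  gc:
    enabled: true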
+ #var.paths: - # Profile name for aws credential - # If not set the default profile is used - #var.credential_profile_name: fb-aws +#------------------------------ Envoyproxy Module ------------------------------ +- module: envoyproxy + # Fileset for native deployment + log: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: - # Use access_key_id, secret_access_key and/or session_token instead of shared credential file - #var.access_key_id: access_key_id - #var.secret_access_key: secret_access_key - #var.session_token: session_token +#------------------------------- Fortinet Module ------------------------------- +- module: fortinet + firewall: + enabled: false + + # Set which input to use between tcp, udp (default) or file. + #var.input: udp + + # The interface to listen to syslog traffic. Defaults to + # localhost. Set to 0.0.0.0 to bind to all available interfaces. + #var.syslog_host: localhost + + # The port to listen for syslog traffic. Defaults to 9004. + #var.syslog_port: 9004 + + # Set internal interfaces. used to override parsed network.direction + # based on a tagged interface. Both internal and external interfaces must be + # set to leverage this functionality. + #var.internal_interfaces: [ "LAN" ] + + # Set external interfaces. used to override parsed network.direction + # based on a tagged interface. Both internal and external interfaces must be + # set to leverage this functionality. + #var.external_interfaces: [ "WAN" ] + + # List of internal networks. Supports IPv4 and IPv6 addresses and ranges in CIDR notation. + # Also supports the named ranges listed + # [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/network-direction-processor.html#supported-named-network-ranges). + # This can't be used in combination with internal_interfaces and external_interfaces. + #var.internal_networks: [ "private" ] - # The duration that the received messages are hidden from ReceiveMessage request - # Default to be 300s - #var.visibility_timeout: 300s +#--------------------- Google Cloud Platform (GCP) Module --------------------- +- module: gcp + vpcflow: + enabled: false + + # Google Cloud project ID. + var.project_id: my-gcp-project-id + + # Google Pub/Sub topic containing VPC flow logs. Stackdriver must be + # configured to use this topic as a sink for VPC flow logs. + var.topic: gcp-vpc-flowlogs + + # Google Pub/Sub subscription for the topic. Filebeat will create this + # subscription if it does not exist. + var.subscription_name: filebeat-gcp-vpc-flowlogs-sub + + # Credentials file for the service account with authorization to read from + # the subscription. + var.credentials_file: ${path.config}/gcp-service-account-xyz.json + + # Set internal networks. This is used to classify network.direction based + # off of what networks are considered "internal" either base off of a CIDR + # block or named network conditions. If this is not specified, then traffic + # direction is determined by whether it is between source and destination + # instance information rather than IP. + # + # For a full list of network conditions see: + # https://www.elastic.co/guide/en/beats/filebeat/current/defining-processors.html#condition-network + #var.internal_networks: [ "private" ] + + firewall: + enabled: false + + # Google Cloud project ID. + var.project_id: my-gcp-project-id + + # Google Pub/Sub topic containing firewall logs. 
Stackdriver must be + # configured to use this topic as a sink for firewall logs. + var.topic: gcp-vpc-firewall + + # Google Pub/Sub subscription for the topic. Filebeat will create this + # subscription if it does not exist. + var.subscription_name: filebeat-gcp-firewall-sub + + # Credentials file for the service account with authorization to read from + # the subscription. + var.credentials_file: ${path.config}/gcp-service-account-xyz.json + + # Set internal networks. This is used to classify network.direction based + # off of what networks are considered "internal" either base off of a CIDR + # block or named network conditions. If this is not specified, then traffic + # is taken from the direction data in the rule_details event payload. + # + # For a full list of network conditions see: + # https://www.elastic.co/guide/en/beats/filebeat/current/defining-processors.html#condition-network + #var.internal_networks: [ "private" ] + + audit: + enabled: false + + # Google Cloud project ID. + var.project_id: my-gcp-project-id + + # Google Pub/Sub topic containing firewall logs. Stackdriver must be + # configured to use this topic as a sink for firewall logs. + var.topic: gcp-vpc-audit + + # Google Pub/Sub subscription for the topic. Filebeat will create this + # subscription if it does not exist. + var.subscription_name: filebeat-gcp-audit + + # Credentials file for the service account with authorization to read from + # the subscription. + var.credentials_file: ${path.config}/gcp-service-account-xyz.json - # Maximum duration before AWS API request will be interrupted - # Default to be 120s - #var.api_timeout: 120s +#--------------------------- Google_workspace Module --------------------------- +- module: google_workspace + saml: + enabled: false + # var.jwt_file: credentials.json + # var.delegated_account: admin@example.com + # var.initial_interval: 24h + # var.http_client_timeout: 60s + # var.user_key: all + # var.interval: 2h + user_accounts: + enabled: false + # var.jwt_file: credentials.json + # var.delegated_account: admin@example.com + # var.initial_interval: 24h + # var.http_client_timeout: 60s + # var.user_key: all + # var.interval: 2h + login: + enabled: false + # var.jwt_file: credentials.json + # var.delegated_account: admin@example.com + # var.initial_interval: 24h + # var.http_client_timeout: 60s + # var.user_key: all + # var.interval: 2h + admin: + enabled: false + # var.jwt_file: credentials.json + # var.delegated_account: admin@example.com + # var.initial_interval: 24h + # var.http_client_timeout: 60s + # var.user_key: all + # var.interval: 2h + drive: + enabled: false + # var.jwt_file: credentials.json + # var.delegated_account: admin@example.com + # var.initial_interval: 24h + # var.http_client_timeout: 60s + # var.user_key: all + # var.interval: 2h + groups: + enabled: false + # var.jwt_file: credentials.json + # var.delegated_account: admin@example.com + # var.initial_interval: 24h + # var.http_client_timeout: 60s + # var.user_key: all + # var.interval: 2h + - # Custom endpoint used to access AWS APIs - #var.endpoint: amazonaws.com +#------------------------------- HAProxy Module ------------------------------- +- module: haproxy + # All logs + log: + enabled: false + + # Set which input to use between syslog (default) or file. + #var.input: + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. 
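As an illustrative sketch that reuses the placeholder values shown above, the gcp audit fileset wired to a Pub/Sub subscription looks roughly like this:

- module: gcp
  audit:
    enabled: true
    var.project_id: my-gcp-project-id
    var.topic: gcp-vpc-audit
    var.subscription_name: filebeat-gcp-audit
    # Placeholder service-account credentials file.
    var.credentials_file: ${path.config}/gcp-service-account-xyz.json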
+ #var.paths: - # Default region to query if no other region is set - #var.default_region: us-east-1 +#-------------------------------- Ibmmq Module -------------------------------- +- module: ibmmq + # All logs + errorlog: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: - # AWS IAM Role to assume - #var.role_arn: arn:aws:iam::123456789012:role/test-mb +#-------------------------------- Icinga Module -------------------------------- +#- module: icinga + # Main logs + #main: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Debug logs + #debug: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Startup logs + #startup: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: - # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. - #var.fips_enabled: false +#--------------------------------- IIS Module --------------------------------- +#- module: iis + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: - # URL to proxy AWS API calls - #var.proxy_url: http://proxy:3128 +#------------------------------- Iptables Module ------------------------------- +- module: iptables + log: + enabled: false + + # Set which input to use between syslog (default) or file. + #var.input: + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: - # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... - #var.ssl: +#---------------------------- Juniper JUNOS Module ---------------------------- +- module: juniper + srx: + enabled: false + + # Set which input to use between tcp, udp (default) or file. + #var.input: udp + + # The interface to listen to syslog traffic. Defaults to + # localhost. Set to 0.0.0.0 to bind to all available interfaces. + #var.syslog_host: localhost + + # The port to listen for syslog traffic. Defaults to 9006. + #var.syslog_port: 9006 - # Specify a custom VPC flow log format. - #var.format: +#-------------------------------- Kafka Module -------------------------------- +- module: kafka + # All logs + log: + enabled: false + + # Set custom paths for Kafka. If left empty, + # Filebeat will look under /opt. + #var.kafka_home: + + # Set custom paths for the log files. 
If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: -#----------------------------- AWS Fargate Module ----------------------------- -- module: awsfargate - log: - enabled: false +#-------------------------------- Kibana Module -------------------------------- +- module: kibana + # Server logs + log: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Audit logs + audit: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: - # Filename of AWS credential file - # If not set "$HOME/.aws/credentials" is used on Linux/Mac - # "%UserProfile%\.aws\credentials" is used on Windows - #var.shared_credential_file: /etc/filebeat/aws_credentials +#------------------------------- Logstash Module ------------------------------- +#- module: logstash + # logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + # var.paths: + + # Slow logs + #slowlog: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: - # Profile name for aws credential - # If not set the default profile is used - #var.credential_profile_name: fb-aws +#------------------------------ Microsoft Module ------------------------------ +- module: microsoft + # ATP configuration + defender_atp: + enabled: false + # How often the API should be polled + #var.interval: 5m + + # Oauth Client ID + #var.oauth2.client.id: "" + + # Oauth Client Secret + #var.oauth2.client.secret: "" + + # Oauth Token URL, should include the tenant ID + #var.oauth2.token_url: "https://login.microsoftonline.com/TENANT-ID/oauth2/token" + m365_defender: + enabled: false + # How often the API should be polled + #var.interval: 5m + + # Oauth Client ID + #var.oauth2.client.id: "" + + # Oauth Client Secret + #var.oauth2.client.secret: "" + + # Oauth Token URL, should include the tenant ID + #var.oauth2.token_url: "https://login.microsoftonline.com/TENANT-ID/oauth2/v2.0/token" + + # Related scopes, default should be included + #var.oauth2.scopes: + # - "https://api.security.microsoft.com/.default" - # Use access_key_id, secret_access_key and/or session_token instead of shared credential file - #var.access_key_id: access_key_id - #var.secret_access_key: secret_access_key - #var.session_token: session_token +#--------------------------------- MISP Module --------------------------------- +# Deprecated in 7.14.0: Recommended to migrate to the Threat Intel module. + +- module: misp + threat: + enabled: false + # API key to access MISP + #var.api_key + + # Array object in MISP response + #var.http_request_body.limit: 1000 + + # URL of the MISP REST API + #var.url + + # You can also pass SSL options. For example: + #var.ssl.verification_mode: none - # Maximum duration before AWS API request will be interrupted - # Default to be 120s - #var.api_timeout: 120s +#------------------------------- Mongodb Module ------------------------------- +#- module: mongodb + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. 
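A hedged sketch of the m365_defender fileset using the OAuth2 settings described above; the client ID, secret, and tenant segment of the token URL are placeholders:

- module: microsoft
  m365_defender:
    enabled: true
    var.interval: 5m
    # Placeholder Azure AD app registration values.
    var.oauth2.client.id: "00000000-0000-0000-0000-000000000000"
    var.oauth2.client.secret: "changeme"
    var.oauth2.token_url: "https://login.microsoftonline.com/TENANT-ID/oauth2/v2.0/token"
    var.oauth2.scopes:
      - "https://api.security.microsoft.com/.default"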
+ #input: - # Custom endpoint used to access AWS APIs - #var.endpoint: amazonaws.com +#-------------------------------- Mssql Module -------------------------------- +- module: mssql + # Fileset for native deployment + log: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: ['C:\Program Files\Microsoft SQL Server\MSSQL.150\MSSQL\LOG\ERRORLOG*'] - # AWS IAM Role to assume - #var.role_arn: arn:aws:iam::123456789012:role/test-mb +#-------------------------------- MySQL Module -------------------------------- +#- module: mysql + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Slow logs + #slowlog: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: - # ARN of the log group to collect logs from - #var.log_group_arn: arn:aws:logs:us-east-1:123456789012:log-group:test:* +#--------------------------- MySQL Enterprise Module --------------------------- +- module: mysqlenterprise + audit: + enabled: false + + # Sets the input type. Currently only supports file + #var.input: file + + # Set paths for the log files when file input is used. + # Should only be used together with file input + # var.paths: + # - /home/user/mysqlauditlogs/audit.*.log - # Name of the log group to collect logs from - #var.log_group_name: /ecs/test-log-group +#--------------------------------- NATS Module --------------------------------- +- module: nats + # All logs + log: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: - # Region that the specified log group belongs to - #var.region_name: us-east-1 +#------------------------------- NetFlow Module ------------------------------- +- module: netflow + log: + enabled: false + var: + netflow_host: localhost + netflow_port: 2055 + # internal_networks specifies which networks are considered internal or private + # you can specify either a CIDR block or any of the special named ranges listed + # at: https://www.elastic.co/guide/en/beats/filebeat/current/defining-processors.html#condition-network + internal_networks: + - private - # A list of strings of log streams names that Filebeat collect log events from - #var.log_streams: ["/ecs/test-log-group/test-log-stream"] +#-------------------------------- Nginx Module -------------------------------- +#- module: nginx + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Ingress-nginx controller logs. This is disabled by default. 
It could be used in Kubernetes environments to parse ingress-nginx logs + #ingress_controller: + # enabled: false + # + # # Set custom paths for the log files. If left empty, + # # Filebeat will choose the paths depending on your OS. + # #var.paths: - # A string to filter the results to include only log events from log streams that have names starting with this prefix - #var.log_stream_prefix: /ecs/test-log-group/ +#------------------------------ Office 365 Module ------------------------------ +- module: o365 + audit: + enabled: false + + # Set the application_id (also known as client ID): + var.application_id: "" + + # Configure the tenants to monitor: + # Use the tenant ID (also known as directory ID) and the domain name. + # var.tenants: + # - id: "tenant_id_1" + # name: "mydomain.onmicrosoft.com" + # - id: "tenant_id_2" + # name: "mycompany.com" + var.tenants: + - id: "" + name: "mytenant.onmicrosoft.com" + + # List of content-types to fetch. By default all known content-types + # are retrieved: + # var.content_type: + # - "Audit.AzureActiveDirectory" + # - "Audit.Exchange" + # - "Audit.SharePoint" + # - "Audit.General" + # - "DLP.All" + + # Use the following settings to enable certificate-based authentication: + # var.certificate: "/path/to/certificate.pem" + # var.key: "/path/to/private_key.pem" + # var.key_passphrase: "myPrivateKeyPassword" + + # Client-secret based authentication: + # Comment the following line if using certificate authentication. + var.client_secret: "" + + # Advanced settings, use with care: + # var.api: + # # Settings for custom endpoints: + # authentication_endpoint: "https://login.microsoftonline.us/" + # resource: "https://manage.office365.us" + # + # max_retention: 168h + # max_requests_per_minute: 2000 + # poll_interval: 3m - # Specify if Filebeat should read log files from the beginning or from the end - # Default start_position is beginning - #var.start_position: beginning +#--------------------------------- Okta Module --------------------------------- +- module: okta + system: + enabled: false + # You must configure the URL with your Okta domain and provide an + # API token to access the logs API. + #var.url: https://yourOktaDomain/api/v1/logs + #var.api_key: 'yourApiTokenHere' - # How often Filebeat checks for new log events from the specified log group - # Default scan_frequency is 1 minute - #var.scan_frequency: 1m +#-------------------------------- Oracle Module -------------------------------- +- module: oracle + database_audit: + enabled: false + + # Set which input to use between syslog or file (default). + #var.input: file + + # Set paths for the log files when file input is used. + # Should only be used together with file input + #var.paths: ["/home/user/oracleauditlogs/*.aud"] - # Time used to sleep between AWS FilterLogEvents API calls inside the same collection period - # Default api_sleep is 200 ms - #var.api_sleep: 200ms +#------------------------------- Osquery Module ------------------------------- +#- module: osquery + #result: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # If true, all fields created by this module are prefixed with + # `osquery.result`. Set to false to copy the fields in the root + # of the document. The default is true. 
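Assuming client-secret authentication, a minimal sketch of the o365 audit fileset; the application ID, tenant, and secret below are placeholders to replace with real values:

- module: o365
  audit:
    enabled: true
    var.application_id: "my-application-id"
    var.tenants:
      - id: "my-tenant-id"
        name: "mytenant.onmicrosoft.com"
    # Placeholder secret; switch to var.certificate/var.key for certificate auth.
    var.client_secret: "changeme"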
+ #var.use_namespace: true - # URL to proxy AWS API calls - #var.proxy_url: http://proxy:3128 +#--------------------------------- Panw Module --------------------------------- +- module: panw + panos: + enabled: false + + # Set which input to use between syslog (default) or file. + #var.input: + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Set internal security zones. used to determine network.direction + # default "trust" + #var.internal_zones: + + # Set external security zones. used to determine network.direction + # default "untrust" + #var.external_zones: + - # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... - #var.ssl: +#------------------------------- Pensando Module ------------------------------- +- module: pensando +# Firewall logs + dfw: + enabled: false + var.syslog_host: 0.0.0.0 + var.syslog_port: 9001 + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + # var.paths: -#-------------------------------- Azure Module -------------------------------- -- module: azure - # All logs - activitylogs: - enabled: false - var: - # eventhub name containing the activity logs, overwrite he default value if the logs are exported in a different eventhub - eventhub: "insights-operational-logs" - # consumer group name that has access to the event hub, we advise creating a dedicated consumer group for the azure module - consumer_group: "$Default" - # the connection string required to communicate with Event Hubs, steps to generate one here https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string - connection_string: "" - # the name of the storage account the state/offsets will be stored and updated - storage_account: "" - # the name of the storage account container you would like to store the offset information in. - storage_account_container: "" - # the storage account key, this key will be used to authorize access to data in your storage account - storage_account_key: "" +#------------------------------ PostgreSQL Module ------------------------------ +#- module: postgresql + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: - platformlogs: - enabled: false - # var: - # eventhub: "" - # consumer_group: "$Default" - # connection_string: "" - # storage_account: "" - # storage_account_key: "" +#------------------------------- RabbitMQ Module ------------------------------- +- module: rabbitmq + # All logs + log: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. 
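For illustration, the panw panos fileset with the syslog input and the zone overrides the comments describe; the zone names simply mirror the documented defaults:

- module: panw
  panos:
    enabled: true
    var.input: syslog
    # Zones used to derive network.direction; defaults per the comments above.
    var.internal_zones: ["trust"]
    var.external_zones: ["untrust"]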
+ #var.paths: ["/var/log/rabbitmq/rabbit@localhost.log*"] - - auditlogs: - enabled: false - # var: - # eventhub: "insights-logs-auditlogs" - # consumer_group: "$Default" - # connection_string: "" - # storage_account: "" - # storage_account_key: "" - signinlogs: - enabled: false - # var: - # eventhub: "insights-logs-signinlogs" - # consumer_group: "$Default" - # connection_string: "" - # storage_account: "" - # storage_account_key: "" - -#--------------------------------- CEF Module --------------------------------- -- module: cef - log: - enabled: false - var: - syslog_host: localhost - syslog_port: 9003 - - # Set internal security zones. used to override parsed network.direction - # based on zone egress and ingress - #var.internal_zones: [ "Internal" ] - - # Set external security zones. used to override parsed network.direction - # based on zone egress and ingress - #var.external_zones: [ "External" ] - -#------------------------------ Checkpoint Module ------------------------------ -- module: checkpoint - firewall: - enabled: false - - # Set which input to use between syslog (default) or file. - #var.input: syslog - - # The interface to listen to UDP based syslog traffic. Defaults to - # localhost. Set to 0.0.0.0 to bind to all available interfaces. - #var.syslog_host: localhost - - # The UDP port to listen for syslog traffic. Defaults to 9001. - #var.syslog_port: 9001 - - # Set internal security zones. used to override parsed network.direction - # based on zone egress and ingress - #var.internal_zones: [ "Internal" ] - - # Set external security zones. used to override parsed network.direction - # based on zone egress and ingress - #var.external_zones: [ "External" ] - - # IANA time zone or time offset (e.g. `+0200`) to use when interpreting syslog - # timestamps without a time zone. - #var.timezone_offset: UTC - -#-------------------------------- Cisco Module -------------------------------- -- module: cisco - asa: - enabled: false - - # Set which input to use between udp (default), tcp or file. - #var.input: udp - - # The interface to listen to udp or tcp syslog traffic. Defaults to - # localhost. Set to 0.0.0.0 to bind to all available interfaces. - #var.syslog_host: localhost - - # The port to listen for udp or tcp syslog traffic. Defaults to 9001. - #var.syslog_port: 9001 - - # With tcp input, set the optional tls configuration: - #var.ssl: - # enabled: true - # certificate: /path/to/cert.pem - # key: /path/to/privatekey.pem - # key_passphrase: 'password for my key' - - # Set the log level from 1 (alerts only) to 7 (include all messages). - # Messages with a log level higher than the specified will be dropped. - # See https://www.cisco.com/c/en/us/td/docs/security/asa/syslog/b_syslog/syslogs-sev-level.html - #var.log_level: 7 - - # Set internal security zones. used to override parsed network.direction - # based on zone egress and ingress - #var.internal_zones: [ "Internal" ] - - # Set external security zones. used to override parsed network.direction - # based on zone egress and ingress - #var.external_zones: [ "External" ] - - # IANA time zone or time offset (e.g. `+0200`) to use when interpreting syslog - # timestamps without a time zone. - #var.timezone_offset: UTC - - ftd: - enabled: false - - # Set which input to use between udp (default), tcp or file. - #var.input: udp - - # The interface to listen to tcp or udp syslog traffic. Defaults to - # localhost. Set to 0.0.0.0 to bind to all available interfaces. 
- #var.syslog_host: localhost - - # The UDP port to listen for tcp or udp syslog traffic. Defaults to 9003. - #var.syslog_port: 9003 - - # With tcp input, set the optional tls configuration: - #var.ssl: - # enabled: true - # certificate: /path/to/cert.pem - # key: /path/to/privatekey.pem - # key_passphrase: 'password for my key' - - # Set the log level from 1 (alerts only) to 7 (include all messages). - # Messages with a log level higher than the specified will be dropped. - # See https://www.cisco.com/c/en/us/td/docs/security/firepower/Syslogs/b_fptd_syslog_guide/syslogs-sev-level.html - #var.log_level: 7 - - # Set internal security zones. used to override parsed network.direction - # based on zone egress and ingress - #var.internal_zones: [ "Internal" ] - - # Set external security zones. used to override parsed network.direction - # based on zone egress and ingress - #var.external_zones: [ "External" ] - - # IANA time zone or time offset (e.g. `+0200`) to use when interpreting syslog - # timestamps without a time zone. - #var.timezone_offset: UTC - - ios: - enabled: false - - # Set which input to use between syslog (default) or file. - #var.input: syslog - - # The interface to listen to syslog traffic. Defaults to - # localhost. Set to 0.0.0.0 to bind to all available interfaces. - #var.syslog_host: localhost - - # The port to listen on for syslog traffic. Defaults to 9002. - #var.syslog_port: 9002 - - # Set which protocol to use between udp (default) or tcp. - #var.syslog_protocol: udp - - # Set custom paths for the log files when using file input. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - umbrella: - enabled: false - - #var.input: aws-s3 - # AWS SQS queue url - #var.queue_url: https://sqs.us-east-1.amazonaws.com/ID/CiscoQueue - # Access ID to authenticate with the S3 input - #var.access_key_id: 123456 - # Access key to authenticate with the S3 input - #var.secret_access_key: PASSWORD - # The duration that the received messages are hidden from ReceiveMessage request - #var.visibility_timeout: 300s - # Maximum duration before AWS API request will be interrupted - #var.api_timeout: 120s - - amp: - enabled: false - - # Set which input to use between httpjson (default) or file. - #var.input: httpjson - - # The API URL - #var.url: https://api.amp.cisco.com/v1/events - # The client ID used as a username for the API requests. - #var.client_id: - # The API key related to the client ID. - #var.api_key: - # How far to look back the first time the module is started. Expects an amount of hours. - #var.first_interval: 24h - # Overriding the default request timeout, optional. - #var.request_timeout: 60s - -#------------------------------- Coredns Module ------------------------------- -- module: coredns - # Fileset for native deployment - log: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#----------------------------- Crowdstrike Module ----------------------------- -- module: crowdstrike - - falcon: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#----------------------------- CyberArk PAS Module ----------------------------- -- module: cyberarkpas - audit: - enabled: false - - # Set which input to use between tcp (default), udp, or file. 
- # - # var.input: tcp - - # var.syslog_host: localhost - # var.syslog_port: 9301 - - # With tcp input, set the optional tls configuration: - #var.ssl: - # enabled: true - # certificate: /path/to/cert.pem - # key: /path/to/privatekey.pem - # key_passphrase: 'password for my key' - - # Uncoment to keep the original syslog event under event.original. - # var.preserve_original_event: true - - # Set paths for the log files when file input is used. - # var.paths: - - -#---------------------------- Elasticsearch Module ---------------------------- -- module: elasticsearch - # Server log - server: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - gc: - enabled: false - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - audit: - enabled: false - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - slowlog: - enabled: false - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - deprecation: - enabled: false - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#------------------------------ Envoyproxy Module ------------------------------ -- module: envoyproxy - # Fileset for native deployment - log: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#------------------------------- Fortinet Module ------------------------------- -- module: fortinet - firewall: - enabled: false - - # Set which input to use between tcp, udp (default) or file. - #var.input: udp - - # The interface to listen to syslog traffic. Defaults to - # localhost. Set to 0.0.0.0 to bind to all available interfaces. - #var.syslog_host: localhost - - # The port to listen for syslog traffic. Defaults to 9004. - #var.syslog_port: 9004 - - # Set internal interfaces. used to override parsed network.direction - # based on a tagged interface. Both internal and external interfaces must be - # set to leverage this functionality. - #var.internal_interfaces: [ "LAN" ] - - # Set external interfaces. used to override parsed network.direction - # based on a tagged interface. Both internal and external interfaces must be - # set to leverage this functionality. - #var.external_interfaces: [ "WAN" ] - - # List of internal networks. Supports IPv4 and IPv6 addresses and ranges in CIDR notation. - # Also supports the named ranges listed - # [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/network-direction-processor.html#supported-named-network-ranges). - # This can't be used in combination with internal_interfaces and external_interfaces. - #var.internal_networks: [ "private" ] - -#--------------------- Google Cloud Platform (GCP) Module --------------------- -- module: gcp - vpcflow: - enabled: false - - # Google Cloud project ID. - var.project_id: my-gcp-project-id - - # Google Pub/Sub topic containing VPC flow logs. Stackdriver must be - # configured to use this topic as a sink for VPC flow logs. - var.topic: gcp-vpc-flowlogs - - # Google Pub/Sub subscription for the topic. Filebeat will create this - # subscription if it does not exist. 
- var.subscription_name: filebeat-gcp-vpc-flowlogs-sub - - # Credentials file for the service account with authorization to read from - # the subscription. - var.credentials_file: ${path.config}/gcp-service-account-xyz.json - - # Set internal networks. This is used to classify network.direction based - # off of what networks are considered "internal" either base off of a CIDR - # block or named network conditions. If this is not specified, then traffic - # direction is determined by whether it is between source and destination - # instance information rather than IP. - # - # For a full list of network conditions see: - # https://www.elastic.co/guide/en/beats/filebeat/current/defining-processors.html#condition-network - #var.internal_networks: [ "private" ] - - firewall: - enabled: false - - # Google Cloud project ID. - var.project_id: my-gcp-project-id - - # Google Pub/Sub topic containing firewall logs. Stackdriver must be - # configured to use this topic as a sink for firewall logs. - var.topic: gcp-vpc-firewall - - # Google Pub/Sub subscription for the topic. Filebeat will create this - # subscription if it does not exist. - var.subscription_name: filebeat-gcp-firewall-sub - - # Credentials file for the service account with authorization to read from - # the subscription. - var.credentials_file: ${path.config}/gcp-service-account-xyz.json - - # Set internal networks. This is used to classify network.direction based - # off of what networks are considered "internal" either base off of a CIDR - # block or named network conditions. If this is not specified, then traffic - # is taken from the direction data in the rule_details event payload. - # - # For a full list of network conditions see: - # https://www.elastic.co/guide/en/beats/filebeat/current/defining-processors.html#condition-network - #var.internal_networks: [ "private" ] - - audit: - enabled: false - - # Google Cloud project ID. - var.project_id: my-gcp-project-id - - # Google Pub/Sub topic containing firewall logs. Stackdriver must be - # configured to use this topic as a sink for firewall logs. - var.topic: gcp-vpc-audit - - # Google Pub/Sub subscription for the topic. Filebeat will create this - # subscription if it does not exist. - var.subscription_name: filebeat-gcp-audit - - # Credentials file for the service account with authorization to read from - # the subscription. 
- var.credentials_file: ${path.config}/gcp-service-account-xyz.json - -#--------------------------- Google_workspace Module --------------------------- -- module: google_workspace - saml: - enabled: false - # var.jwt_file: credentials.json - # var.delegated_account: admin@example.com - # var.initial_interval: 24h - # var.http_client_timeout: 60s - # var.user_key: all - # var.interval: 2h - user_accounts: - enabled: false - # var.jwt_file: credentials.json - # var.delegated_account: admin@example.com - # var.initial_interval: 24h - # var.http_client_timeout: 60s - # var.user_key: all - # var.interval: 2h - login: - enabled: false - # var.jwt_file: credentials.json - # var.delegated_account: admin@example.com - # var.initial_interval: 24h - # var.http_client_timeout: 60s - # var.user_key: all - # var.interval: 2h - admin: - enabled: false - # var.jwt_file: credentials.json - # var.delegated_account: admin@example.com - # var.initial_interval: 24h - # var.http_client_timeout: 60s - # var.user_key: all - # var.interval: 2h - drive: - enabled: false - # var.jwt_file: credentials.json - # var.delegated_account: admin@example.com - # var.initial_interval: 24h - # var.http_client_timeout: 60s - # var.user_key: all - # var.interval: 2h - groups: - enabled: false - # var.jwt_file: credentials.json - # var.delegated_account: admin@example.com - # var.initial_interval: 24h - # var.http_client_timeout: 60s - # var.user_key: all - # var.interval: 2h - - -#------------------------------- HAProxy Module ------------------------------- -- module: haproxy - # All logs - log: - enabled: false - - # Set which input to use between syslog (default) or file. - #var.input: - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#-------------------------------- Ibmmq Module -------------------------------- -- module: ibmmq - # All logs - errorlog: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#-------------------------------- Icinga Module -------------------------------- -#- module: icinga - # Main logs - #main: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - - # Debug logs - #debug: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - - # Startup logs - #startup: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - -#--------------------------------- IIS Module --------------------------------- -#- module: iis - # Access logs - #access: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - - # Error logs - #error: - #enabled: true - - # Set custom paths for the log files. 
If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - -#------------------------------- Iptables Module ------------------------------- -- module: iptables - log: - enabled: false - - # Set which input to use between syslog (default) or file. - #var.input: - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#---------------------------- Juniper JUNOS Module ---------------------------- -- module: juniper - srx: - enabled: false - - # Set which input to use between tcp, udp (default) or file. - #var.input: udp - - # The interface to listen to syslog traffic. Defaults to - # localhost. Set to 0.0.0.0 to bind to all available interfaces. - #var.syslog_host: localhost - - # The port to listen for syslog traffic. Defaults to 9006. - #var.syslog_port: 9006 - -#-------------------------------- Kafka Module -------------------------------- -- module: kafka - # All logs - log: - enabled: false - - # Set custom paths for Kafka. If left empty, - # Filebeat will look under /opt. - #var.kafka_home: - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#-------------------------------- Kibana Module -------------------------------- -- module: kibana - # Server logs - log: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Audit logs - audit: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#------------------------------- Logstash Module ------------------------------- -#- module: logstash - # logs - #log: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - # var.paths: - - # Slow logs - #slowlog: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#------------------------------ Microsoft Module ------------------------------ -- module: microsoft - # ATP configuration - defender_atp: - enabled: false - # How often the API should be polled - #var.interval: 5m - - # Oauth Client ID - #var.oauth2.client.id: "" - - # Oauth Client Secret - #var.oauth2.client.secret: "" - - # Oauth Token URL, should include the tenant ID - #var.oauth2.token_url: "https://login.microsoftonline.com/TENANT-ID/oauth2/token" - m365_defender: - enabled: false - # How often the API should be polled - #var.interval: 5m - - # Oauth Client ID - #var.oauth2.client.id: "" - - # Oauth Client Secret - #var.oauth2.client.secret: "" - - # Oauth Token URL, should include the tenant ID - #var.oauth2.token_url: "https://login.microsoftonline.com/TENANT-ID/oauth2/v2.0/token" - - # Related scopes, default should be included - #var.oauth2.scopes: - # - "https://api.security.microsoft.com/.default" - -#--------------------------------- MISP Module --------------------------------- -# Deprecated in 7.14.0: Recommended to migrate to the Threat Intel module. 
- -- module: misp - threat: - enabled: false - # API key to access MISP - #var.api_key - - # Array object in MISP response - #var.http_request_body.limit: 1000 - - # URL of the MISP REST API - #var.url - - # You can also pass SSL options. For example: - #var.ssl.verification_mode: none - -#------------------------------- Mongodb Module ------------------------------- -#- module: mongodb - # Logs - #log: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - -#-------------------------------- Mssql Module -------------------------------- -- module: mssql - # Fileset for native deployment - log: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: ['C:\Program Files\Microsoft SQL Server\MSSQL.150\MSSQL\LOG\ERRORLOG*'] - -#-------------------------------- MySQL Module -------------------------------- -#- module: mysql - # Error logs - #error: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - - # Slow logs - #slowlog: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - -#--------------------------- MySQL Enterprise Module --------------------------- -- module: mysqlenterprise - audit: - enabled: false - - # Sets the input type. Currently only supports file - #var.input: file - - # Set paths for the log files when file input is used. - # Should only be used together with file input - # var.paths: - # - /home/user/mysqlauditlogs/audit.*.log - -#--------------------------------- NATS Module --------------------------------- -- module: nats - # All logs - log: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - -#------------------------------- NetFlow Module ------------------------------- -- module: netflow - log: - enabled: false - var: - netflow_host: localhost - netflow_port: 2055 - # internal_networks specifies which networks are considered internal or private - # you can specify either a CIDR block or any of the special named ranges listed - # at: https://www.elastic.co/guide/en/beats/filebeat/current/defining-processors.html#condition-network - internal_networks: - - private - -#-------------------------------- Nginx Module -------------------------------- -#- module: nginx - # Access logs - #access: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - - # Error logs - #error: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - - # Ingress-nginx controller logs. 
This is disabled by default. It could be used in Kubernetes environments to parse ingress-nginx logs - #ingress_controller: - # enabled: false - # - # # Set custom paths for the log files. If left empty, - # # Filebeat will choose the paths depending on your OS. - # #var.paths: - -#------------------------------ Office 365 Module ------------------------------ -- module: o365 - audit: - enabled: false - - # Set the application_id (also known as client ID): - var.application_id: "" - - # Configure the tenants to monitor: - # Use the tenant ID (also known as directory ID) and the domain name. - # var.tenants: - # - id: "tenant_id_1" - # name: "mydomain.onmicrosoft.com" - # - id: "tenant_id_2" - # name: "mycompany.com" - var.tenants: - - id: "" - name: "mytenant.onmicrosoft.com" - - # List of content-types to fetch. By default all known content-types - # are retrieved: - # var.content_type: - # - "Audit.AzureActiveDirectory" - # - "Audit.Exchange" - # - "Audit.SharePoint" - # - "Audit.General" - # - "DLP.All" - - # Use the following settings to enable certificate-based authentication: - # var.certificate: "/path/to/certificate.pem" - # var.key: "/path/to/private_key.pem" - # var.key_passphrase: "myPrivateKeyPassword" - - # Client-secret based authentication: - # Comment the following line if using certificate authentication. - var.client_secret: "" - - # Advanced settings, use with care: - # var.api: - # # Settings for custom endpoints: - # authentication_endpoint: "https://login.microsoftonline.us/" - # resource: "https://manage.office365.us" - # - # max_retention: 168h - # max_requests_per_minute: 2000 - # poll_interval: 3m - -#--------------------------------- Okta Module --------------------------------- -- module: okta - system: - enabled: false - # You must configure the URL with your Okta domain and provide an - # API token to access the logs API. - #var.url: https://yourOktaDomain/api/v1/logs - #var.api_key: 'yourApiTokenHere' - -#-------------------------------- Oracle Module -------------------------------- -- module: oracle - database_audit: - enabled: false - - # Set which input to use between syslog or file (default). - #var.input: file - - # Set paths for the log files when file input is used. - # Should only be used together with file input - #var.paths: ["/home/user/oracleauditlogs/*.aud"] - -#------------------------------- Osquery Module ------------------------------- -#- module: osquery - #result: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # If true, all fields created by this module are prefixed with - # `osquery.result`. Set to false to copy the fields in the root - # of the document. The default is true. - #var.use_namespace: true - -#--------------------------------- Panw Module --------------------------------- -- module: panw - panos: - enabled: false - - # Set which input to use between syslog (default) or file. - #var.input: - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Set internal security zones. used to determine network.direction - # default "trust" - #var.internal_zones: - - # Set external security zones. 
used to determine network.direction - # default "untrust" - #var.external_zones: - - -#------------------------------- Pensando Module ------------------------------- -- module: pensando -# Firewall logs - dfw: - enabled: false - var.syslog_host: 0.0.0.0 - var.syslog_port: 9001 - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - # var.paths: - -#------------------------------ PostgreSQL Module ------------------------------ -#- module: postgresql - # Logs - #log: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: - -#------------------------------- RabbitMQ Module ------------------------------- -- module: rabbitmq - # All logs - log: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: ["/var/log/rabbitmq/rabbit@localhost.log*"] - -#-------------------------------- Redis Module -------------------------------- -#- module: redis - # Main logs - #log: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: ["/var/log/redis/redis-server.log*"] - - # Slow logs, retrieved via the Redis API (SLOWLOG) - #slowlog: - #enabled: true - - # The Redis hosts to connect to. - #var.hosts: ["localhost:6379"] - - # Optional, the password to use when connecting to Redis. - #var.password: +#-------------------------------- Redis Module -------------------------------- +#- module: redis + # Main logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: ["/var/log/redis/redis-server.log*"] + + # Slow logs, retrieved via the Redis API (SLOWLOG) + #slowlog: + #enabled: true + + # The Redis hosts to connect to. + #var.hosts: ["localhost:6379"] + + # Optional, the password to use when connecting to Redis. + #var.password: #------------------------------ Salesforce Module ------------------------------ # Configuration file for Salesforce module in Filebeat @@ -1653,3264 +1653,3264 @@ filebeat.modules: var.real_time: true var.real_time_interval: 5m #----------------------------- Google Santa Module ----------------------------- -- module: santa - log: - enabled: false - # Set custom paths for the log files. If left empty, - # Filebeat will choose the the default path. - #var.paths: +- module: santa + log: + enabled: false + # Set custom paths for the log files. If left empty, + # Filebeat will choose the the default path. + #var.paths: #--------------------------------- Snyk Module --------------------------------- -- module: snyk - audit: - enabled: false - # Set which input to use between httpjson (default) or file. - #var.input: httpjson - # - # What audit type to collect, can be either "group" or "organization". - #var.audit_type: organization - # - # The ID related to the audit_type. If audit type is group, then this value should be - # the group ID and if it is organization it should be the organization ID to collect from. - #var.audit_id: 1235432-asdfdf-2341234-asdgjhg - # How often the API should be polled, defaults to 1 hour. - #var.interval: 1h - # How far to look back the first time the module starts up. (Only works with full days, 24 hours, 48 hours etc). 
- #var.first_interval: 24h - # The API token that is created for a specific user, found in the Snyk management dashboard. - #var.api_token: - # Event filtering. - # All configuration items below is OPTIONAL and the default options will be overwritten - # for each entry that is not commented out. - # Will return only logs for this specific project. - #var.project_id: "" - # User public ID. Will fetch only audit logs originated from this user's actions. - #var.user_id: "" - # Will return only logs for this specific event. - #var.event: "" - # User email address. Will fetch only audit logs originated from this user's actions. - #var.email_address: "" - - vulnerabilities: - enabled: false - # Set which input to use between httpjson (default) or file. - #var.input: httpjson - # How often the API should be polled. Data from the Snyk API is automatically updated - # once per day, so the default interval is 24 hours. - #var.interval: 24h - # How far to look back the first time the module starts up. (Only works with full days, 24 hours, 48 hours etc). - #var.first_interval: 24h - # The API token that is created for a specific user, found in the Snyk management dashboard. - #var.api_token: - # The list of org IDs to filter the results by. - # One organization ID per line, starting with a - sign - #var.orgs: - # - 12354-asdfdf-123543-asdsdfg - # - 76554-jhggfd-654342-hgrfasd - # Event filtering. - # All configuration items below is OPTIONAL and the default options will be overwritten - # for each entry that is not commented out. - # The severity levels of issues to filter the results by. - #var.included_severity: - # - critical - # - high - # - medium - # - low - # - # The exploit maturity levels of issues to filter the results by. - #var.exploit_maturity: - # - mature - # - proof-of-concept - # - no-known-exploit - # - no-data - # - # The type of issues to filter the results by. - #var.types: - # - vuln - # - license - # - configuration - # - # The type of languages to filter the results by. - #var.languages: - # - javascript - # - ruby - # - java - # - scala - # - python - # - golang - # - php - # - dotnet - # - swift-objective-c - # - elixir - # - docker - # - terraform - # - kubernetes - # - helm - # - cloudformation - # - # Search term to filter issue name by, or an exact CVE or CWE. - #var.identifier: - # - "" - # - # If set to true, only include issues which are ignored, if set to false, only include issues which are not ignored. - #var.ignored: false - #var.patched: false - #var.fixable: false - #var.is_fixed: false - #var.is_patchable: false - #var.is_pinnable: false - # - # The priority score ranging between 0-1000 - #var.min_priority_score: 0 - #var.max_priority_score: 1000 +- module: snyk + audit: + enabled: false + # Set which input to use between httpjson (default) or file. + #var.input: httpjson + # + # What audit type to collect, can be either "group" or "organization". + #var.audit_type: organization + # + # The ID related to the audit_type. If audit type is group, then this value should be + # the group ID and if it is organization it should be the organization ID to collect from. + #var.audit_id: 1235432-asdfdf-2341234-asdgjhg + # How often the API should be polled, defaults to 1 hour. + #var.interval: 1h + # How far to look back the first time the module starts up. (Only works with full days, 24 hours, 48 hours etc). + #var.first_interval: 24h + # The API token that is created for a specific user, found in the Snyk management dashboard. + #var.api_token: + # Event filtering. 
+ # All configuration items below is OPTIONAL and the default options will be overwritten + # for each entry that is not commented out. + # Will return only logs for this specific project. + #var.project_id: "" + # User public ID. Will fetch only audit logs originated from this user's actions. + #var.user_id: "" + # Will return only logs for this specific event. + #var.event: "" + # User email address. Will fetch only audit logs originated from this user's actions. + #var.email_address: "" + + vulnerabilities: + enabled: false + # Set which input to use between httpjson (default) or file. + #var.input: httpjson + # How often the API should be polled. Data from the Snyk API is automatically updated + # once per day, so the default interval is 24 hours. + #var.interval: 24h + # How far to look back the first time the module starts up. (Only works with full days, 24 hours, 48 hours etc). + #var.first_interval: 24h + # The API token that is created for a specific user, found in the Snyk management dashboard. + #var.api_token: + # The list of org IDs to filter the results by. + # One organization ID per line, starting with a - sign + #var.orgs: + # - 12354-asdfdf-123543-asdsdfg + # - 76554-jhggfd-654342-hgrfasd + # Event filtering. + # All configuration items below is OPTIONAL and the default options will be overwritten + # for each entry that is not commented out. + # The severity levels of issues to filter the results by. + #var.included_severity: + # - critical + # - high + # - medium + # - low + # + # The exploit maturity levels of issues to filter the results by. + #var.exploit_maturity: + # - mature + # - proof-of-concept + # - no-known-exploit + # - no-data + # + # The type of issues to filter the results by. + #var.types: + # - vuln + # - license + # - configuration + # + # The type of languages to filter the results by. + #var.languages: + # - javascript + # - ruby + # - java + # - scala + # - python + # - golang + # - php + # - dotnet + # - swift-objective-c + # - elixir + # - docker + # - terraform + # - kubernetes + # - helm + # - cloudformation + # + # Search term to filter issue name by, or an exact CVE or CWE. + #var.identifier: + # - "" + # + # If set to true, only include issues which are ignored, if set to false, only include issues which are not ignored. + #var.ignored: false + #var.patched: false + #var.fixable: false + #var.is_fixed: false + #var.is_patchable: false + #var.is_pinnable: false + # + # The priority score ranging between 0-1000 + #var.min_priority_score: 0 + #var.max_priority_score: 1000 #-------------------------------- Sophos Module -------------------------------- -- module: sophos - xg: - enabled: false - - # Set which input to use between tcp, udp (default) or file. - #var.input: udp - - # The interface to listen to syslog traffic. Defaults to - # localhost. Set to 0.0.0.0 to bind to all available interfaces. - #var.syslog_host: localhost - - # The port to listen for syslog traffic. Defaults to 9004. - #var.syslog_port: 9005 - - # firewall default hostname - #var.default_host_name: firewall.localgroup.local - - # known firewalls - #var.known_devices: - #- serial_number: "1234567890123457" - # hostname: "a.host.local" - #- serial_number: "1234234590678557" - # hostname: "b.host.local" - +- module: sophos + xg: + enabled: false + + # Set which input to use between tcp, udp (default) or file. + #var.input: udp + + # The interface to listen to syslog traffic. Defaults to + # localhost. Set to 0.0.0.0 to bind to all available interfaces. 
+ #var.syslog_host: localhost + + # The port to listen for syslog traffic. Defaults to 9004. + #var.syslog_port: 9005 + + # firewall default hostname + #var.default_host_name: firewall.localgroup.local + + # known firewalls + #var.known_devices: + #- serial_number: "1234567890123457" + # hostname: "a.host.local" + #- serial_number: "1234234590678557" + # hostname: "b.host.local" + #------------------------------- Suricata Module ------------------------------- -- module: suricata - # All logs - eve: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: +- module: suricata + # All logs + eve: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: #----------------------------- Threatintel Module ----------------------------- -- module: threatintel - abuseurl: - enabled: false - - # Input used for ingesting threat intel data. - var.input: httpjson - - # The URL used for Threat Intel API calls. - var.url: https://urlhaus-api.abuse.ch/v1/urls/recent/ - - # The interval to poll the API for updates. - var.interval: 10m - - abusemalware: - enabled: false - - # Input used for ingesting threat intel data. - var.input: httpjson - - # The URL used for Threat Intel API calls. - var.url: https://urlhaus-api.abuse.ch/v1/payloads/recent/ - - # The interval to poll the API for updates. - var.interval: 10m - - malwarebazaar: - enabled: false - - # Input used for ingesting threat intel data. - var.input: httpjson - - # The URL used for Threat Intel API calls. - var.url: https://mb-api.abuse.ch/api/v1/ - - # The interval to poll the API for updates. - var.interval: 10m - - misp: - enabled: false - - # Input used for ingesting threat intel data, defaults to JSON. - var.input: httpjson - - # The URL of the MISP instance, should end with "/events/restSearch". - var.url: https://SERVER/events/restSearch - - # The authentication token used to contact the MISP API. Found when looking at user account in the MISP UI. - var.api_token: API_KEY - - # Configures the type of SSL verification done, if MISP is running on self signed certificates - # then the certificate would either need to be trusted, or verification_mode set to none. - #var.ssl.verification_mode: none - - # Optional filters that can be applied to the API for filtering out results. This should support the majority of fields in a MISP context. - # For examples please reference the filebeat module documentation. - #var.filters: - # - threat_level: [4, 5] - # - to_ids: true - - # How far back to look once the beat starts up for the first time, the value has to be in hours. Each request afterwards will filter on any event newer - # than the last event that was already ingested. - var.first_interval: 300h - - # The interval to poll the API for updates. - var.interval: 5m - - otx: - enabled: false - - # Input used for ingesting threat intel data - var.input: httpjson - - # The URL used for OTX Threat Intel API calls. - var.url: https://otx.alienvault.com/api/v1/indicators/export - - # The authentication token used to contact the OTX API, can be found on the OTX UI. - var.api_token: API_KEY - - # Optional filters that can be applied to retrieve only specific indicators. 
- #var.types: "domain,IPv4,hostname,url,FileHash-SHA256" - - # The timeout of the HTTP client connecting to the OTX API - #var.http_client_timeout: 120s - - # How many hours to look back for each request, should be close to the configured interval. Deduplication of events is handled by the module. - var.lookback_range: 1h - - # How far back to look once the beat starts up for the first time, the value has to be in hours. - var.first_interval: 400h - - # The interval to poll the API for updates - var.interval: 5m - - anomali: - enabled: false - - # Input used for ingesting threat intel data - var.input: httpjson - - # The URL used for Threat Intel API calls. Limo has multiple different possibilities for URL's depending - # on the type of threat intel source that is needed. - var.url: https://limo.anomali.com/api/v1/taxii2/feeds/collections/41/objects - - # The Username used by anomali Limo, defaults to guest. - #var.username: guest - - # The password used by anomali Limo, defaults to guest. - #var.password: guest - - # How far back to look once the beat starts up for the first time, the value has to be in hours. - var.first_interval: 400h - - # The interval to poll the API for updates - var.interval: 5m - - anomalithreatstream: - enabled: false - - # Input used for ingesting threat intel data - var.input: http_endpoint - - # Address to bind to in order to receive HTTP requests - # from the Integrator SDK. Use 0.0.0.0 to bind to all - # existing interfaces. - var.listen_address: localhost - - # Port to use to receive HTTP requests from the - # Integrator SDK. - var.listen_port: 8080 - - # Secret key to authenticate requests from the SDK. - var.secret: "" - - # Uncomment the following and set the absolute paths - # to the server SSL certificate and private key to - # enable HTTPS secure connections. - # - # var.ssl_certificate: path/to/server_ssl_cert.pem - # var.ssl_key: path/to/ssl_key.pem - - threatq: - enabled: false - - # Input used for ingesting threat intel data - var.input: httpjson - - # The URL used for ThreatQ ThreatLibrary API calls. - # Remember to put a slash at the end of the host URL - var.host: https://www.threatq.com/ - - # Oauth 2.0 Access Token URL - var.token_url: https://www.threatq.com/api/token - - # Oauth 2.0 Client ID - var.client_id: "INSERT_CLIENT_ID" - - # Oauth 2.0 Client Secret - var.client_secret: "INSERT_CLIENT_SECRET" - - # The interval to poll the API for updates - var.interval: 1m - - # The ID for the ThreatQ smart data collection - var.data_collection_id: "INSERT_THREATQ_DATA_COLLECTION_ID" - - # The URL of the proxy if used - #var.proxy_url: http://proxy:8000 - - # Customize the HTTP timeout configured for the API requests - #var.http_client_timeout: 30s +- module: threatintel + abuseurl: + enabled: false + + # Input used for ingesting threat intel data. + var.input: httpjson + + # The URL used for Threat Intel API calls. + var.url: https://urlhaus-api.abuse.ch/v1/urls/recent/ + + # The interval to poll the API for updates. + var.interval: 10m + + abusemalware: + enabled: false + + # Input used for ingesting threat intel data. + var.input: httpjson + + # The URL used for Threat Intel API calls. + var.url: https://urlhaus-api.abuse.ch/v1/payloads/recent/ + + # The interval to poll the API for updates. + var.interval: 10m + + malwarebazaar: + enabled: false + + # Input used for ingesting threat intel data. + var.input: httpjson + + # The URL used for Threat Intel API calls. 
+ var.url: https://mb-api.abuse.ch/api/v1/ + + # The interval to poll the API for updates. + var.interval: 10m + + misp: + enabled: false + + # Input used for ingesting threat intel data, defaults to JSON. + var.input: httpjson + + # The URL of the MISP instance, should end with "/events/restSearch". + var.url: https://SERVER/events/restSearch + + # The authentication token used to contact the MISP API. Found when looking at user account in the MISP UI. + var.api_token: API_KEY + + # Configures the type of SSL verification done, if MISP is running on self signed certificates + # then the certificate would either need to be trusted, or verification_mode set to none. + #var.ssl.verification_mode: none + + # Optional filters that can be applied to the API for filtering out results. This should support the majority of fields in a MISP context. + # For examples please reference the filebeat module documentation. + #var.filters: + # - threat_level: [4, 5] + # - to_ids: true + + # How far back to look once the beat starts up for the first time, the value has to be in hours. Each request afterwards will filter on any event newer + # than the last event that was already ingested. + var.first_interval: 300h + + # The interval to poll the API for updates. + var.interval: 5m + + otx: + enabled: false + + # Input used for ingesting threat intel data + var.input: httpjson + + # The URL used for OTX Threat Intel API calls. + var.url: https://otx.alienvault.com/api/v1/indicators/export + + # The authentication token used to contact the OTX API, can be found on the OTX UI. + var.api_token: API_KEY + + # Optional filters that can be applied to retrieve only specific indicators. + #var.types: "domain,IPv4,hostname,url,FileHash-SHA256" + + # The timeout of the HTTP client connecting to the OTX API + #var.http_client_timeout: 120s + + # How many hours to look back for each request, should be close to the configured interval. Deduplication of events is handled by the module. + var.lookback_range: 1h + + # How far back to look once the beat starts up for the first time, the value has to be in hours. + var.first_interval: 400h + + # The interval to poll the API for updates + var.interval: 5m + + anomali: + enabled: false + + # Input used for ingesting threat intel data + var.input: httpjson + + # The URL used for Threat Intel API calls. Limo has multiple different possibilities for URL's depending + # on the type of threat intel source that is needed. + var.url: https://limo.anomali.com/api/v1/taxii2/feeds/collections/41/objects + + # The Username used by anomali Limo, defaults to guest. + #var.username: guest + + # The password used by anomali Limo, defaults to guest. + #var.password: guest + + # How far back to look once the beat starts up for the first time, the value has to be in hours. + var.first_interval: 400h + + # The interval to poll the API for updates + var.interval: 5m + + anomalithreatstream: + enabled: false + + # Input used for ingesting threat intel data + var.input: http_endpoint + + # Address to bind to in order to receive HTTP requests + # from the Integrator SDK. Use 0.0.0.0 to bind to all + # existing interfaces. + var.listen_address: localhost + + # Port to use to receive HTTP requests from the + # Integrator SDK. + var.listen_port: 8080 + + # Secret key to authenticate requests from the SDK. + var.secret: "" + + # Uncomment the following and set the absolute paths + # to the server SSL certificate and private key to + # enable HTTPS secure connections. 
+ # + # var.ssl_certificate: path/to/server_ssl_cert.pem + # var.ssl_key: path/to/ssl_key.pem + + threatq: + enabled: false + + # Input used for ingesting threat intel data + var.input: httpjson + + # The URL used for ThreatQ ThreatLibrary API calls. + # Remember to put a slash at the end of the host URL + var.host: https://www.threatq.com/ + + # Oauth 2.0 Access Token URL + var.token_url: https://www.threatq.com/api/token + + # Oauth 2.0 Client ID + var.client_id: "INSERT_CLIENT_ID" + + # Oauth 2.0 Client Secret + var.client_secret: "INSERT_CLIENT_SECRET" + + # The interval to poll the API for updates + var.interval: 1m + + # The ID for the ThreatQ smart data collection + var.data_collection_id: "INSERT_THREATQ_DATA_COLLECTION_ID" + + # The URL of the proxy if used + #var.proxy_url: http://proxy:8000 + + # Customize the HTTP timeout configured for the API requests + #var.http_client_timeout: 30s #------------------------------- Traefik Module ------------------------------- -#- module: traefik - # Access logs - #access: - #enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - - # Input configuration (advanced). Any input configuration option - # can be added under this section. - #input: +#- module: traefik + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: #--------------------------------- Zeek Module --------------------------------- -- module: zeek - capture_loss: - enabled: false - connection: - enabled: false - dce_rpc: - enabled: false - dhcp: - enabled: false - dnp3: - enabled: false - dns: - enabled: false - dpd: - enabled: false - files: - enabled: false - ftp: - enabled: false - http: - enabled: false - intel: - enabled: false - irc: - enabled: false - kerberos: - enabled: false - modbus: - enabled: false - mysql: - enabled: false - notice: - enabled: false - ntp: - enabled: false - ntlm: - enabled: false - ocsp: - enabled: false - pe: - enabled: false - radius: - enabled: false - rdp: - enabled: false - rfb: - enabled: false - signature: - enabled: false - sip: - enabled: false - smb_cmd: - enabled: false - smb_files: - enabled: false - smb_mapping: - enabled: false - smtp: - enabled: false - snmp: - enabled: false - socks: - enabled: false - ssh: - enabled: false - ssl: - enabled: false - stats: - enabled: false - syslog: - enabled: false - traceroute: - enabled: false - tunnel: - enabled: false - weird: - enabled: false - x509: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. 
- #var.paths: +- module: zeek + capture_loss: + enabled: false + connection: + enabled: false + dce_rpc: + enabled: false + dhcp: + enabled: false + dnp3: + enabled: false + dns: + enabled: false + dpd: + enabled: false + files: + enabled: false + ftp: + enabled: false + http: + enabled: false + intel: + enabled: false + irc: + enabled: false + kerberos: + enabled: false + modbus: + enabled: false + mysql: + enabled: false + notice: + enabled: false + ntp: + enabled: false + ntlm: + enabled: false + ocsp: + enabled: false + pe: + enabled: false + radius: + enabled: false + rdp: + enabled: false + rfb: + enabled: false + signature: + enabled: false + sip: + enabled: false + smb_cmd: + enabled: false + smb_files: + enabled: false + smb_mapping: + enabled: false + smtp: + enabled: false + snmp: + enabled: false + socks: + enabled: false + ssh: + enabled: false + ssl: + enabled: false + stats: + enabled: false + syslog: + enabled: false + traceroute: + enabled: false + tunnel: + enabled: false + weird: + enabled: false + x509: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: #------------------------------ ZooKeeper Module ------------------------------ -- module: zookeeper - # All logs - audit: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: - # All logs - log: - enabled: false - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: +- module: zookeeper + # All logs + audit: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + # All logs + log: + enabled: false + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: #--------------------------------- Zoom Module --------------------------------- -- module: zoom - webhook: - enabled: false - - # The type of input to use - #var.input: http_endpoint - - # The interface to listen for incoming HTTP requests. Defaults to - # localhost. Set to 0.0.0.0 to bind to all available interfaces. - #var.listen_address: localhost - - # The port to bind to - #var.listen_port: 80 - - # The header Zoom uses to send its secret token, defaults to "Authorization" - #secret.header: Authorization - - # The custom secret token value created when configuring the Zoom webhook - #secret.value: my-custom-value - - # Enable the CRC webhook validation - #crc.enabled: false - - # The secret token value provided by Zoom for CRC validation - #crc.secret: ZOOMSECRETTOKEN - - - -#=========================== Filebeat inputs ============================= - -# List of inputs to fetch data. -filebeat.inputs: -# Each - is an input. Most options can be set at the input level, so -# you can use different inputs for various configurations. -# Below are the input specific configurations. - -# Type of the files. Based on this the way the file is read is decided. -# The different types cannot be mixed in one input -# -# Possible options are: -# * filestream: Reads every line of the log file -# * log: Reads every line of the log file (deprecated) -# * stdin: Reads the standard in - -#------------------------------ Log input -------------------------------- -- type: log - - # Change to true to enable this input configuration. 
- enabled: false - - # Paths that should be crawled and fetched. Glob based paths. - # To fetch all ".log" files from a specific level of subdirectories - # /var/log/*/*.log can be used. - # For each file found under this path, a harvester is started. - # Make sure no file is defined twice as this can lead to unexpected behaviour. - paths: - - /var/log/*.log - #- c:\programdata\elasticsearch\logs\* - - # Configure the file encoding for reading files with international characters - # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). - # Some sample encodings: - # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, - # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... - #encoding: plain - - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, no lines are dropped. - #exclude_lines: ['^DBG'] - - # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, all the lines are exported. - #include_lines: ['^ERR', '^WARN'] - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #exclude_files: ['.gz$'] - - # Method to determine if two files are the same or not. By default - # the Beat considers two files the same if their inode and device id are the same. - #file_identity.native: ~ - - # Optional additional fields. These fields can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - # Set to true to store the additional fields as top-level fields instead - # of under the "fields" sub-dictionary. In case of name conflicts with the - # fields added by Filebeat itself, the custom fields overwrite the default - # fields. - #fields_under_root: false - - # Set to true to publish fields with null values in events. - #keep_null: false - - # By default, all events contain `host.name`. This option can be set to true - # to disable the addition of this field to all events. The default value is - # false. - #publisher_pipeline.disable_host: false - - # Ignore files that were modified more than the defined timespan in the past. - # ignore_older is disabled by default, so no files are ignored by setting it to 0. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. - #ignore_older: 0 - - # How often the input checks for new files in the paths that are specified - # for harvesting. Specify 1s to scan the directory as frequently as possible - # without causing Filebeat to scan too frequently. Default: 10s. - #scan_frequency: 10s - - # Defines the buffer size every harvester uses when fetching the file - #harvester_buffer_size: 16384 - - # Maximum number of bytes a single log event can have - # All bytes after max_bytes are discarded and not sent. The default is 10MB. - # This is especially useful for multiline log messages which can get large. - #max_bytes: 10485760 - - # Characters that separate the lines. 
Valid values: auto, line_feed, vertical_tab, form_feed, - # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator, - # null_terminator - #line_terminator: auto - - ### Recursive glob configuration - - # Expand "**" patterns into regular glob patterns. - #recursive_glob.enabled: true - - ### JSON configuration - - # Decode JSON options. Enable this if your logs are structured in JSON. - # JSON key on which to apply the line filtering and multiline settings. This key - # must be top level and its value must be string, otherwise it is ignored. If - # no text key is defined, the line filtering and multiline features cannot be used. - #json.message_key: - - # By default, the decoded JSON is placed under a "json" key in the output document. - # If you enable this setting, the keys are copied top level in the output document. - #json.keys_under_root: false - - # If keys_under_root and this setting are enabled, then the values from the decoded - # JSON object overwrites the fields that Filebeat normally adds (type, source, offset, etc.) - # in case of conflicts. - #json.overwrite_keys: false - - # If this setting is enabled, then keys in the decoded JSON object will be recursively - # de-dotted, and expanded into a hierarchical object structure. - # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`. - #json.expand_keys: false - - # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON - # unmarshaling errors or when a text key is defined in the configuration but cannot - # be used. - #json.add_error_key: false - - ### Multiline options - - # Multiline can be used for log messages spanning multiple lines. This is common - # for Java Stack Traces or C-Line Continuation - - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #multiline.pattern: ^\[ - - # Defines if the pattern set under the pattern should be negated or not. Default is false. - #multiline.negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. - # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash - #multiline.match: after - - # The maximum number of lines that are combined into one event. - # In case there are more the max_lines the additional lines are discarded. - # Default is 500 - #multiline.max_lines: 500 - - # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event - # Default is 5s. - #multiline.timeout: 5s - - # To aggregate constant number of lines into a single event use the count mode of multiline. - #multiline.type: count - - # The number of lines to aggregate into a single event. - #multiline.count_lines: 3 - - # Do not add new line characters when concatenating lines. - #multiline.skip_newline: false - - # Setting tail_files to true means filebeat starts reading new files at the end - # instead of the beginning. If this is used in combination with log rotation - # this can mean that the first entries of a new file are skipped. - #tail_files: false - - # The ingest pipeline ID associated with this input. If this is set, it - # overwrites the pipeline option from the Elasticsearch output. - #pipeline: - - # If symlinks is enabled, symlinks are opened and harvested. 
The harvester is opening the - # original for harvesting but will report the symlink name as the source. - #symlinks: false - - # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. Backoff defines how long it has to wait - # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real-time crawling. - # Every time a new line appears, backoff is reset to the initial value. - #backoff: 1s - - # Max backoff defines what the maximum backoff time is. After having backed off multiple times - # from checking the files, the waiting time will never exceed max_backoff independent of the - # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log - # file after having backed off multiple times, it takes a maximum of 10s to read the new line - #max_backoff: 10s - - # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, - # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. - # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached - #backoff_factor: 2 - - # Max number of harvesters that are started in parallel. - # Default is 0 which means unlimited - #harvester_limit: 0 - - ### Harvester closing options - - # Close inactive closes the file handler after the predefined period. - # The period starts when the last line of the file was, not the file ModTime. - # Time strings like 2h (2 hours), and 5m (5 minutes) can be used. - #close_inactive: 5m - - # Close renamed closes a file handler when the file is renamed or rotated. - # Note: Potential data loss. Make sure to read and understand the docs for this option. - #close_renamed: false - - # When enabling this option, a file handler is closed immediately in case a file can't be found - # any more. In case the file shows up again later, harvesting will continue at the last known position - # after scan_frequency. - #close_removed: true - - # Closes the file handler as soon as the harvesters reach the end of the file. - # By default this option is disabled. - # Note: Potential data loss. Make sure to read and understand the docs for this option. - #close_eof: false - - ### State options - - # Files for the modification data are older than clean_inactive the state from the registry is removed - # By default this is disabled. - #clean_inactive: 0 - - # Removes the state for files which cannot be found on disk anymore immediately - #clean_removed: true - - # Close timeout closes the harvester after the predefined time. - # This is independent if the harvester did finish reading the file or not. - # By default this option is disabled. - # Note: Potential data loss. Make sure to read and understand the docs for this option. - #close_timeout: 0 - - # Defines if inputs are enabled - #enabled: true - -#--------------------------- Filestream input ---------------------------- -- type: filestream - - # Unique ID among all inputs, an ID is required. - id: my-filestream-id - - # Change to true to enable this input configuration. - enabled: false - - # Paths that should be crawled and fetched. Glob based paths. - # To fetch all ".log" files from a specific level of subdirectories - # /var/log/*/*.log can be used. - # For each file found under this path, a harvester is started. 
- # Make sure not file is defined twice as this can lead to unexpected behaviour. - paths: - - /var/log/*.log - #- c:\programdata\elasticsearch\logs\* - - # Configure the file encoding for reading files with international characters - # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). - # Some sample encodings: - # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, - # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... - #encoding: plain - - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, no lines are dropped. - # Line filtering happens after the parsers pipeline. If you would like to filter lines - # before parsers, use include_message parser. - #exclude_lines: ['^DBG'] - - # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, all the lines are exported. - # Line filtering happens after the parsers pipeline. If you would like to filter lines - # before parsers, use include_message parser. - #include_lines: ['^ERR', '^WARN'] - - ### Prospector options - - # How often the input checks for new files in the paths that are specified - # for harvesting. Specify 1s to scan the directory as frequently as possible - # without causing Filebeat to scan too frequently. Default: 10s. - #prospector.scanner.check_interval: 10s - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #prospector.scanner.exclude_files: ['.gz$'] - - # Include files. A list of regular expressions to match. Filebeat keeps only the files that - # are matching any regular expression from the list. By default, no files are dropped. - #prospector.scanner.include_files: ['/var/log/.*'] - - # Expand "**" patterns into regular glob patterns. - #prospector.scanner.recursive_glob: true - - # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the - # original for harvesting but will report the symlink name as the source. - #prospector.scanner.symlinks: false - - # If enabled, instead of relying on the device ID and inode values when comparing files, - # compare hashes of the given byte ranges in files. A file becomes an ingest target - # when its size grows larger than offset+length (see below). Until then it's ignored. - #prospector.scanner.fingerprint.enabled: false - - # If fingerprint mode is enabled, sets the offset from the beginning of the file - # for the byte range used for computing the fingerprint value. - #prospector.scanner.fingerprint.offset: 0 - - # If fingerprint mode is enabled, sets the length of the byte range used for - # computing the fingerprint value. Cannot be less than 64 bytes. - #prospector.scanner.fingerprint.length: 1024 - - ### Parsers configuration - - #### JSON configuration - - #parsers: - #- ndjson: - # Decode JSON options. Enable this if your logs are structured in JSON. - # JSON key on which to apply the line filtering and multiline settings. This key - # must be top level and its value must be a string, otherwise it is ignored. If - # no text key is defined, the line filtering and multiline features cannot be used. 
- #message_key: - - # By default, the decoded JSON is placed under a "json" key in the output document. - # If you enable this setting, the keys are copied to the top level of the output document. - #keys_under_root: false - - # If keys_under_root and this setting are enabled, then the values from the decoded - # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) - # in case of conflicts. - #overwrite_keys: false - - # If this setting is enabled, then keys in the decoded JSON object will be recursively - # de-dotted, and expanded into a hierarchical object structure. - # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`. - #expand_keys: false - - # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON - # unmarshaling errors or when a text key is defined in the configuration but cannot - # be used. - #add_error_key: false - - #### Filtering messages - - # You can filter messsages in the parsers pipeline. Use this method if you would like to - # include or exclude lines before they are aggregated into multiline or the JSON contents - # are parsed. - - #parsers: - #- include_message.patterns: - #- ["WARN", "ERR"] +- module: zoom + webhook: + enabled: false + + # The type of input to use + #var.input: http_endpoint + + # The interface to listen for incoming HTTP requests. Defaults to + # localhost. Set to 0.0.0.0 to bind to all available interfaces. + #var.listen_address: localhost + + # The port to bind to + #var.listen_port: 80 + + # The header Zoom uses to send its secret token, defaults to "Authorization" + #secret.header: Authorization + + # The custom secret token value created when configuring the Zoom webhook + #secret.value: my-custom-value + + # Enable the CRC webhook validation + #crc.enabled: false + + # The secret token value provided by Zoom for CRC validation + #crc.secret: ZOOMSECRETTOKEN - #### Multiline options - - # Multiline can be used for log messages spanning multiple lines. This is common - # for Java Stack Traces or C-Line Continuation - - #parsers: - #- multiline: - #type: pattern - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #pattern: ^\[ - - # Defines if the pattern set under the pattern setting should be negated or not. Default is false. - #negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. - # Note: After is the equivalent to previous and before is the equivalent to next in Logstash - #match: after - - # The maximum number of lines that are combined into one event. - # In case there are more than max_lines the additional lines are discarded. - # Default is 500 - #max_lines: 500 - - # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event - # Default is 5s. - #timeout: 5s - - # Do not add new line character when concatenating lines. - #skip_newline: false - - # To aggregate constant number of lines into a single event use the count mode of multiline. - - #parsers: - #- multiline: - #type: count - - # The number of lines to aggregate into a single event. - #count_lines: 3 - - # The maximum number of lines that are combined into one event. - # In case there are more than max_lines the additional lines are discarded. 
- # Default is 500 - #max_lines: 500 - - # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event - # Default is 5s. - #timeout: 5s - - # Do not add new line characters when concatenating lines. - #skip_newline: false - - #### Parsing container events - - # You can parse container events with different formats from all streams. - - #parsers: - #- container: - # Source of container events. Available options: all, stdin, stderr. - #stream: all - - # Format of the container events. Available options: auto, cri, docker, json-file - #format: auto - - ### Log rotation - - # When an external tool rotates the input files with copytruncate strategy - # use this section to help the input find the rotated files. - #rotation.external.strategy.copytruncate: - # Regex that matches the rotated files. - # suffix_regex: \.\d$ - # If the rotated filename suffix is a datetime, set it here. - # dateformat: -20060102 - - ### State options - - # Files for the modification data is older than clean_inactive the state from the registry is removed - # By default this is disabled. - #clean_inactive: -1 - - # Removes the state for files which cannot be found on disk anymore immediately - #clean_removed: true - - # Method to determine if two files are the same or not. By default - # the Beat considers two files the same if their inode and device id are the same. - #file_identity.native: ~ - - # Optional additional fields. These fields can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - # Set to true to publish fields with null values in events. - #keep_null: false - - # By default, all events contain `host.name`. This option can be set to true - # to disable the addition of this field to all events. The default value is - # false. - #publisher_pipeline.disable_host: false - - # Ignore files that were modified more than the defined timespan in the past. - # ignore_older is disabled by default, so no files are ignored by setting it to 0. - # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. - #ignore_older: 0 - - # Ignore files that have not been updated since the selected event. - # ignore_inactive is disabled by default, so no files are ignored by setting it to "". - # Available options: since_first_start, since_last_start. - #ignore_inactive: "" - - # If `take_over` is set to `true`, this `filestream` will take over all files - # from `log` inputs if they match at least one of the `paths` set in the `filestream`. - # This functionality is still in beta. - #take_over: false - - # Defines the buffer size every harvester uses when fetching the file - #harvester_buffer_size: 16384 - - # Maximum number of bytes a single log event can have - # All bytes after max_bytes are discarded and not sent. The default is 10MB. - # This is especially useful for multiline log messages which can get large. - #message_max_bytes: 10485760 - - # Characters that separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, - # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator, - # null_terminator - #line_terminator: auto - - # The ingest pipeline ID associated with this input. If this is set, it - # overwrites the pipeline option from the Elasticsearch output. - #pipeline: - - # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. 
Backoff defines how long it has to wait - # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real-time crawling. - # Every time a new line appears, backoff is reset to the initial value. - #backoff.init: 1s - - # Max backoff defines what the maximum backoff time is. After having backed off multiple times - # from checking the files, the waiting time will never exceed max_backoff independent of the - # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log - # file after having backed off multiple times, it takes a maximum of 10s to read the new line - #backoff.max: 10s - - ### Harvester closing options - - # Close inactive closes the file handler after the predefined period. - # The period starts when the last line of the file was, not the file ModTime. - # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. - #close.on_state_change.inactive: 5m - - # Close renamed closes a file handler when the file is renamed or rotated. - # Note: Potential data loss. Make sure to read and understand the docs for this option. - #close.on_state_change.renamed: false - - # When enabling this option, a file handler is closed immediately in case a file can't be found - # any more. In case the file shows up again later, harvesting will continue at the last known position - # after scan_frequency. - #close.on_state_change.removed: true - - # Closes the file handler as soon as the harvesters reaches the end of the file. - # By default this option is disabled. - # Note: Potential data loss. Make sure to read and understand the docs for this option. - #close.reader.on_eof: false - - # Close timeout closes the harvester after the predefined time. - # This is independent if the harvester did finish reading the file or not. - # By default this option is disabled. - # Note: Potential data loss. Make sure to read and understand the docs for this option. - #close.reader.after_interval: 0 - -#----------------------------- Stdin input ------------------------------- -# Configuration to use stdin input -#- type: stdin - -#------------------------- Redis slowlog input --------------------------- -# Experimental: Config options for the redis slow log input -#- type: redis - #enabled: false - - # List of hosts to pool to retrieve the slow log information. - #hosts: ["localhost:6379"] - - # How often the input checks for redis slow log. - #scan_frequency: 10s - - # Timeout after which time the input should return an error - #timeout: 1s - - # Network type to be used for redis connection. Default: tcp - #network: tcp - - # Max number of concurrent connections. Default: 10 - #maxconn: 10 - - # Redis AUTH password. Empty by default. 
- #password: foobared - -#------------------------------ Udp input -------------------------------- -# Experimental: Config options for the udp input -#- type: udp - #enabled: false - - # Maximum size of the message received over UDP - #max_message_size: 10KiB - - # Size of the UDP read buffer in bytes - #read_buffer: 0 - - -#------------------------------ TCP input -------------------------------- -# Experimental: Config options for the TCP input -#- type: tcp - #enabled: false - - # The host and port to receive the new event - #host: "localhost:9000" - - # Character used to split new message - #line_delimiter: "\n" - - # Maximum size in bytes of the message received over TCP - #max_message_size: 20MiB - - # Max number of concurrent connections, or 0 for no limit. Default: 0 - #max_connections: 0 - - # The number of seconds of inactivity before a remote connection is closed. - #timeout: 300s - - # Use SSL settings for TCP. - #ssl.enabled: true - - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] - - # SSL configuration. By default is off. - # List of root certificates for client verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL server authentication. - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Server Certificate Key, - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the Certificate Key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections. - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites. - #ssl.curve_types: [] - - # Configure what types of client authentication are supported. Valid options - # are `none`, `optional`, and `required`. When `certificate_authorities` is set it will - # default to `required` otherwise it will be set to `none`. - #ssl.client_authentication: "required" - - -#------------------------------ Kafka input -------------------------------- -# Accept events from topics in a Kafka cluster. -#- type: kafka - #enabled: false - - # A list of hosts/ports for the initial Kafka brokers. - #hosts: - #- kafka-broker-1:9092 - #- kafka-broker-2:9092 - - # A list of topics to read from. - #topics: ["my-topic", "important-logs"] - - # The Kafka consumer group id to use when connecting. - #group_id: "filebeat" - - # An optional Kafka client id to attach to Kafka requests. - #client_id: "my-client" - - # The version of the Kafka protocol to use. - #version: 1.0 - - # Set to "newest" to start reading from the most recent message when connecting to a - # new topic, otherwise the input will begin reading at the oldest remaining event. - #initial_offset: oldest - - # How long to wait before trying to reconnect to the kafka cluster after a fatal error. - #connect_backoff: 30s - - # How long to wait before retrying a failed read. - #consume_backoff: 2s - - # How long to wait for the minimum number of input bytes while reading. - #max_wait_time: 250ms - - # The Kafka isolation level, "read_uncommitted" or "read_committed". - #isolation_level: read_uncommitted - - # Some Kafka deployments such as Microsoft Azure can return multiple events packed into a - # single data field. Set this field to specify where events should be unpacked from. - #expand_event_list_from_field: "records" - - # The minimum number of bytes to wait for. - #fetch.min: 1 - - # The default number of bytes to read per request. 
- #fetch.default: 1MB - - # The maximum number of bytes to read per request (0 for no limit). - #fetch.max: 0 - - # Consumer rebalance strategy, "range" or "roundrobin" - #rebalance.strategy: "range" - - # How long to wait for an attempted rebalance. - #rebalance.timeout: 60s - - # How many times to retry if rebalancing fails. - #rebalance.max_retries: 4 - - # How long to wait after an unsuccessful rebalance attempt. - #rebalance.retry_backoff: 2s - - # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512. - # Defaults to PLAIN when `username` and `password` are configured. - #sasl.mechanism: '' - - # Parsers can be used with the Kafka input. The available parsers are "ndjson" and - # "multiline". See the filestream input configuration for more details. - #parsers: - #- ndjson: - # ... - #- multiline: - # ... - - -#------------------------------ Syslog input -------------------------------- -# Accept RFC3164 formatted syslog event via UDP. -#- type: syslog - #enabled: false - #format: rfc3164 - #protocol.udp: - # The host and port to receive the new event - #host: "localhost:9000" - - # Maximum size of the message received over UDP - #max_message_size: 10KiB - -# Accept RFC5424 formatted syslog event via TCP. -#- type: syslog - #enabled: false - #format: rfc5424 - - #protocol.tcp: - # The host and port to receive the new event - #host: "localhost:9000" - - # Character used to split new message - #line_delimiter: "\n" - - # Maximum size in bytes of the message received over TCP - #max_message_size: 20MiB - - # The number of seconds of inactivity before a remote connection is closed. - #timeout: 300s - - # Use SSL settings for TCP. - #ssl.enabled: true - - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] - - # SSL configuration. By default is off. - # List of root certificates for client verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL server authentication. - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Server Certificate Key, - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the Certificate Key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections. - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites. - #ssl.curve_types: [] - - # Configure what types of client authentication are supported. Valid options - # are `none`, `optional`, and `required`. When `certificate_authorities` is set it will - # default to `required` otherwise it will be set to `none`. - #ssl.client_authentication: "required" - -#------------------------------ Container input -------------------------------- -#- type: container - #enabled: false - - # Paths for container logs that should be crawled and fetched. - #paths: - # -/var/lib/docker/containers/*/*.log - - # Configure stream to filter to a specific stream: stdout, stderr or all (default) - #stream: all - -#------------------------------ Journald input -------------------------------- -# Journald input is experimental. -#- type: journald - #enabled: true - #id: service-foo - - # You may wish to have separate inputs for each service. You can use - # include_matches.or to specify a list of filter expressions that are - # applied as a logical OR. 
You may specify filter - #include_matches.match: - #- _SYSTEMD_UNIT=foo.service - - # List of syslog identifiers - #syslog_identifiers: ["audit"] - - # Collect events from the service and messages about the service, - # including coredumps. - #units: ["docker.service"] - - # The list of transports (_TRANSPORT field of journald entries) - #transports: ["audit"] - - # Parsers are also supported, here is an example of the multiline - # parser. - #parsers: - #- multiline: - #type: count - #count_lines: 3 - -#------------------------------ NetFlow input -------------------------------- -# Experimental: Config options for the Netflow/IPFIX collector over UDP input -#- type: netflow - #enabled: false - - # Address where the NetFlow Collector will bind - #host: ":2055" - - # Maximum size of the message received over UDP - #max_message_size: 10KiB - - # List of enabled protocols. - # Valid values are 'v1', 'v5', 'v6', 'v7', 'v8', 'v9' and 'ipfix' - #protocols: [ v5, v9, ipfix ] - - # Expiration timeout - # This is the time before an idle session or unused template is expired. - # Only applicable to v9 and ipfix protocols. A value of zero disables expiration. - #expiration_timeout: 30m - - # Share Templates - # This option allows v9 and ipfix templates to be shared within a session without - # reference to the origin of the template. - # - # Setting this to true is not recommended as it can result in the wrong template - # being applied under certain conditions, but it may be required for some systems. - #share_templates: false - - # Queue size limits the number of netflow packets that are queued awaiting - # processing. - #queue_size: 8192 - - # Custom field definitions for NetFlow V9 / IPFIX. - # List of files with YAML fields definition. - #custom_definitions: - #- path/to/ipfix.yaml - #- path/to/netflow.yaml - -#---------------------------- Google Cloud Pub/Sub Input ----------------------- -# Input for reading messages from a Google Cloud Pub/Sub topic subscription. -- type: gcp-pubsub - enabled: false - - # Google Cloud project ID. Required. - project_id: my-gcp-project-id - - # Google Cloud Pub/Sub topic name. Required. - topic: my-gcp-pubsub-topic-name - - # Google Cloud Pub/Sub topic subscription name. Required. - subscription.name: my-gcp-pubsub-subscription-name - - # Create subscription if it does not exist. - #subscription.create: true - - # Number of goroutines to create to read from the subscription. - #subscription.num_goroutines: 1 - - # Maximum number of unprocessed messages to allow at any time. - # This must be at least queue.mem.flush.min_events to prevent input blockage. - #subscription.max_outstanding_messages: 1600 - - # Path to a JSON file containing the credentials and key used to subscribe. - credentials_file: ${path.config}/my-pubsub-subscriber-credentials.json - -#------------------------------ AWS S3 input -------------------------------- -# Beta: Config options for AWS S3 input -#- type: aws-s3 - #enabled: false - - # AWS Credentials - # If access_key_id and secret_access_key are configured, then use them to make api calls. - # If not, aws-s3 input will load default AWS config or load with given profile name. - #access_key_id: '${AWS_ACCESS_KEY_ID:""}' - #secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' - #session_token: '${AWS_SESSION_TOKEN:"”}' - #credential_profile_name: test-aws-s3-input - - # SQS queue URL to receive messages from (required). 
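For the NetFlow/IPFIX collector described above, a minimal, hedged sketch might look as follows; the bind address and protocol list are placeholders.

filebeat.inputs:
  - type: netflow
    enabled: true
    # Placeholder UDP address for the collector to bind to.
    host: ":2055"
    max_message_size: 10KiB
    # Accept NetFlow v5, v9 and IPFIX records.
    protocols: [ v5, v9, ipfix ]
    queue_size: 8192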
- #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-aws-s3-logs-queue" - - # Number of workers on S3 bucket or SQS queue - #number_of_workers: 5 - - # Maximum duration of an AWS API call (excluding S3 GetObject calls). - #api_timeout: 120s - - # Duration that received SQS messages are hidden from subsequent - # requests after being retrieved by a ReceiveMessage request. - #visibility_timeout: 300s - - # List of S3 object metadata keys to include in events. - #include_s3_metadata: [] - - # The max number of times an SQS message should be received (retried) before deleting it. - #sqs.max_receive_count: 5 - - # Maximum duration for which the SQS ReceiveMessage call waits for a message - # to arrive in the queue before returning. - #sqs.wait_time: 20s - - # Bucket ARN used for polling AWS S3 buckets - #bucket_arn: arn:aws:s3:::test-s3-bucket - - # Bucket Name used for polling non-AWS S3 buckets - #non_aws_bucket_name: test-s3-bucket - - # Configures the AWS S3 API to use path style instead of virtual host style (default) - #path_style: false - - # Overrides the `cloud.provider` field for non-AWS S3 buckets. See docs for auto recognized providers. - #provider: minio - - # Configures backing up processed files to another (or the same) bucket - #backup_to_bucket_arn: 'arn:aws:s3:::mybucket' - #non_aws_backup_to_bucket_name: 'mybucket' - - # Sets a prefix to prepend to object keys when backing up - #backup_to_bucket_prefix: 'backup/' - - # Controls deletion of objects after backing them up - #delete_after_backup: false - -#------------------------------ AWS CloudWatch input -------------------------------- -# Beta: Config options for AWS CloudWatch input -#- type: aws-cloudwatch - #enabled: false - - # AWS Credentials - # If access_key_id and secret_access_key are configured, then use them to make api calls. - # If not, aws-cloudwatch input will load default AWS config or load with given profile name. - #access_key_id: '${AWS_ACCESS_KEY_ID:""}' - #secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' - #session_token: '${AWS_SESSION_TOKEN:"”}' - #credential_profile_name: test-aws-s3-input - - # ARN of the log group to collect logs from - # This ARN could refer to a log group from a linked source account - # Note: This property precedes over `log_group_name` & `log_group_name_prefix` - #log_group_arn: "arn:aws:logs:us-east-1:428152502467:log-group:test:*" - - # Name of the log group to collect logs from. - # Note: region_name is required when log_group_name is given. - #log_group_name: test - - # The prefix for a group of log group names. - # You can include linked source accounts by using the property `include_linked_accounts_for_prefix_mode`. - # Note: `region_name` is required when `log_group_name_prefix` is given. - # `log_group_name` and `log_group_name_prefix` cannot be given at the same time. - #log_group_name_prefix: /aws/ - - # State whether to include linked source accounts when obtaining log groups matching the prefix provided through `log_group_name_prefix` - # This property works together with `log_group_name_prefix` and default value (if unset) is false - #include_linked_accounts_for_prefix_mode: true - - # Region that the specified log group or log group prefix belongs to. - #region_name: us-east-1 - - # A list of strings of log streams names that Filebeat collect log events from. - #log_streams: - # - log_stream_name - - # A string to filter the results to include only log events from log streams - # that have names starting with this prefix. 
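Combining the SQS-related options above, here is a minimal, hedged sketch of an aws-s3 input driven by SQS notifications; the queue URL and credential profile are placeholders.

filebeat.inputs:
  - type: aws-s3
    enabled: true
    # Placeholder SQS queue that receives S3 object-created notifications.
    queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-aws-s3-logs-queue"
    number_of_workers: 5
    visibility_timeout: 300s
    # Placeholder AWS credential profile; access keys could be used instead.
    credential_profile_name: test-aws-s3-input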
- #log_stream_prefix: test - - # `start_position` allows user to specify if this input should read log files - # from the `beginning` or from the `end`. - # `beginning`: reads from the beginning of the log group (default). - # `end`: read only new messages from current time minus `scan_frequency` going forward. - #start_position: beginning - - # This config parameter sets how often Filebeat checks for new log events from the - # specified log group. Default `scan_frequency` is 1 minute, which means Filebeat - # will sleep for 1 minute before querying for new logs again. - #scan_frequency: 1m - - # The maximum duration of AWS API can take. If it exceeds the timeout, AWS API - # will be interrupted. - # The default AWS API timeout for a message is 120 seconds. - # The minimum is 0 seconds. - #api_timeout: 120s - - # This is used to sleep between AWS `FilterLogEvents` API calls inside the same - # collection period. - #api_sleep: 200ms - - # This is used to shift collection start time and end time back in order to - # collect logs when there is a delay in CloudWatch. - #latency: 1m - -#------------------------------ ETW input -------------------------------- -# Beta: Config options for ETW (Event Trace for Windows) input (Only available for Windows) -#- type: etw - #enabled: false - #id: etw-dnsserver - - # Path to an .etl file to read from. - #file: "C:\Windows\System32\Winevt\Logs\Logfile.etl" - - # GUID of an ETW provider. - # Run 'logman query providers' to list the available providers. - #provider.guid: {EB79061A-A566-4698-9119-3ED2807060E7} - - # Name of an ETW provider. - # Run 'logman query providers' to list the available providers. - #provider.name: Microsoft-Windows-DNSServer - - # Tag to identify created sessions. - # If missing, its default value is the provider ID prefixed by 'Elastic-'. - #session_name: DNSServer-Analytical-Trace - - # Filter collected events with a level value that is less than or equal to this level. - # Allowed values are critical, error, warning, informational, and verbose. - #trace_level: verbose - - # 8-byte bitmask that enables the filtering of events from specific provider subcomponents. - # The provider will write a particular event if the event's keyword bits match any of the bits - # in this bitmask. - # Run 'logman query providers ""' to list available keywords. - #match_any_keyword: 0x8000000000000000 - - # 8-byte bitmask that enables the filtering of events from - # specific provider subcomponents. The provider will write a particular - # event if the event's keyword bits match all of the bits in this bitmask. - # Run 'logman query providers ""' to list available keywords. - #match_all_keyword: 0 - - # An existing session to read from. - # Run 'logman query -ets' to list existing sessions. - #session: UAL_Usermode_Provider - -# =========================== Filebeat autodiscover ============================ - -# Autodiscover allows you to detect changes in the system and spawn new modules -# or inputs as they happen. 
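Before the autodiscover examples that follow, here is a minimal, hedged sketch of the aws-cloudwatch input described above; the log group and region are placeholders.

filebeat.inputs:
  - type: aws-cloudwatch
    enabled: true
    # Placeholder log group and its region.
    log_group_name: test
    region_name: us-east-1
    # Read the log group from the beginning, then poll every minute.
    start_position: beginning
    scan_frequency: 1m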
- -#filebeat.autodiscover: - # List of enabled autodiscover providers -# providers: -# - type: docker -# templates: -# - condition: -# equals.docker.container.image: busybox -# config: -# - type: container -# paths: -# - /var/log/containers/*.log - -#Example: for kubernetes container logs autodiscovery -# filebeat.autodiscover: -# providers: -# - type: kubernetes -# node: ${NODE_NAME} -# hints.enabled: true -# # By default requests to kubeadm config map are made in order to enrich cluster name by requesting /api/v1/namespaces/kube-system/configmaps/kubeadm-config API endpoint. -# use_kubeadm: true -# hints.default_config: -# type: filestream -# id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} -# paths: -# - /var/log/containers/*-${data.kubernetes.container.id}.log -# parsers: -# - container: ~ -# prospector: -# scanner: -# fingerprint.enabled: true -# symlinks: true -# file_identity.fingerprint: ~ - -#By default requests to kubeadm config map are made in order to enrich cluster name by requesting /api/v1/namespaces/kube-system/configmaps/kubeadm-config API endpoint. -# use_kubeadm: true - -# ========================== Filebeat global options =========================== - -# Registry data path. If a relative path is used, it is considered relative to the -# data path. -#filebeat.registry.path: ${path.data}/registry - -# The permissions mask to apply on registry data and meta files. The default -# value is 0600. Must be a valid Unix-style file permissions mask expressed in -# octal notation. This option is not supported on Windows. -#filebeat.registry.file_permissions: 0600 - -# The timeout value that controls when registry entries are written to the disk -# (flushed). When an unwritten update exceeds this value, it triggers a write -# to disk. When flush is set to 0s, the registry is written to disk after each -# batch of events has been published successfully. The default value is 1s. -#filebeat.registry.flush: 1s - - -# Starting with Filebeat 7.0, the registry uses a new directory format to store -# Filebeat state. After you upgrade, Filebeat will automatically migrate a 6.x -# registry file to use the new directory format. If you changed -# filebeat.registry.path while upgrading, set filebeat.registry.migrate_file to -# point to the old registry file. -#filebeat.registry.migrate_file: ${path.data}/registry - -# By default Ingest pipelines are not updated if a pipeline with the same ID -# already exists. If this option is enabled Filebeat overwrites pipelines -# every time a new Elasticsearch connection is established. -#filebeat.overwrite_pipelines: false - -# How long filebeat waits on shutdown for the publisher to finish. -# Default is 0, not waiting. -#filebeat.shutdown_timeout: 0 - -# Enable filebeat config reloading -#filebeat.config: - #inputs: - #enabled: false - #path: inputs.d/*.yml - #reload.enabled: true - #reload.period: 10s - #modules: - #enabled: true - #path: modules.d/*.yml - #reload.enabled: true - #reload.period: 10s - - -# ================================== General =================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -# If this option is not defined, the hostname is used. -#name: - -# The tags of the shipper are included in their field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. 
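As a concrete illustration of the config reloading options above, this hedged sketch loads and periodically reloads external input definitions; the glob and reload period are placeholders.

filebeat.config:
  inputs:
    enabled: true
    # Placeholder glob for external input definition files.
    path: inputs.d/*.yml
    reload.enabled: true
    reload.period: 10s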
-#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. -#fields: -# env: staging - -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a field -# sub-dictionary. Default is false. -#fields_under_root: false - -# Configure the precision of all timestamps in Filebeat. -# Available options: millisecond, microsecond, nanosecond -#timestamp.precision: millisecond - -# Internal queue configuration for buffering events to be published. -# Queue settings may be overridden by performance presets in the -# Elasticsearch output. To configure them manually use "preset: custom". -#queue: - # Queue type by name (default 'mem') - # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to serve - # another batch of events. - #mem: - # Max number of events the queue can buffer. - #events: 3200 - - # Hints the minimum number of events stored in the queue, - # before providing a batch of events to the outputs. - # The default value is set to 2048. - # A value of 0 ensures events are immediately available - # to be sent to the outputs. - #flush.min_events: 1600 - - # Maximum duration after which events are available to the outputs, - # if the number of events stored in the queue is < `flush.min_events`. - #flush.timeout: 10s - - # The disk queue stores incoming events on disk until the output is - # ready for them. This allows a higher event limit than the memory-only - # queue and lets pending events persist through a restart. - #disk: - # The directory path to store the queue's data. - #path: "${path.data}/diskqueue" - - # The maximum space the queue should occupy on disk. Depending on - # input settings, events that exceed this limit are delayed or discarded. - #max_size: 10GB - - # The maximum size of a single queue data file. Data in the queue is - # stored in smaller segments that are deleted after all their events - # have been processed. - #segment_size: 1GB - - # The number of events to read from disk to memory while waiting for - # the output to request them. - #read_ahead: 512 - - # The number of events to accept from inputs while waiting for them - # to be written to disk. If event data arrives faster than it - # can be written to disk, this setting prevents it from overflowing - # main memory. - #write_ahead: 2048 - - # The duration to wait before retrying when the queue encounters a disk - # write error. - #retry_interval: 1s - - # The maximum length of time to wait before retrying on a disk write - # error. If the queue encounters repeated errors, it will double the - # length of its retry interval each time, up to this maximum. - #max_retry_interval: 30s - -# Sets the maximum number of CPUs that can be executed simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - -# ================================= Processors ================================= - -# Processors are used to reduce the number of fields in the exported event or to -# enhance the event with external metadata. This section defines a list of -# processors that are applied one by one and the first one receives the initial -# event: -# -# event -> filter1 -> event1 -> filter2 ->event2 ... 
-# -# The supported processors are drop_fields, drop_event, include_fields, -# decode_json_fields, and add_cloud_metadata. -# -# For example, you can use the following processors to keep the fields that -# contain CPU load percentages, but remove the fields that contain CPU ticks -# values: -# -#processors: -# - include_fields: -# fields: ["cpu"] -# - drop_fields: -# fields: ["cpu.user", "cpu.system"] -# -# The following example drops the events that have the HTTP response code 200: -# -#processors: -# - drop_event: -# when: -# equals: -# http.code: 200 -# -# The following example renames the field a to b: -# -#processors: -# - rename: -# fields: -# - from: "a" -# to: "b" -# -# The following example tokenizes the string into fields: -# -#processors: -# - dissect: -# tokenizer: "%{key1} - %{key2}" -# field: "message" -# target_prefix: "dissect" -# -# The following example enriches each event with metadata from the cloud -# provider about the host machine. It works on EC2, GCE, DigitalOcean, -# Tencent Cloud, and Alibaba Cloud. -# -#processors: -# - add_cloud_metadata: ~ -# -# The following example enriches each event with the machine's local time zone -# offset from UTC. -# -#processors: -# - add_locale: -# format: offset -# -# The following example enriches each event with docker metadata, it matches -# given fields to an existing container id and adds info from that container: -# -#processors: -# - add_docker_metadata: -# host: "unix:///var/run/docker.sock" -# match_fields: ["system.process.cgroup.id"] -# match_pids: ["process.pid", "process.parent.pid"] -# match_source: true -# match_source_index: 4 -# match_short_id: false -# cleanup_timeout: 60 -# labels.dedot: false -# # To connect to Docker over TLS you must specify a client and CA certificate. -# #ssl: -# # certificate_authority: "/etc/pki/root/ca.pem" -# # certificate: "/etc/pki/client/cert.pem" -# # key: "/etc/pki/client/cert.key" -# -# The following example enriches each event with docker metadata, it matches -# container id from log path available in `source` field (by default it expects -# it to be /var/lib/docker/containers/*/*.log). -# -#processors: -# - add_docker_metadata: ~ -# -# The following example enriches each event with host metadata. -# -#processors: -# - add_host_metadata: ~ -# -# The following example enriches each event with process metadata using -# process IDs included in the event. -# -#processors: -# - add_process_metadata: -# match_pids: ["system.process.ppid"] -# target: system.process.parent -# -# The following example decodes fields containing JSON strings -# and replaces the strings with valid JSON objects. -# -#processors: -# - decode_json_fields: -# fields: ["field1", "field2", ...] 
-# process_array: false -# max_depth: 1 -# target: "" -# overwrite_keys: false -# -#processors: -# - decompress_gzip_field: -# from: "field1" -# to: "field2" -# ignore_missing: false -# fail_on_error: true -# -# The following example copies the value of the message to message_copied -# -#processors: -# - copy_fields: -# fields: -# - from: message -# to: message_copied -# fail_on_error: true -# ignore_missing: false -# -# The following example truncates the value of the message to 1024 bytes -# -#processors: -# - truncate_fields: -# fields: -# - message -# max_bytes: 1024 -# fail_on_error: false -# ignore_missing: true -# -# The following example preserves the raw message under event.original -# -#processors: -# - copy_fields: -# fields: -# - from: message -# to: event.original -# fail_on_error: false -# ignore_missing: true -# - truncate_fields: -# fields: -# - event.original -# max_bytes: 1024 -# fail_on_error: false -# ignore_missing: true -# -# The following example URL-decodes the value of field1 to field2 -# -#processors: -# - urldecode: -# fields: -# - from: "field1" -# to: "field2" -# ignore_missing: false -# fail_on_error: true - -# =============================== Elastic Cloud ================================ - -# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/). - -# The cloud.id setting overwrites the `output.elasticsearch.hosts` and -# `setup.kibana.host` options. -# You can find the `cloud.id` in the Elastic Cloud web UI. -#cloud.id: - -# The cloud.auth setting overwrites the `output.elasticsearch.username` and -# `output.elasticsearch.password` settings. The format is `:`. -#cloud.auth: - -# ================================== Outputs =================================== - -# Configure what output to use when sending the data collected by the beat. - -# ---------------------------- Elasticsearch Output ---------------------------- -output.elasticsearch: - # Boolean flag to enable or disable the output module. - #enabled: true - - # Array of hosts to connect to. - # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 - hosts: ["localhost:9200"] - - # Performance presets configure other output fields to recommended values - # based on a performance priority. - # Options are "balanced", "throughput", "scale", "latency" and "custom". - # Default if unspecified: "custom" - preset: balanced - - # Set gzip compression level. Set to 0 to disable compression. - # This field may conflict with performance presets. To set it - # manually use "preset: custom". - # The default is 1. - #compression_level: 1 - - # Configure escaping HTML symbols in strings. - #escape_html: false - - # Protocol - either `http` (default) or `https`. - #protocol: "https" - - # Authentication credentials - either API key or username/password. - #api_key: "id:api_key" - #username: "elastic" - #password: "changeme" - - # Dictionary of HTTP parameters to pass within the URL with index operations. - #parameters: - #param1: value1 - #param2: value2 - - # Number of workers per Elasticsearch host. - # This field may conflict with performance presets. To set it - # manually use "preset: custom". - #worker: 1 - - # If set to true and multiple hosts are configured, the output plugin load - # balances published events onto all Elasticsearch hosts. 
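Drawing on the Elasticsearch output options above, here is a minimal, hedged sketch; the host and API key are placeholders, and only one output section may be enabled at a time.

output.elasticsearch:
  # Placeholder Elasticsearch endpoint.
  hosts: ["https://localhost:9200"]
  # Apply the recommended throughput-oriented performance settings.
  preset: throughput
  # Placeholder API key credentials; username/password could be used instead.
  api_key: "id:api_key"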
If set to false, - # the output plugin sends all events to only one host (determined at random) - # and will switch to another host if the currently selected one becomes - # unreachable. The default value is true. - #loadbalance: true - - # Optional data stream or index name. The default is "filebeat-%{[agent.version]}". - # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. - #index: "filebeat-%{[agent.version]}" - - # Optional ingest pipeline. By default, no pipeline will be used. - #pipeline: "" - - # Optional HTTP path - #path: "/elasticsearch" - - # Custom HTTP headers to add to each request - #headers: - # X-My-Header: Contents of the header - - # Proxy server URL - #proxy_url: http://proxy:3128 - - # Whether to disable proxy settings for outgoing connections. If true, this - # takes precedence over both the proxy_url field and any environment settings - # (HTTP_PROXY, HTTPS_PROXY). The default is false. - #proxy_disable: false - - # The number of times a particular Elasticsearch index operation is attempted. If - # the indexing operation doesn't succeed after this many retries, the events are - # dropped. The default is 3. - #max_retries: 3 - - # The maximum number of events to bulk in a single Elasticsearch bulk API index request. - # This field may conflict with performance presets. To set it - # manually use "preset: custom". - # The default is 1600. - #bulk_max_size: 1600 - - # The number of seconds to wait before trying to reconnect to Elasticsearch - # after a network error. After waiting backoff.init seconds, the Beat - # tries to reconnect. If the attempt fails, the backoff timer is increased - # exponentially up to backoff.max. After a successful connection, the backoff - # timer is reset. The default is 1s. - #backoff.init: 1s - - # The maximum number of seconds to wait before attempting to connect to - # Elasticsearch after a network error. The default is 60s. - #backoff.max: 60s - - # The maximum amount of time an idle connection will remain idle - # before closing itself. Zero means use the default of 60s. The - # format is a Go language duration (example 60s is 60 seconds). - # This field may conflict with performance presets. To set it - # manually use "preset: custom". - # The default is 3s. - # idle_connection_timeout: 3s - - # Configure HTTP request timeout before failing a request to Elasticsearch. - #timeout: 90 - - # Prevents filebeat from connecting to older Elasticsearch versions when set to `false` - #allow_older_versions: true - - # Use SSL settings for HTTPS. - #ssl.enabled: true - - # Controls the verification of certificates. Valid values are: - # * full, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. - # * strict, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. If the Subject Alternative - # Name is empty, it returns an error. - # * certificate, which verifies that the provided certificate is signed by a - # trusted authority (CA), but does not perform any hostname verification. - # * none, which performs no verification of the server's certificate. This - # mode disables many of the security benefits of SSL/TLS and should only be used - # after very careful consideration. 
It is primarily intended as a temporary - # diagnostic mechanism when attempting to resolve TLS errors; its use in - # production environments is strongly discouraged. - # The default value is full. - #ssl.verification_mode: full - - # List of supported/valid TLS versions. By default all TLS versions from 1.1 - # up to 1.3 are enabled. - #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client certificate key - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the certificate key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE-based cipher suites - #ssl.curve_types: [] - - # Configure what types of renegotiation are supported. Valid options are - # never, once, and freely. Default is never. - #ssl.renegotiation: never - - # Configure a pin that can be used to do extra validation of the verified certificate chain, - # this allow you to ensure that a specific certificate is used to validate the chain of trust. - # - # The pin is a base64 encoded string of the SHA-256 fingerprint. - #ssl.ca_sha256: "" - - # A root CA HEX encoded fingerprint. During the SSL handshake if the - # fingerprint matches the root CA certificate, it will be added to - # the provided list of root CAs (`certificate_authorities`), if the - # list is empty or not defined, the matching certificate will be the - # only one in the list. Then the normal SSL validation happens. - #ssl.ca_trusted_fingerprint: "" - - - # Enables restarting filebeat if any file listed by `key`, - # `certificate`, or `certificate_authorities` is modified. - # This feature IS NOT supported on Windows. - #ssl.restart_on_cert_change.enabled: false - - # Period to scan for changes on CA certificate files - #ssl.restart_on_cert_change.period: 1m - - # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. - #kerberos.enabled: true - - # Authentication type to use with Kerberos. Available options: keytab, password. - #kerberos.auth_type: password - - # Path to the keytab file. It is used when auth_type is set to keytab. - #kerberos.keytab: /etc/elastic.keytab - - # Path to the Kerberos configuration. - #kerberos.config_path: /etc/krb5.conf - - # Name of the Kerberos user. - #kerberos.username: elastic - - # Password of the Kerberos user. It is used when auth_type is set to password. - #kerberos.password: changeme - - # Kerberos realm. - #kerberos.realm: ELASTIC - - -# ------------------------------ Logstash Output ------------------------------- -#output.logstash: - # Boolean flag to enable or disable the output module. - #enabled: true - - # The Logstash hosts - #hosts: ["localhost:5044"] - - # Number of workers per Logstash host. - #worker: 1 - - # Set gzip compression level. - #compression_level: 3 - - # Configure escaping HTML symbols in strings. - #escape_html: false - - # Optional maximum time to live for a connection to Logstash, after which the - # connection will be re-established. A value of `0s` (the default) will - # disable this feature. - # - # Not yet supported for async connections (i.e. with the "pipelining" option set) - #ttl: 30s - - # Optionally load-balance events between Logstash hosts. Default is false. 
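A minimal, hedged sketch of the Logstash output described here; the endpoints and CA path are placeholders.

output.logstash:
  enabled: true
  # Placeholder Logstash endpoints; events are load-balanced across them.
  hosts: ["logstash-1:5044", "logstash-2:5044"]
  loadbalance: true
  # Placeholder CA used to verify the Logstash server certificate.
  ssl.enabled: true
  ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]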
- #loadbalance: false - - # Number of batches to be sent asynchronously to Logstash while processing - # new batches. - #pipelining: 2 - - # If enabled only a subset of events in a batch of events is transferred per - # transaction. The number of events to be sent increases up to `bulk_max_size` - # if no error is encountered. - #slow_start: false - - # The number of seconds to wait before trying to reconnect to Logstash - # after a network error. After waiting backoff.init seconds, the Beat - # tries to reconnect. If the attempt fails, the backoff timer is increased - # exponentially up to backoff.max. After a successful connection, the backoff - # timer is reset. The default is 1s. - #backoff.init: 1s - - # The maximum number of seconds to wait before attempting to connect to - # Logstash after a network error. The default is 60s. - #backoff.max: 60s - - # Optional index name. The default index name is set to filebeat - # in all lowercase. - #index: 'filebeat' - - # SOCKS5 proxy server URL - #proxy_url: socks5://user:password@socks5-server:2233 - - # Resolve names locally when using a proxy server. Defaults to false. - #proxy_use_local_resolver: false - - # Use SSL settings for HTTPS. - #ssl.enabled: true - - # Controls the verification of certificates. Valid values are: - # * full, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. - # * strict, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. If the Subject Alternative - # Name is empty, it returns an error. - # * certificate, which verifies that the provided certificate is signed by a - # trusted authority (CA), but does not perform any hostname verification. - # * none, which performs no verification of the server's certificate. This - # mode disables many of the security benefits of SSL/TLS and should only be used - # after very careful consideration. It is primarily intended as a temporary - # diagnostic mechanism when attempting to resolve TLS errors; its use in - # production environments is strongly discouraged. - # The default value is full. - #ssl.verification_mode: full - - # List of supported/valid TLS versions. By default all TLS versions from 1.1 - # up to 1.3 are enabled. - #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client certificate key - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the certificate key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE-based cipher suites - #ssl.curve_types: [] - - # Configure what types of renegotiation are supported. Valid options are - # never, once, and freely. Default is never. - #ssl.renegotiation: never - - # Configure a pin that can be used to do extra validation of the verified certificate chain, - # this allow you to ensure that a specific certificate is used to validate the chain of trust. - # - # The pin is a base64 encoded string of the SHA-256 fingerprint. 
- #ssl.ca_sha256: "" - - # A root CA HEX encoded fingerprint. During the SSL handshake if the - # fingerprint matches the root CA certificate, it will be added to - # the provided list of root CAs (`certificate_authorities`), if the - # list is empty or not defined, the matching certificate will be the - # only one in the list. Then the normal SSL validation happens. - #ssl.ca_trusted_fingerprint: "" - - # Enables restarting filebeat if any file listed by `key`, - # `certificate`, or `certificate_authorities` is modified. - # This feature IS NOT supported on Windows. - #ssl.restart_on_cert_change.enabled: false - - # Period to scan for changes on CA certificate files - #ssl.restart_on_cert_change.period: 1m - - # The number of times to retry publishing an event after a publishing failure. - # After the specified number of retries, the events are typically dropped. - # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting - # and retry until all events are published. Set max_retries to a value less - # than 0 to retry until all events are published. The default is 3. - #max_retries: 3 - - # The maximum number of events to bulk in a single Logstash request. The - # default is 2048. - #bulk_max_size: 2048 - - # The number of seconds to wait for responses from the Logstash server before - # timing out. The default is 30s. - #timeout: 30s - -# -------------------------------- Kafka Output -------------------------------- -#output.kafka: - # Boolean flag to enable or disable the output module. - #enabled: true - - # The list of Kafka broker addresses from which to fetch the cluster metadata. - # The cluster metadata contain the actual Kafka brokers events are published - # to. - #hosts: ["localhost:9092"] - - # The Kafka topic used for produced events. The setting can be a format string - # using any event field. To set the topic from document type use `%{[type]}`. - #topic: beats - - # The Kafka event key setting. Use format string to create a unique event key. - # By default no event key will be generated. - #key: '' - - # The Kafka event partitioning strategy. Default hashing strategy is `hash` - # using the `output.kafka.key` setting or randomly distributes events if - # `output.kafka.key` is not configured. - #partition.hash: - # If enabled, events will only be published to partitions with reachable - # leaders. Default is false. - #reachable_only: false - - # Configure alternative event field names used to compute the hash value. - # If empty `output.kafka.key` setting will be used. - # Default value is empty list. - #hash: [] - - # Authentication details. Password is required if username is set. - #username: '' - #password: '' - - # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512. - # Defaults to PLAIN when `username` and `password` are configured. - #sasl.mechanism: '' - - # Kafka version Filebeat is assumed to run against. Defaults to the "1.0.0". - #version: '1.0.0' - - # Configure JSON encoding - #codec.json: - # Pretty-print JSON event - #pretty: false - - # Configure escaping HTML symbols in strings. - #escape_html: false - - # Metadata update configuration. Metadata contains leader information - # used to decide which broker to use when publishing. - #metadata: - # Max metadata request retry attempts when cluster is in middle of leader - # election. Defaults to 3 retries. - #retry.max: 3 - - # Wait time between retries during leader elections. Default is 250ms. - #retry.backoff: 250ms - - # Refresh metadata interval. 
Defaults to every 10 minutes. - #refresh_frequency: 10m - - # Strategy for fetching the topics metadata from the broker. Default is false. - #full: false - - # The number of times to retry publishing an event after a publishing failure. - # After the specified number of retries, events are typically dropped. - # Some Beats, such as Filebeat, ignore the max_retries setting and retry until - # all events are published. Set max_retries to a value less than 0 to retry - # until all events are published. The default is 3. - #max_retries: 3 - - # The number of seconds to wait before trying to republish to Kafka - # after a network error. After waiting backoff.init seconds, the Beat - # tries to republish. If the attempt fails, the backoff timer is increased - # exponentially up to backoff.max. After a successful publish, the backoff - # timer is reset. The default is 1s. - #backoff.init: 1s - - # The maximum number of seconds to wait before attempting to republish to - # Kafka after a network error. The default is 60s. - #backoff.max: 60s - - # The maximum number of events to bulk in a single Kafka request. The default - # is 2048. - #bulk_max_size: 2048 - - # Duration to wait before sending bulk Kafka request. 0 is no delay. The default - # is 0. - #bulk_flush_frequency: 0s - - # The number of seconds to wait for responses from the Kafka brokers before - # timing out. The default is 30s. - #timeout: 30s - - # The maximum duration a broker will wait for number of required ACKs. The - # default is 10s. - #broker_timeout: 10s - - # The number of messages buffered for each Kafka broker. The default is 256. - #channel_buffer_size: 256 - - # The keep-alive period for an active network connection. If 0s, keep-alives - # are disabled. The default is 0 seconds. - #keep_alive: 0 - - # Sets the output compression codec. Must be one of none, snappy and gzip. The - # default is gzip. - #compression: gzip - - # Set the compression level. Currently only gzip provides a compression level - # between 0 and 9. The default value is chosen by the compression algorithm. - #compression_level: 4 - - # The maximum permitted size of JSON-encoded messages. Bigger messages will be - # dropped. The default value is 1000000 (bytes). This value should be equal to - # or less than the broker's message.max.bytes. - #max_message_bytes: 1000000 - - # The ACK reliability level required from broker. 0=no response, 1=wait for - # local commit, -1=wait for all replicas to commit. The default is 1. Note: - # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently - # on error. - #required_acks: 1 - - # The configurable ClientID used for logging, debugging, and auditing - # purposes. The default is "beats". - #client_id: beats - - # Use SSL settings for HTTPS. - #ssl.enabled: true - - # Controls the verification of certificates. Valid values are: - # * full, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. - # * strict, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. If the Subject Alternative - # Name is empty, it returns an error. - # * certificate, which verifies that the provided certificate is signed by a - # trusted authority (CA), but does not perform any hostname verification. 
- # * none, which performs no verification of the server's certificate. This - # mode disables many of the security benefits of SSL/TLS and should only be used - # after very careful consideration. It is primarily intended as a temporary - # diagnostic mechanism when attempting to resolve TLS errors; its use in - # production environments is strongly discouraged. - # The default value is full. - #ssl.verification_mode: full - - # List of supported/valid TLS versions. By default all TLS versions from 1.1 - # up to 1.3 are enabled. - #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client certificate key - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the certificate key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE-based cipher suites - #ssl.curve_types: [] - - # Configure what types of renegotiation are supported. Valid options are - # never, once, and freely. Default is never. - #ssl.renegotiation: never - - # Configure a pin that can be used to do extra validation of the verified certificate chain, - # this allow you to ensure that a specific certificate is used to validate the chain of trust. - # - # The pin is a base64 encoded string of the SHA-256 fingerprint. - #ssl.ca_sha256: "" - - # A root CA HEX encoded fingerprint. During the SSL handshake if the - # fingerprint matches the root CA certificate, it will be added to - # the provided list of root CAs (`certificate_authorities`), if the - # list is empty or not defined, the matching certificate will be the - # only one in the list. Then the normal SSL validation happens. - #ssl.ca_trusted_fingerprint: "" - - # Enables restarting filebeat if any file listed by `key`, - # `certificate`, or `certificate_authorities` is modified. - # This feature IS NOT supported on Windows. - #ssl.restart_on_cert_change.enabled: false - - # Period to scan for changes on CA certificate files - #ssl.restart_on_cert_change.period: 1m - - # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. - #kerberos.enabled: true - - # Authentication type to use with Kerberos. Available options: keytab, password. - #kerberos.auth_type: password - - # Path to the keytab file. It is used when auth_type is set to keytab. - #kerberos.keytab: /etc/security/keytabs/kafka.keytab - - # Path to the Kerberos configuration. - #kerberos.config_path: /etc/krb5.conf - - # The service name. Service principal name is contructed from - # service_name/hostname@realm. - #kerberos.service_name: kafka - - # Name of the Kerberos user. - #kerberos.username: elastic - - # Password of the Kerberos user. It is used when auth_type is set to password. - #kerberos.password: changeme - - # Kerberos realm. - #kerberos.realm: ELASTIC - - # Enables Kerberos FAST authentication. This may - # conflict with certain Active Directory configurations. - #kerberos.enable_krb5_fast: false - -# -------------------------------- Redis Output -------------------------------- -#output.redis: - # Boolean flag to enable or disable the output module. - #enabled: true - - # Configure JSON encoding - #codec.json: - # Pretty print json event - #pretty: false - - # Configure escaping HTML symbols in strings. 
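For the Kafka output described above, a minimal, hedged sketch using only the options it documents; the broker list and topic are placeholders.

output.kafka:
  # Placeholder broker list used to fetch cluster metadata.
  hosts: ["localhost:9092"]
  # Placeholder topic; a format string such as '%{[type]}' is also accepted.
  topic: beats
  partition.hash:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000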
- #escape_html: false - - # The list of Redis servers to connect to. If load-balancing is enabled, the - # events are distributed to the servers in the list. If one server becomes - # unreachable, the events are distributed to the reachable servers only. - # The hosts setting supports redis and rediss urls with custom password like - # redis://:password@localhost:6379. - #hosts: ["localhost:6379"] - - # The name of the Redis list or channel the events are published to. The - # default is filebeat. - #key: filebeat - - # The password to authenticate to Redis with. The default is no authentication. - #password: - - # The Redis database number where the events are published. The default is 0. - #db: 0 - - # The Redis data type to use for publishing events. If the data type is list, - # the Redis RPUSH command is used. If the data type is channel, the Redis - # PUBLISH command is used. The default value is list. - #datatype: list - - # The number of workers to use for each host configured to publish events to - # Redis. Use this setting along with the loadbalance option. For example, if - # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each - # host). - #worker: 1 - - # If set to true and multiple hosts or workers are configured, the output - # plugin load balances published events onto all Redis hosts. If set to false, - # the output plugin sends all events to only one host (determined at random) - # and will switch to another host if the currently selected one becomes - # unreachable. The default value is true. - #loadbalance: true - - # The Redis connection timeout in seconds. The default is 5 seconds. - #timeout: 5s - - # The number of times to retry publishing an event after a publishing failure. - # After the specified number of retries, the events are typically dropped. - # Some Beats, such as Filebeat, ignore the max_retries setting and retry until - # all events are published. Set max_retries to a value less than 0 to retry - # until all events are published. The default is 3. - #max_retries: 3 - - # The number of seconds to wait before trying to reconnect to Redis - # after a network error. After waiting backoff.init seconds, the Beat - # tries to reconnect. If the attempt fails, the backoff timer is increased - # exponentially up to backoff.max. After a successful connection, the backoff - # timer is reset. The default is 1s. - #backoff.init: 1s - - # The maximum number of seconds to wait before attempting to connect to - # Redis after a network error. The default is 60s. - #backoff.max: 60s - - # The maximum number of events to bulk in a single Redis request or pipeline. - # The default is 2048. - #bulk_max_size: 2048 - - # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The - # value must be a URL with a scheme of socks5://. - #proxy_url: - - # This option determines whether Redis hostnames are resolved locally when - # using a proxy. The default value is false, which means that name resolution - # occurs on the proxy server. - #proxy_use_local_resolver: false - - # Use SSL settings for HTTPS. - #ssl.enabled: true - - # Controls the verification of certificates. Valid values are: - # * full, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. 
- # * strict, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. If the Subject Alternative - # Name is empty, it returns an error. - # * certificate, which verifies that the provided certificate is signed by a - # trusted authority (CA), but does not perform any hostname verification. - # * none, which performs no verification of the server's certificate. This - # mode disables many of the security benefits of SSL/TLS and should only be used - # after very careful consideration. It is primarily intended as a temporary - # diagnostic mechanism when attempting to resolve TLS errors; its use in - # production environments is strongly discouraged. - # The default value is full. - #ssl.verification_mode: full - - # List of supported/valid TLS versions. By default all TLS versions from 1.1 - # up to 1.3 are enabled. - #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client certificate key - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the certificate key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE-based cipher suites - #ssl.curve_types: [] - - # Configure what types of renegotiation are supported. Valid options are - # never, once, and freely. Default is never. - #ssl.renegotiation: never - - # Configure a pin that can be used to do extra validation of the verified certificate chain, - # this allow you to ensure that a specific certificate is used to validate the chain of trust. - # - # The pin is a base64 encoded string of the SHA-256 fingerprint. - #ssl.ca_sha256: "" - - # A root CA HEX encoded fingerprint. During the SSL handshake if the - # fingerprint matches the root CA certificate, it will be added to - # the provided list of root CAs (`certificate_authorities`), if the - # list is empty or not defined, the matching certificate will be the - # only one in the list. Then the normal SSL validation happens. - #ssl.ca_trusted_fingerprint: "" - - -# -------------------------------- File Output --------------------------------- -#output.file: - # Boolean flag to enable or disable the output module. - #enabled: true - - # Configure JSON encoding - #codec.json: - # Pretty-print JSON event - #pretty: false - - # Configure escaping HTML symbols in strings. - #escape_html: false - - # Path to the directory where to save the generated files. The option is - # mandatory. - #path: "/tmp/filebeat" - - # Name of the generated files. The default is `filebeat` and it generates - # files: `filebeat-{datetime}.ndjson`, `filebeat-{datetime}-1.ndjson`, etc. - #filename: filebeat - - # Maximum size in kilobytes of each file. When this size is reached, and on - # every Filebeat restart, the files are rotated. The default value is 10240 - # kB. - #rotate_every_kb: 10000 - - # Maximum number of files under path. When this number of files is reached, - # the oldest file is deleted and the rest are shifted from last to first. The - # default is 7 files. - #number_of_files: 7 - - # Permissions to use for file creation. The default is 0600. 
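Tying together the Redis output options above, a minimal, hedged sketch; the server address is a placeholder.

output.redis:
  # Placeholder Redis server; redis:// and rediss:// URLs with a password are also accepted.
  hosts: ["localhost:6379"]
  # Publish events to a Redis list named "filebeat" via RPUSH.
  key: filebeat
  db: 0
  datatype: list
  timeout: 5s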
- #permissions: 0600 - - # Configure automatic file rotation on every startup. The default is true. - #rotate_on_startup: true - -# ------------------------------- Console Output ------------------------------- -#output.console: - # Boolean flag to enable or disable the output module. - #enabled: true - - # Configure JSON encoding - #codec.json: - # Pretty-print JSON event - #pretty: false - - # Configure escaping HTML symbols in strings. - #escape_html: false - -# =================================== Paths ==================================== - -# The home path for the Filebeat installation. This is the default base path -# for all other path settings and for miscellaneous files that come with the -# distribution (for example, the sample dashboards). -# If not set by a CLI flag or in the configuration file, the default for the -# home path is the location of the binary. -#path.home: - -# The configuration path for the Filebeat installation. This is the default -# base path for configuration files, including the main YAML configuration file -# and the Elasticsearch template file. If not set by a CLI flag or in the -# configuration file, the default for the configuration path is the home path. -#path.config: ${path.home} - -# The data path for the Filebeat installation. This is the default base path -# for all the files in which Filebeat needs to store its data. If not set by a -# CLI flag or in the configuration file, the default for the data path is a data -# subdirectory inside the home path. -#path.data: ${path.home}/data - -# The logs path for a Filebeat installation. This is the default location for -# the Beat's log files. If not set by a CLI flag or in the configuration file, -# the default for the logs path is a logs subdirectory inside the home path. -#path.logs: ${path.home}/logs - -# ================================== Keystore ================================== - -# Location of the Keystore containing the keys and their sensitive values. -#keystore.path: "${path.config}/beats.keystore" - -# ================================= Dashboards ================================= - -# These settings control loading the sample dashboards to the Kibana index. Loading -# the dashboards are disabled by default and can be enabled either by setting the -# options here or by using the `-setup` CLI flag or the `setup` command. -#setup.dashboards.enabled: false - -# The directory from where to read the dashboards. The default is the `kibana` -# folder in the home path. -#setup.dashboards.directory: ${path.home}/kibana - -# The URL from where to download the dashboard archive. It is used instead of -# the directory if it has a value. -#setup.dashboards.url: - -# The file archive (zip file) from where to read the dashboards. It is used instead -# of the directory when it has a value. -#setup.dashboards.file: - -# In case the archive contains the dashboards from multiple Beats, this lets you -# select which one to load. You can load all the dashboards in the archive by -# setting this to the empty string. -#setup.dashboards.beat: filebeat - -# The name of the Kibana index to use for setting the configuration. Default is ".kibana" -#setup.dashboards.kibana_index: .kibana - -# The Elasticsearch index name. This overwrites the index name defined in the -# dashboards and index pattern. Example: testbeat-* -#setup.dashboards.index: - -# Always use the Kibana API for loading the dashboards instead of autodetecting -# how to install the dashboards by first querying Elasticsearch. 
-#setup.dashboards.always_kibana: false - -# If true and Kibana is not reachable at the time when dashboards are loaded, -# it will retry to reconnect to Kibana instead of exiting with an error. -#setup.dashboards.retry.enabled: false - -# Duration interval between Kibana connection retries. -#setup.dashboards.retry.interval: 1s - -# Maximum number of retries before exiting with an error, 0 for unlimited retrying. -#setup.dashboards.retry.maximum: 0 - -# ================================== Template ================================== - -# A template is used to set the mapping in Elasticsearch -# By default template loading is enabled and the template is loaded. -# These settings can be adjusted to load your own template or overwrite existing ones. - -# Set to false to disable template loading. -#setup.template.enabled: true - -# Template name. By default the template name is "filebeat-%{[agent.version]}" -# The template name and pattern has to be set in case the Elasticsearch index pattern is modified. -#setup.template.name: "filebeat-%{[agent.version]}" - -# Template pattern. By default the template pattern is "filebeat-%{[agent.version]}" to apply to the default index settings. -# The template name and pattern has to be set in case the Elasticsearch index pattern is modified. -#setup.template.pattern: "filebeat-%{[agent.version]}" - -# Path to fields.yml file to generate the template -#setup.template.fields: "${path.config}/fields.yml" - -# A list of fields to be added to the template and Kibana index pattern. Also -# specify setup.template.overwrite: true to overwrite the existing template. -#setup.template.append_fields: -#- name: field_name -# type: field_type - -# Enable JSON template loading. If this is enabled, the fields.yml is ignored. -#setup.template.json.enabled: false - -# Path to the JSON template file -#setup.template.json.path: "${path.config}/template.json" - -# Name under which the template is stored in Elasticsearch -#setup.template.json.name: "" - -# Set this option if the JSON template is a data stream. -#setup.template.json.data_stream: false - -# Overwrite existing template -# Do not enable this option for more than one instance of filebeat as it might -# overload your Elasticsearch with too many update requests. -#setup.template.overwrite: false - -# Elasticsearch template settings -setup.template.settings: - - # A dictionary of settings to place into the settings.index dictionary - # of the Elasticsearch template. For more details, please check - # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html - #index: - #number_of_shards: 1 - #codec: best_compression - - # A dictionary of settings for the _source field. For more details, please check - # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html - #_source: - #enabled: false - -# ====================== Index Lifecycle Management (ILM) ====================== - -# Configure index lifecycle management (ILM) to manage the backing indices -# of your data streams. - -# Enable ILM support. Valid values are true, or false. -#setup.ilm.enabled: true - -# Set the lifecycle policy name. The default policy name is -# 'beatname'. -#setup.ilm.policy_name: "mypolicy" - -# The path to a JSON file that contains a lifecycle policy configuration. Used -# to load your own lifecycle policy. -#setup.ilm.policy_file: - -# Disable the check for an existing lifecycle policy. The default is true. 
-# If you set this option to false, lifecycle policy will not be installed, -# even if setup.ilm.overwrite is set to true. -#setup.ilm.check_exists: true - -# Overwrite the lifecycle policy at startup. The default is false. -#setup.ilm.overwrite: false - -# ======================== Data Stream Lifecycle (DSL) ========================= - -# Configure Data Stream Lifecycle to manage data streams while connected to Serverless elasticsearch. -# These settings are mutually exclusive with ILM settings which are not supported in Serverless projects. - -# Enable DSL support. Valid values are true, or false. -#setup.dsl.enabled: true - -# Set the lifecycle policy name or pattern. For DSL, this name must match the data stream that the lifecycle is for. -# The default data stream pattern is filebeat-%{[agent.version]}" -# The template string `%{[agent.version]}` will resolve to the current stack version. -# The other possible template value is `%{[beat.name]}`. -#setup.dsl.data_stream_pattern: "filebeat-%{[agent.version]}" - -# The path to a JSON file that contains a lifecycle policy configuration. Used -# to load your own lifecycle policy. -# If no custom policy is specified, a default policy with a lifetime of 7 days will be created. -#setup.dsl.policy_file: - -# Disable the check for an existing lifecycle policy. The default is true. If -# you disable this check, set setup.dsl.overwrite: true so the lifecycle policy -# can be installed. -#setup.dsl.check_exists: true - -# Overwrite the lifecycle policy at startup. The default is false. -#setup.dsl.overwrite: false - -# =================================== Kibana =================================== - -# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. -# This requires a Kibana endpoint configuration. -setup.kibana: - - # Kibana Host - # Scheme and port can be left out and will be set to the default (http and 5601) - # In case you specify and additional path, the scheme is required: http://localhost:5601/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 - #host: "localhost:5601" - - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "elastic" - #password: "changeme" - - # Optional HTTP path - #path: "" - - # Optional Kibana space ID. - #space.id: "" - - # Custom HTTP headers to add to each request - #headers: - # X-My-Header: Contents of the header - - # Use SSL settings for HTTPS. - #ssl.enabled: true - - # Controls the verification of certificates. Valid values are: - # * full, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. - # * strict, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. If the Subject Alternative - # Name is empty, it returns an error. - # * certificate, which verifies that the provided certificate is signed by a - # trusted authority (CA), but does not perform any hostname verification. - # * none, which performs no verification of the server's certificate. This - # mode disables many of the security benefits of SSL/TLS and should only be used - # after very careful consideration. 
It is primarily intended as a temporary - # diagnostic mechanism when attempting to resolve TLS errors; its use in - # production environments is strongly discouraged. - # The default value is full. - #ssl.verification_mode: full - - # List of supported/valid TLS versions. By default all TLS versions from 1.1 - # up to 1.3 are enabled. - #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client certificate key - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the certificate key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE-based cipher suites - #ssl.curve_types: [] - - # Configure what types of renegotiation are supported. Valid options are - # never, once, and freely. Default is never. - #ssl.renegotiation: never - - # Configure a pin that can be used to do extra validation of the verified certificate chain, - # this allow you to ensure that a specific certificate is used to validate the chain of trust. - # - # The pin is a base64 encoded string of the SHA-256 fingerprint. - #ssl.ca_sha256: "" - - # A root CA HEX encoded fingerprint. During the SSL handshake if the - # fingerprint matches the root CA certificate, it will be added to - # the provided list of root CAs (`certificate_authorities`), if the - # list is empty or not defined, the matching certificate will be the - # only one in the list. Then the normal SSL validation happens. - #ssl.ca_trusted_fingerprint: "" - - -# ================================== Logging =================================== - -# There are four options for the log output: file, stderr, syslog, eventlog -# The file output is the default. - -# Sets log level. The default log level is info. -# Available log levels are: error, warning, info, debug -#logging.level: info - -# Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are "beat", "publisher", "service" -# Multiple selectors can be chained. -#logging.selectors: [ ] - -# Send all logging output to stderr. The default is false. -#logging.to_stderr: false - -# Send all logging output to syslog. The default is false. -#logging.to_syslog: false - -# Send all logging output to Windows Event Logs. The default is false. -#logging.to_eventlog: false - -# If enabled, Filebeat periodically logs its internal metrics that have changed -# in the last period. For each metric that changed, the delta from the value at -# the beginning of the period is logged. Also, the total values for -# all non-zero internal metrics are logged on shutdown. The default is true. -#logging.metrics.enabled: true - -# The period after which to log the internal metrics. The default is 30s. -#logging.metrics.period: 30s - -# A list of metrics namespaces to report in the logs. Defaults to [stats]. -# `stats` contains general Beat metrics. `dataset` may be present in some -# Beats and contains module or input metrics. -#logging.metrics.namespaces: [stats] - -# Logging to rotating files. Set logging.to_files to false to disable logging to -# files. -logging.to_files: true -logging.files: - # Configure the path where the logs are written. The default is the logs directory - # under the home path (the binary location). 
- #path: /var/log/filebeat - - # The name of the files where the logs are written to. - #name: filebeat - - # Configure log file size limit. If the limit is reached, log file will be - # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB - - # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 - - # The permissions mask to apply when rotating log files. The default value is 0600. - # Must be a valid Unix-style file permissions mask expressed in octal notation. - #permissions: 0600 - - # Enable log file rotation on time intervals in addition to the size-based rotation. - # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h - # are boundary-aligned with minutes, hours, days, weeks, months, and years as - # reported by the local system clock. All other intervals are calculated from the - # Unix epoch. Defaults to disabled. - #interval: 0 - - # Rotate existing logs on startup rather than appending them to the existing - # file. Defaults to true. - # rotateonstartup: true - -#=============================== Events Logging =============================== -# Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events (that may contain -# sensitive information) together with other log messages, a different -# log file, only for log entries containing raw events, is used. It will -# use the same level, selectors and all other configurations from the -# default logger, but it will have it's own file configuration. -# -# Having a different log file for raw events also prevents event data -# from drowning out the regular log files. -# -# IMPORTANT: No matter the default logger output configuration, raw events -# will **always** be logged to a file configured by `logging.event_data.files`. - -# logging.event_data: -# Logging to rotating files. Set logging.to_files to false to disable logging to -# files. -#logging.event_data.to_files: true -#logging.event_data: - # Configure the path where the logs are written. The default is the logs directory - # under the home path (the binary location). - #path: /var/log/filebeat - - # The name of the files where the logs are written to. - #name: filebeat-event-data - - # Configure log file size limit. If the limit is reached, log file will be - # automatically rotated. - #rotateeverybytes: 5242880 # = 5MB - - # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 2 - - # The permissions mask to apply when rotating log files. The default value is 0600. - # Must be a valid Unix-style file permissions mask expressed in octal notation. - #permissions: 0600 - - # Enable log file rotation on time intervals in addition to the size-based rotation. - # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h - # are boundary-aligned with minutes, hours, days, weeks, months, and years as - # reported by the local system clock. All other intervals are calculated from the - # Unix epoch. Defaults to disabled. - #interval: 0 - - # Rotate existing logs on startup rather than appending them to the existing - # file. Defaults to false. - # rotateonstartup: false - -# ============================= X-Pack Monitoring ============================== -# Filebeat can export internal metrics to a central Elasticsearch monitoring -# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The -# reporting is disabled by default. 
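
Before the individual options, here is a rough sketch of the minimal monitoring configuration this section describes; the host and credentials are placeholders for a dedicated monitoring cluster:

monitoring.enabled: true
monitoring.elasticsearch:
  hosts: ["https://monitoring-cluster:9200"]
  username: "beats_system"
  password: "changeme"

If the regular Elasticsearch output already points at the monitoring cluster, uncommenting monitoring.elasticsearch without any settings is enough, since unset values are inherited from the output configuration.
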
- -# Set to true to enable the monitoring reporter. -#monitoring.enabled: false - -# Sets the UUID of the Elasticsearch cluster under which monitoring data for this -# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch -# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch. -#monitoring.cluster_uuid: - -# Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. -# Note that the settings should point to your Elasticsearch *monitoring* cluster. -# Any setting that is not set is automatically inherited from the Elasticsearch -# output configuration, so if you have the Elasticsearch output configured such -# that it is pointing to your Elasticsearch monitoring cluster, you can simply -# uncomment the following line. -#monitoring.elasticsearch: - - # Array of hosts to connect to. - # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify an additional path, the scheme is required: http://localhost:9200/path - # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 - #hosts: ["localhost:9200"] - - # Set gzip compression level. - #compression_level: 0 - - # Protocol - either `http` (default) or `https`. - #protocol: "https" - - # Authentication credentials - either API key or username/password. - #api_key: "id:api_key" - #username: "beats_system" - #password: "changeme" - - # Dictionary of HTTP parameters to pass within the URL with index operations. - #parameters: - #param1: value1 - #param2: value2 - - # Custom HTTP headers to add to each request - #headers: - # X-My-Header: Contents of the header - - # Proxy server url - #proxy_url: http://proxy:3128 - - # The number of times a particular Elasticsearch index operation is attempted. If - # the indexing operation doesn't succeed after this many retries, the events are - # dropped. The default is 3. - #max_retries: 3 - - # The maximum number of events to bulk in a single Elasticsearch bulk API index request. - # The default is 50. - #bulk_max_size: 50 - - # The number of seconds to wait before trying to reconnect to Elasticsearch - # after a network error. After waiting backoff.init seconds, the Beat - # tries to reconnect. If the attempt fails, the backoff timer is increased - # exponentially up to backoff.max. After a successful connection, the backoff - # timer is reset. The default is 1s. - #backoff.init: 1s - - # The maximum number of seconds to wait before attempting to connect to - # Elasticsearch after a network error. The default is 60s. - #backoff.max: 60s - - # Configure HTTP request timeout before failing a request to Elasticsearch. - #timeout: 90 - - # Use SSL settings for HTTPS. - #ssl.enabled: true - - # Controls the verification of certificates. Valid values are: - # * full, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. - # * strict, which verifies that the provided certificate is signed by a trusted - # authority (CA) and also verifies that the server's hostname (or IP address) - # matches the names identified within the certificate. If the Subject Alternative - # Name is empty, it returns an error. - # * certificate, which verifies that the provided certificate is signed by a - # trusted authority (CA), but does not perform any hostname verification. 
- # * none, which performs no verification of the server's certificate. This - # mode disables many of the security benefits of SSL/TLS and should only be used - # after very careful consideration. It is primarily intended as a temporary - # diagnostic mechanism when attempting to resolve TLS errors; its use in - # production environments is strongly discouraged. - # The default value is full. - #ssl.verification_mode: full - - # List of supported/valid TLS versions. By default all TLS versions from 1.1 - # up to 1.3 are enabled. - #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] - - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client certificate key - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the certificate key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE-based cipher suites - #ssl.curve_types: [] - - # Configure what types of renegotiation are supported. Valid options are - # never, once, and freely. Default is never. - #ssl.renegotiation: never - - # Configure a pin that can be used to do extra validation of the verified certificate chain, - # this allow you to ensure that a specific certificate is used to validate the chain of trust. - # - # The pin is a base64 encoded string of the SHA-256 fingerprint. - #ssl.ca_sha256: "" - - # A root CA HEX encoded fingerprint. During the SSL handshake if the - # fingerprint matches the root CA certificate, it will be added to - # the provided list of root CAs (`certificate_authorities`), if the - # list is empty or not defined, the matching certificate will be the - # only one in the list. Then the normal SSL validation happens. - #ssl.ca_trusted_fingerprint: "" - - # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. - #kerberos.enabled: true - - # Authentication type to use with Kerberos. Available options: keytab, password. - #kerberos.auth_type: password - - # Path to the keytab file. It is used when auth_type is set to keytab. - #kerberos.keytab: /etc/elastic.keytab - - # Path to the Kerberos configuration. - #kerberos.config_path: /etc/krb5.conf - - # Name of the Kerberos user. - #kerberos.username: elastic - - # Password of the Kerberos user. It is used when auth_type is set to password. - #kerberos.password: changeme - - # Kerberos realm. - #kerberos.realm: ELASTIC - - #metrics.period: 10s - #state.period: 1m - -# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts` -# setting. You can find the value for this setting in the Elastic Cloud web UI. -#monitoring.cloud.id: - -# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username` -# and `monitoring.elasticsearch.password` settings. The format is `:`. -#monitoring.cloud.auth: - -# =============================== HTTP Endpoint ================================ - -# Each beat can expose internal metrics through an HTTP endpoint. For security -# reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output -# append ?pretty to the URL. - -# Defines if the HTTP endpoint is enabled. 
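
To make the endpoint description above concrete, a minimal sketch using the documented defaults (and binding only to localhost, as recommended):

http.enabled: true
http.host: localhost
http.port: 5066

The metrics are then served at http://localhost:5066/stats; append ?pretty for readable JSON.
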
-#http.enabled: false - -# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. -# When using IP addresses, it is recommended to only use localhost. -#http.host: localhost - -# Port on which the HTTP endpoint will bind. Default is 5066. -#http.port: 5066 - -# Define which user should be owning the named pipe. -#http.named_pipe.user: - -# Define which permissions should be applied to the named pipe, use the Security -# Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with -# `http.user`. -#http.named_pipe.security_descriptor: - -# Defines if the HTTP pprof endpoints are enabled. -# It is recommended that this is only enabled on localhost as these endpoints may leak data. -#http.pprof.enabled: false - -# Controls the fraction of goroutine blocking events that are reported in the -# blocking profile. -#http.pprof.block_profile_rate: 0 - -# Controls the fraction of memory allocations that are recorded and reported in -# the memory profile. -#http.pprof.mem_profile_rate: 524288 - -# Controls the fraction of mutex contention events that are reported in the -# mutex profile. -#http.pprof.mutex_profile_rate: 0 - -# ============================== Process Security ============================== - -# Enable or disable seccomp system call filtering on Linux. Default is enabled. -#seccomp.enabled: true - -# ============================== Instrumentation =============================== - -# Instrumentation support for the filebeat. -#instrumentation: - # Set to true to enable instrumentation of filebeat. - #enabled: false - - # Environment in which filebeat is running on (eg: staging, production, etc.) - #environment: "" - - # APM Server hosts to report instrumentation results to. - #hosts: - # - http://localhost:8200 - - # API Key for the APM Server(s). - # If api_key is set then secret_token will be ignored. - #api_key: - - # Secret token for the APM Server(s). - #secret_token: - - # Enable profiling of the server, recording profile samples as events. - # - # This feature is experimental. - #profiling: - #cpu: - # Set to true to enable CPU profiling. - #enabled: false - #interval: 60s - #duration: 10s - #heap: - # Set to true to enable heap profiling. - #enabled: false - #interval: 60s - -# ================================= Migration ================================== - -# This allows to enable 6.7 migration aliases -#migration.6_to_7.enabled: false - -# =============================== Feature Flags ================================ - -# Enable and configure feature flags. -#features: -# fqdn: -# enabled: true + +#=========================== Filebeat inputs ============================= + +# List of inputs to fetch data. +filebeat.inputs: +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. + +# Type of the files. Based on this the way the file is read is decided. +# The different types cannot be mixed in one input +# +# Possible options are: +# * filestream: Reads every line of the log file +# * log: Reads every line of the log file (deprecated) +# * stdin: Reads the standard in + +#------------------------------ Log input -------------------------------- +- type: log + + # Change to true to enable this input configuration. + enabled: false + + # Paths that should be crawled and fetched. Glob based paths. 
+ # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. + # Make sure no file is defined twice as this can lead to unexpected behaviour. + paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Configure the file encoding for reading files with international characters + # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). + # Some sample encodings: + # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, + # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... + #encoding: plain + + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, no lines are dropped. + #exclude_lines: ['^DBG'] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, all the lines are exported. + #include_lines: ['^ERR', '^WARN'] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #exclude_files: ['.gz$'] + + # Method to determine if two files are the same or not. By default + # the Beat considers two files the same if their inode and device id are the same. + #file_identity.native: ~ + + # Optional additional fields. These fields can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + # Set to true to store the additional fields as top-level fields instead + # of under the "fields" sub-dictionary. In case of name conflicts with the + # fields added by Filebeat itself, the custom fields overwrite the default + # fields. + #fields_under_root: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # By default, all events contain `host.name`. This option can be set to true + # to disable the addition of this field to all events. The default value is + # false. + #publisher_pipeline.disable_host: false + + # Ignore files that were modified more than the defined timespan in the past. + # ignore_older is disabled by default, so no files are ignored by setting it to 0. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #ignore_older: 0 + + # How often the input checks for new files in the paths that are specified + # for harvesting. Specify 1s to scan the directory as frequently as possible + # without causing Filebeat to scan too frequently. Default: 10s. + #scan_frequency: 10s + + # Defines the buffer size every harvester uses when fetching the file + #harvester_buffer_size: 16384 + + # Maximum number of bytes a single log event can have + # All bytes after max_bytes are discarded and not sent. The default is 10MB. + # This is especially useful for multiline log messages which can get large. + #max_bytes: 10485760 + + # Characters that separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, + # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator, + # null_terminator + #line_terminator: auto + + ### Recursive glob configuration + + # Expand "**" patterns into regular glob patterns. 
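
Pulling the options above together, a sketch of a log input that collects application logs, keeps only warnings and errors, and tags events with a custom field; the paths, patterns, and field values are examples only:

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/myapp/*.log
  include_lines: ['^ERR', '^WARN']
  exclude_files: ['.gz$']
  fields:
    service: myapp
  fields_under_root: true
  ignore_older: 48h
  scan_frequency: 10s
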
+ #recursive_glob.enabled: true + + ### JSON configuration + + # Decode JSON options. Enable this if your logs are structured in JSON. + # JSON key on which to apply the line filtering and multiline settings. This key + # must be top level and its value must be string, otherwise it is ignored. If + # no text key is defined, the line filtering and multiline features cannot be used. + #json.message_key: + + # By default, the decoded JSON is placed under a "json" key in the output document. + # If you enable this setting, the keys are copied top level in the output document. + #json.keys_under_root: false + + # If keys_under_root and this setting are enabled, then the values from the decoded + # JSON object overwrites the fields that Filebeat normally adds (type, source, offset, etc.) + # in case of conflicts. + #json.overwrite_keys: false + + # If this setting is enabled, then keys in the decoded JSON object will be recursively + # de-dotted, and expanded into a hierarchical object structure. + # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`. + #json.expand_keys: false + + # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON + # unmarshaling errors or when a text key is defined in the configuration but cannot + # be used. + #json.add_error_key: false + + ### Multiline options + + # Multiline can be used for log messages spanning multiple lines. This is common + # for Java Stack Traces or C-Line Continuation + + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines if the pattern set under the pattern should be negated or not. Default is false. + #multiline.negate: false + + # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern + # that was (not) matched before or after or as long as a pattern is not matched based on negate. + # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash + #multiline.match: after + + # The maximum number of lines that are combined into one event. + # In case there are more the max_lines the additional lines are discarded. + # Default is 500 + #multiline.max_lines: 500 + + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event + # Default is 5s. + #multiline.timeout: 5s + + # To aggregate constant number of lines into a single event use the count mode of multiline. + #multiline.type: count + + # The number of lines to aggregate into a single event. + #multiline.count_lines: 3 + + # Do not add new line characters when concatenating lines. + #multiline.skip_newline: false + + # Setting tail_files to true means filebeat starts reading new files at the end + # instead of the beginning. If this is used in combination with log rotation + # this can mean that the first entries of a new file are skipped. + #tail_files: false + + # The ingest pipeline ID associated with this input. If this is set, it + # overwrites the pipeline option from the Elasticsearch output. + #pipeline: + + # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the + # original for harvesting but will report the symlink name as the source. + #symlinks: false + + # Backoff values define how aggressively filebeat crawls new files for updates + # The default values can be used in most cases. 
Backoff defines how long it has to wait
+ # to check a file again after EOF is reached. Default is 1s, which means the file
+ # is checked every second if new lines were added. This leads to near real-time crawling.
+ # Every time a new line appears, backoff is reset to the initial value.
+ #backoff: 1s
+
+ # Max backoff defines what the maximum backoff time is. After having backed off multiple times
+ # from checking the files, the waiting time will never exceed max_backoff, independent of the
+ # backoff factor. Setting it to 10s means that, in the worst case, a new line added to a log
+ # file after multiple backoffs takes a maximum of 10s to be read.
+ #max_backoff: 10s
+
+ # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
+ # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
+ # The backoff value is multiplied by backoff_factor each time until max_backoff is reached.
+ #backoff_factor: 2
+
+ # Max number of harvesters that are started in parallel.
+ # Default is 0, which means unlimited.
+ #harvester_limit: 0
+
+ ### Harvester closing options
+
+ # Close inactive closes the file handler after the predefined period.
+ # The period starts when the last line of the file was read, not from the file's ModTime.
+ # Time strings like 2h (2 hours) and 5m (5 minutes) can be used.
+ #close_inactive: 5m
+
+ # Close renamed closes a file handler when the file is renamed or rotated.
+ # Note: Potential data loss. Make sure to read and understand the docs for this option.
+ #close_renamed: false
+
+ # When this option is enabled, a file handler is closed immediately if a file can't be found
+ # any more. If the file shows up again later, harvesting will continue at the last known position
+ # after scan_frequency.
+ #close_removed: true
+
+ # Closes the file handler as soon as the harvester reaches the end of the file.
+ # By default this option is disabled.
+ # Note: Potential data loss. Make sure to read and understand the docs for this option.
+ #close_eof: false
+
+ ### State options
+
+ # If the last modification of a file is older than clean_inactive, its state is removed from the registry.
+ # By default this is disabled.
+ #clean_inactive: 0
+
+ # Immediately removes the state of files that can no longer be found on disk.
+ #clean_removed: true
+
+ # Close timeout closes the harvester after the predefined time.
+ # This happens regardless of whether the harvester has finished reading the file.
+ # By default this option is disabled.
+ # Note: Potential data loss. Make sure to read and understand the docs for this option.
+ #close_timeout: 0
+
+ # Defines if inputs are enabled
+ #enabled: true
+
+#--------------------------- Filestream input ----------------------------
+- type: filestream
+
+ # Unique ID among all inputs; an ID is required.
+ id: my-filestream-id
+
+ # Change to true to enable this input configuration.
+ enabled: false
+
+ # Paths that should be crawled and fetched. Glob based paths.
+ # To fetch all ".log" files from a specific level of subdirectories
+ # /var/log/*/*.log can be used.
+ # For each file found under this path, a harvester is started.
+ # Make sure no file is defined twice, as this can lead to unexpected behaviour.
+ paths:
+ - /var/log/*.log
+ #- c:\programdata\elasticsearch\logs\*
+
+ # Configure the file encoding for reading files with international characters
+ # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
+ # Some sample encodings: + # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, + # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... + #encoding: plain + + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, no lines are dropped. + # Line filtering happens after the parsers pipeline. If you would like to filter lines + # before parsers, use include_message parser. + #exclude_lines: ['^DBG'] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. The include_lines is called before + # exclude_lines. By default, all the lines are exported. + # Line filtering happens after the parsers pipeline. If you would like to filter lines + # before parsers, use include_message parser. + #include_lines: ['^ERR', '^WARN'] + + ### Prospector options + + # How often the input checks for new files in the paths that are specified + # for harvesting. Specify 1s to scan the directory as frequently as possible + # without causing Filebeat to scan too frequently. Default: 10s. + #prospector.scanner.check_interval: 10s + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #prospector.scanner.exclude_files: ['.gz$'] + + # Include files. A list of regular expressions to match. Filebeat keeps only the files that + # are matching any regular expression from the list. By default, no files are dropped. + #prospector.scanner.include_files: ['/var/log/.*'] + + # Expand "**" patterns into regular glob patterns. + #prospector.scanner.recursive_glob: true + + # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the + # original for harvesting but will report the symlink name as the source. + #prospector.scanner.symlinks: false + + # If enabled, instead of relying on the device ID and inode values when comparing files, + # compare hashes of the given byte ranges in files. A file becomes an ingest target + # when its size grows larger than offset+length (see below). Until then it's ignored. + #prospector.scanner.fingerprint.enabled: false + + # If fingerprint mode is enabled, sets the offset from the beginning of the file + # for the byte range used for computing the fingerprint value. + #prospector.scanner.fingerprint.offset: 0 + + # If fingerprint mode is enabled, sets the length of the byte range used for + # computing the fingerprint value. Cannot be less than 64 bytes. + #prospector.scanner.fingerprint.length: 1024 + + ### Parsers configuration + + #### JSON configuration + + #parsers: + #- ndjson: + # Decode JSON options. Enable this if your logs are structured in JSON. + # JSON key on which to apply the line filtering and multiline settings. This key + # must be top level and its value must be a string, otherwise it is ignored. If + # no text key is defined, the line filtering and multiline features cannot be used. + #message_key: + + # By default, the decoded JSON is placed under a "json" key in the output document. + # If you enable this setting, the keys are copied to the top level of the output document. 
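
Where inode reuse or copy-based rotation makes the default device-and-inode identity unreliable, the fingerprint options described above can be combined roughly as follows; the id and paths are placeholders, the offset and length mirror the commented values above, and file_identity.fingerprint is assumed from the autodiscover example further down:

filebeat.inputs:
- type: filestream
  id: fingerprint-example
  paths:
    - /var/log/app/*.log
  prospector.scanner.fingerprint.enabled: true
  prospector.scanner.fingerprint.offset: 0
  prospector.scanner.fingerprint.length: 1024
  file_identity.fingerprint: ~
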
+ #keys_under_root: false + + # If keys_under_root and this setting are enabled, then the values from the decoded + # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) + # in case of conflicts. + #overwrite_keys: false + + # If this setting is enabled, then keys in the decoded JSON object will be recursively + # de-dotted, and expanded into a hierarchical object structure. + # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`. + #expand_keys: false + + # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON + # unmarshaling errors or when a text key is defined in the configuration but cannot + # be used. + #add_error_key: false + + #### Filtering messages + + # You can filter messsages in the parsers pipeline. Use this method if you would like to + # include or exclude lines before they are aggregated into multiline or the JSON contents + # are parsed. + + #parsers: + #- include_message.patterns: + #- ["WARN", "ERR"] + + #### Multiline options + + # Multiline can be used for log messages spanning multiple lines. This is common + # for Java Stack Traces or C-Line Continuation + + #parsers: + #- multiline: + #type: pattern + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + #pattern: ^\[ + + # Defines if the pattern set under the pattern setting should be negated or not. Default is false. + #negate: false + + # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern + # that was (not) matched before or after or as long as a pattern is not matched based on negate. + # Note: After is the equivalent to previous and before is the equivalent to next in Logstash + #match: after + + # The maximum number of lines that are combined into one event. + # In case there are more than max_lines the additional lines are discarded. + # Default is 500 + #max_lines: 500 + + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event + # Default is 5s. + #timeout: 5s + + # Do not add new line character when concatenating lines. + #skip_newline: false + + # To aggregate constant number of lines into a single event use the count mode of multiline. + + #parsers: + #- multiline: + #type: count + + # The number of lines to aggregate into a single event. + #count_lines: 3 + + # The maximum number of lines that are combined into one event. + # In case there are more than max_lines the additional lines are discarded. + # Default is 500 + #max_lines: 500 + + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event + # Default is 5s. + #timeout: 5s + + # Do not add new line characters when concatenating lines. + #skip_newline: false + + #### Parsing container events + + # You can parse container events with different formats from all streams. + + #parsers: + #- container: + # Source of container events. Available options: all, stdin, stderr. + #stream: all + + # Format of the container events. Available options: auto, cri, docker, json-file + #format: auto + + ### Log rotation + + # When an external tool rotates the input files with copytruncate strategy + # use this section to help the input find the rotated files. + #rotation.external.strategy.copytruncate: + # Regex that matches the rotated files. + # suffix_regex: \.\d$ + # If the rotated filename suffix is a datetime, set it here. 
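
Because the parsers above are applied as a pipeline, an end-to-end sketch may help. This one assumes NDJSON-encoded application logs whose multi-line messages are joined with the example pattern shown above; the id, path, and message key are placeholders:

filebeat.inputs:
- type: filestream
  id: parsers-example
  paths:
    - /var/log/app/app.ndjson
  parsers:
    - ndjson:
        message_key: message
        add_error_key: true
    - multiline:
        type: pattern
        pattern: '^\['
        negate: false
        match: after
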
+ # dateformat: -20060102 + + ### State options + + # Files for the modification data is older than clean_inactive the state from the registry is removed + # By default this is disabled. + #clean_inactive: -1 + + # Removes the state for files which cannot be found on disk anymore immediately + #clean_removed: true + + # Method to determine if two files are the same or not. By default + # the Beat considers two files the same if their inode and device id are the same. + #file_identity.native: ~ + + # Optional additional fields. These fields can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + # Set to true to publish fields with null values in events. + #keep_null: false + + # By default, all events contain `host.name`. This option can be set to true + # to disable the addition of this field to all events. The default value is + # false. + #publisher_pipeline.disable_host: false + + # Ignore files that were modified more than the defined timespan in the past. + # ignore_older is disabled by default, so no files are ignored by setting it to 0. + # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. + #ignore_older: 0 + + # Ignore files that have not been updated since the selected event. + # ignore_inactive is disabled by default, so no files are ignored by setting it to "". + # Available options: since_first_start, since_last_start. + #ignore_inactive: "" + + # If `take_over` is set to `true`, this `filestream` will take over all files + # from `log` inputs if they match at least one of the `paths` set in the `filestream`. + # This functionality is still in beta. + #take_over: false + + # Defines the buffer size every harvester uses when fetching the file + #harvester_buffer_size: 16384 + + # Maximum number of bytes a single log event can have + # All bytes after max_bytes are discarded and not sent. The default is 10MB. + # This is especially useful for multiline log messages which can get large. + #message_max_bytes: 10485760 + + # Characters that separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, + # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator, + # null_terminator + #line_terminator: auto + + # The ingest pipeline ID associated with this input. If this is set, it + # overwrites the pipeline option from the Elasticsearch output. + #pipeline: + + # Backoff values define how aggressively filebeat crawls new files for updates + # The default values can be used in most cases. Backoff defines how long it has to wait + # to check a file again after EOF is reached. Default is 1s which means the file + # is checked every second if new lines were added. This leads to a near real-time crawling. + # Every time a new line appears, backoff is reset to the initial value. + #backoff.init: 1s + + # Max backoff defines what the maximum backoff time is. After having backed off multiple times + # from checking the files, the waiting time will never exceed max_backoff independent of the + # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log + # file after having backed off multiple times, it takes a maximum of 10s to read the new line + #backoff.max: 10s + + ### Harvester closing options + + # Close inactive closes the file handler after the predefined period. + # The period starts when the last line of the file was, not the file ModTime. 
+ # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. + #close.on_state_change.inactive: 5m + + # Close renamed closes a file handler when the file is renamed or rotated. + # Note: Potential data loss. Make sure to read and understand the docs for this option. + #close.on_state_change.renamed: false + + # When enabling this option, a file handler is closed immediately in case a file can't be found + # any more. In case the file shows up again later, harvesting will continue at the last known position + # after scan_frequency. + #close.on_state_change.removed: true + + # Closes the file handler as soon as the harvesters reaches the end of the file. + # By default this option is disabled. + # Note: Potential data loss. Make sure to read and understand the docs for this option. + #close.reader.on_eof: false + + # Close timeout closes the harvester after the predefined time. + # This is independent if the harvester did finish reading the file or not. + # By default this option is disabled. + # Note: Potential data loss. Make sure to read and understand the docs for this option. + #close.reader.after_interval: 0 + +#----------------------------- Stdin input ------------------------------- +# Configuration to use stdin input +#- type: stdin + +#------------------------- Redis slowlog input --------------------------- +# Experimental: Config options for the redis slow log input +#- type: redis + #enabled: false + + # List of hosts to pool to retrieve the slow log information. + #hosts: ["localhost:6379"] + + # How often the input checks for redis slow log. + #scan_frequency: 10s + + # Timeout after which time the input should return an error + #timeout: 1s + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Redis AUTH password. Empty by default. + #password: foobared + +#------------------------------ Udp input -------------------------------- +# Experimental: Config options for the udp input +#- type: udp + #enabled: false + + # Maximum size of the message received over UDP + #max_message_size: 10KiB + + # Size of the UDP read buffer in bytes + #read_buffer: 0 + + +#------------------------------ TCP input -------------------------------- +# Experimental: Config options for the TCP input +#- type: tcp + #enabled: false + + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new message + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # Max number of concurrent connections, or 0 for no limit. Default: 0 + #max_connections: 0 + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s + + # Use SSL settings for TCP. + #ssl.enabled: true + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for client verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL server authentication. + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Server Certificate Key, + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections. 
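
As a rough illustration of the TCP input options above, listening with TLS and required client certificates; the listen address and certificate paths are placeholders:

filebeat.inputs:
- type: tcp
  enabled: true
  host: "localhost:9000"
  line_delimiter: "\n"
  max_message_size: 20MiB
  ssl.enabled: true
  ssl.certificate: "/etc/pki/server/cert.pem"
  ssl.key: "/etc/pki/server/cert.key"
  ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  ssl.client_authentication: "required"

As noted above, client_authentication already defaults to required once certificate_authorities is set, so it is shown here only for clarity.
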
+ #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites. + #ssl.curve_types: [] + + # Configure what types of client authentication are supported. Valid options + # are `none`, `optional`, and `required`. When `certificate_authorities` is set it will + # default to `required` otherwise it will be set to `none`. + #ssl.client_authentication: "required" + + +#------------------------------ Kafka input -------------------------------- +# Accept events from topics in a Kafka cluster. +#- type: kafka + #enabled: false + + # A list of hosts/ports for the initial Kafka brokers. + #hosts: + #- kafka-broker-1:9092 + #- kafka-broker-2:9092 + + # A list of topics to read from. + #topics: ["my-topic", "important-logs"] + + # The Kafka consumer group id to use when connecting. + #group_id: "filebeat" + + # An optional Kafka client id to attach to Kafka requests. + #client_id: "my-client" + + # The version of the Kafka protocol to use. + #version: 1.0 + + # Set to "newest" to start reading from the most recent message when connecting to a + # new topic, otherwise the input will begin reading at the oldest remaining event. + #initial_offset: oldest + + # How long to wait before trying to reconnect to the kafka cluster after a fatal error. + #connect_backoff: 30s + + # How long to wait before retrying a failed read. + #consume_backoff: 2s + + # How long to wait for the minimum number of input bytes while reading. + #max_wait_time: 250ms + + # The Kafka isolation level, "read_uncommitted" or "read_committed". + #isolation_level: read_uncommitted + + # Some Kafka deployments such as Microsoft Azure can return multiple events packed into a + # single data field. Set this field to specify where events should be unpacked from. + #expand_event_list_from_field: "records" + + # The minimum number of bytes to wait for. + #fetch.min: 1 + + # The default number of bytes to read per request. + #fetch.default: 1MB + + # The maximum number of bytes to read per request (0 for no limit). + #fetch.max: 0 + + # Consumer rebalance strategy, "range" or "roundrobin" + #rebalance.strategy: "range" + + # How long to wait for an attempted rebalance. + #rebalance.timeout: 60s + + # How many times to retry if rebalancing fails. + #rebalance.max_retries: 4 + + # How long to wait after an unsuccessful rebalance attempt. + #rebalance.retry_backoff: 2s + + # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512. + # Defaults to PLAIN when `username` and `password` are configured. + #sasl.mechanism: '' + + # Parsers can be used with the Kafka input. The available parsers are "ndjson" and + # "multiline". See the filestream input configuration for more details. + #parsers: + #- ndjson: + # ... + #- multiline: + # ... + + +#------------------------------ Syslog input -------------------------------- +# Accept RFC3164 formatted syslog event via UDP. +#- type: syslog + #enabled: false + #format: rfc3164 + #protocol.udp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Maximum size of the message received over UDP + #max_message_size: 10KiB + +# Accept RFC5424 formatted syslog event via TCP. 
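
Tying the Kafka options above together, a sketch that consumes two topics starting from the oldest retained messages; all values are placeholders taken from the commented examples above:

filebeat.inputs:
- type: kafka
  enabled: true
  hosts:
    - kafka-broker-1:9092
    - kafka-broker-2:9092
  topics: ["my-topic", "important-logs"]
  group_id: "filebeat"
  client_id: "my-client"
  initial_offset: oldest
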
+#- type: syslog + #enabled: false + #format: rfc5424 + + #protocol.tcp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new message + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s + + # Use SSL settings for TCP. + #ssl.enabled: true + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for client verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL server authentication. + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Server Certificate Key, + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections. + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites. + #ssl.curve_types: [] + + # Configure what types of client authentication are supported. Valid options + # are `none`, `optional`, and `required`. When `certificate_authorities` is set it will + # default to `required` otherwise it will be set to `none`. + #ssl.client_authentication: "required" + +#------------------------------ Container input -------------------------------- +#- type: container + #enabled: false + + # Paths for container logs that should be crawled and fetched. + #paths: + # -/var/lib/docker/containers/*/*.log + + # Configure stream to filter to a specific stream: stdout, stderr or all (default) + #stream: all + +#------------------------------ Journald input -------------------------------- +# Journald input is experimental. +#- type: journald + #enabled: true + #id: service-foo + + # You may wish to have separate inputs for each service. You can use + # include_matches.or to specify a list of filter expressions that are + # applied as a logical OR. You may specify filter + #include_matches.match: + #- _SYSTEMD_UNIT=foo.service + + # List of syslog identifiers + #syslog_identifiers: ["audit"] + + # Collect events from the service and messages about the service, + # including coredumps. + #units: ["docker.service"] + + # The list of transports (_TRANSPORT field of journald entries) + #transports: ["audit"] + + # Parsers are also supported, here is an example of the multiline + # parser. + #parsers: + #- multiline: + #type: count + #count_lines: 3 + +#------------------------------ NetFlow input -------------------------------- +# Experimental: Config options for the Netflow/IPFIX collector over UDP input +#- type: netflow + #enabled: false + + # Address where the NetFlow Collector will bind + #host: ":2055" + + # Maximum size of the message received over UDP + #max_message_size: 10KiB + + # List of enabled protocols. + # Valid values are 'v1', 'v5', 'v6', 'v7', 'v8', 'v9' and 'ipfix' + #protocols: [ v5, v9, ipfix ] + + # Expiration timeout + # This is the time before an idle session or unused template is expired. + # Only applicable to v9 and ipfix protocols. A value of zero disables expiration. + #expiration_timeout: 30m + + # Share Templates + # This option allows v9 and ipfix templates to be shared within a session without + # reference to the origin of the template. 
+ # + # Setting this to true is not recommended as it can result in the wrong template + # being applied under certain conditions, but it may be required for some systems. + #share_templates: false + + # Queue size limits the number of netflow packets that are queued awaiting + # processing. + #queue_size: 8192 + + # Custom field definitions for NetFlow V9 / IPFIX. + # List of files with YAML fields definition. + #custom_definitions: + #- path/to/ipfix.yaml + #- path/to/netflow.yaml + +#---------------------------- Google Cloud Pub/Sub Input ----------------------- +# Input for reading messages from a Google Cloud Pub/Sub topic subscription. +- type: gcp-pubsub + enabled: false + + # Google Cloud project ID. Required. + project_id: my-gcp-project-id + + # Google Cloud Pub/Sub topic name. Required. + topic: my-gcp-pubsub-topic-name + + # Google Cloud Pub/Sub topic subscription name. Required. + subscription.name: my-gcp-pubsub-subscription-name + + # Create subscription if it does not exist. + #subscription.create: true + + # Number of goroutines to create to read from the subscription. + #subscription.num_goroutines: 1 + + # Maximum number of unprocessed messages to allow at any time. + # This must be at least queue.mem.flush.min_events to prevent input blockage. + #subscription.max_outstanding_messages: 1600 + + # Path to a JSON file containing the credentials and key used to subscribe. + credentials_file: ${path.config}/my-pubsub-subscriber-credentials.json + +#------------------------------ AWS S3 input -------------------------------- +# Beta: Config options for AWS S3 input +#- type: aws-s3 + #enabled: false + + # AWS Credentials + # If access_key_id and secret_access_key are configured, then use them to make api calls. + # If not, aws-s3 input will load default AWS config or load with given profile name. + #access_key_id: '${AWS_ACCESS_KEY_ID:""}' + #secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' + #session_token: '${AWS_SESSION_TOKEN:"”}' + #credential_profile_name: test-aws-s3-input + + # SQS queue URL to receive messages from (required). + #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-aws-s3-logs-queue" + + # Number of workers on S3 bucket or SQS queue + #number_of_workers: 5 + + # Maximum duration of an AWS API call (excluding S3 GetObject calls). + #api_timeout: 120s + + # Duration that received SQS messages are hidden from subsequent + # requests after being retrieved by a ReceiveMessage request. + #visibility_timeout: 300s + + # List of S3 object metadata keys to include in events. + #include_s3_metadata: [] + + # The max number of times an SQS message should be received (retried) before deleting it. + #sqs.max_receive_count: 5 + + # Maximum duration for which the SQS ReceiveMessage call waits for a message + # to arrive in the queue before returning. + #sqs.wait_time: 20s + + # Bucket ARN used for polling AWS S3 buckets + #bucket_arn: arn:aws:s3:::test-s3-bucket + + # Bucket Name used for polling non-AWS S3 buckets + #non_aws_bucket_name: test-s3-bucket + + # Configures the AWS S3 API to use path style instead of virtual host style (default) + #path_style: false + + # Overrides the `cloud.provider` field for non-AWS S3 buckets. See docs for auto recognized providers. 
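
A sketch of the SQS-notification variant of the aws-s3 input described above; the queue URL and profile name reuse the commented examples, and with no explicit keys the input falls back to the default AWS credential chain as noted above:

filebeat.inputs:
- type: aws-s3
  enabled: true
  queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-aws-s3-logs-queue"
  credential_profile_name: test-aws-s3-input
  number_of_workers: 5
  visibility_timeout: 300s
  sqs.max_receive_count: 5
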
+ #provider: minio + + # Configures backing up processed files to another (or the same) bucket + #backup_to_bucket_arn: 'arn:aws:s3:::mybucket' + #non_aws_backup_to_bucket_name: 'mybucket' + + # Sets a prefix to prepend to object keys when backing up + #backup_to_bucket_prefix: 'backup/' + + # Controls deletion of objects after backing them up + #delete_after_backup: false + +#------------------------------ AWS CloudWatch input -------------------------------- +# Beta: Config options for AWS CloudWatch input +#- type: aws-cloudwatch + #enabled: false + + # AWS Credentials + # If access_key_id and secret_access_key are configured, then use them to make api calls. + # If not, aws-cloudwatch input will load default AWS config or load with given profile name. + #access_key_id: '${AWS_ACCESS_KEY_ID:""}' + #secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' + #session_token: '${AWS_SESSION_TOKEN:"”}' + #credential_profile_name: test-aws-s3-input + + # ARN of the log group to collect logs from + # This ARN could refer to a log group from a linked source account + # Note: This property precedes over `log_group_name` & `log_group_name_prefix` + #log_group_arn: "arn:aws:logs:us-east-1:428152502467:log-group:test:*" + + # Name of the log group to collect logs from. + # Note: region_name is required when log_group_name is given. + #log_group_name: test + + # The prefix for a group of log group names. + # You can include linked source accounts by using the property `include_linked_accounts_for_prefix_mode`. + # Note: `region_name` is required when `log_group_name_prefix` is given. + # `log_group_name` and `log_group_name_prefix` cannot be given at the same time. + #log_group_name_prefix: /aws/ + + # State whether to include linked source accounts when obtaining log groups matching the prefix provided through `log_group_name_prefix` + # This property works together with `log_group_name_prefix` and default value (if unset) is false + #include_linked_accounts_for_prefix_mode: true + + # Region that the specified log group or log group prefix belongs to. + #region_name: us-east-1 + + # A list of strings of log streams names that Filebeat collect log events from. + #log_streams: + # - log_stream_name + + # A string to filter the results to include only log events from log streams + # that have names starting with this prefix. + #log_stream_prefix: test + + # `start_position` allows user to specify if this input should read log files + # from the `beginning` or from the `end`. + # `beginning`: reads from the beginning of the log group (default). + # `end`: read only new messages from current time minus `scan_frequency` going forward. + #start_position: beginning + + # This config parameter sets how often Filebeat checks for new log events from the + # specified log group. Default `scan_frequency` is 1 minute, which means Filebeat + # will sleep for 1 minute before querying for new logs again. + #scan_frequency: 1m + + # The maximum duration of AWS API can take. If it exceeds the timeout, AWS API + # will be interrupted. + # The default AWS API timeout for a message is 120 seconds. + # The minimum is 0 seconds. + #api_timeout: 120s + + # This is used to sleep between AWS `FilterLogEvents` API calls inside the same + # collection period. + #api_sleep: 200ms + + # This is used to shift collection start time and end time back in order to + # collect logs when there is a delay in CloudWatch. 
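
Illustrating the CloudWatch options above with prefix-based log group selection; the prefix and region reuse the commented examples and are placeholders:

filebeat.inputs:
- type: aws-cloudwatch
  enabled: true
  log_group_name_prefix: /aws/
  include_linked_accounts_for_prefix_mode: true
  region_name: us-east-1
  start_position: beginning
  scan_frequency: 1m

Because log_group_name_prefix is used here, region_name is required and log_group_name must not be set at the same time, as described above.
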
+ #latency: 1m + +#------------------------------ ETW input -------------------------------- +# Beta: Config options for ETW (Event Trace for Windows) input (Only available for Windows) +#- type: etw + #enabled: false + #id: etw-dnsserver + + # Path to an .etl file to read from. + #file: "C:\Windows\System32\Winevt\Logs\Logfile.etl" + + # GUID of an ETW provider. + # Run 'logman query providers' to list the available providers. + #provider.guid: {EB79061A-A566-4698-9119-3ED2807060E7} + + # Name of an ETW provider. + # Run 'logman query providers' to list the available providers. + #provider.name: Microsoft-Windows-DNSServer + + # Tag to identify created sessions. + # If missing, its default value is the provider ID prefixed by 'Elastic-'. + #session_name: DNSServer-Analytical-Trace + + # Filter collected events with a level value that is less than or equal to this level. + # Allowed values are critical, error, warning, informational, and verbose. + #trace_level: verbose + + # 8-byte bitmask that enables the filtering of events from specific provider subcomponents. + # The provider will write a particular event if the event's keyword bits match any of the bits + # in this bitmask. + # Run 'logman query providers ""' to list available keywords. + #match_any_keyword: 0x8000000000000000 + + # 8-byte bitmask that enables the filtering of events from + # specific provider subcomponents. The provider will write a particular + # event if the event's keyword bits match all of the bits in this bitmask. + # Run 'logman query providers ""' to list available keywords. + #match_all_keyword: 0 + + # An existing session to read from. + # Run 'logman query -ets' to list existing sessions. + #session: UAL_Usermode_Provider + +# =========================== Filebeat autodiscover ============================ + +# Autodiscover allows you to detect changes in the system and spawn new modules +# or inputs as they happen. + +#filebeat.autodiscover: + # List of enabled autodiscover providers +# providers: +# - type: docker +# templates: +# - condition: +# equals.docker.container.image: busybox +# config: +# - type: container +# paths: +# - /var/log/containers/*.log + +#Example: for kubernetes container logs autodiscovery +# filebeat.autodiscover: +# providers: +# - type: kubernetes +# node: ${NODE_NAME} +# hints.enabled: true +# # By default requests to kubeadm config map are made in order to enrich cluster name by requesting /api/v1/namespaces/kube-system/configmaps/kubeadm-config API endpoint. +# use_kubeadm: true +# hints.default_config: +# type: filestream +# id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} +# paths: +# - /var/log/containers/*-${data.kubernetes.container.id}.log +# parsers: +# - container: ~ +# prospector: +# scanner: +# fingerprint.enabled: true +# symlinks: true +# file_identity.fingerprint: ~ + +#By default requests to kubeadm config map are made in order to enrich cluster name by requesting /api/v1/namespaces/kube-system/configmaps/kubeadm-config API endpoint. +# use_kubeadm: true + +# ========================== Filebeat global options =========================== + +# Registry data path. If a relative path is used, it is considered relative to the +# data path. +#filebeat.registry.path: ${path.data}/registry + +# The permissions mask to apply on registry data and meta files. The default +# value is 0600. Must be a valid Unix-style file permissions mask expressed in +# octal notation. This option is not supported on Windows. 
+#filebeat.registry.file_permissions: 0600 + +# The timeout value that controls when registry entries are written to the disk +# (flushed). When an unwritten update exceeds this value, it triggers a write +# to disk. When flush is set to 0s, the registry is written to disk after each +# batch of events has been published successfully. The default value is 1s. +#filebeat.registry.flush: 1s + + +# Starting with Filebeat 7.0, the registry uses a new directory format to store +# Filebeat state. After you upgrade, Filebeat will automatically migrate a 6.x +# registry file to use the new directory format. If you changed +# filebeat.registry.path while upgrading, set filebeat.registry.migrate_file to +# point to the old registry file. +#filebeat.registry.migrate_file: ${path.data}/registry + +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Filebeat overwrites pipelines +# every time a new Elasticsearch connection is established. +#filebeat.overwrite_pipelines: false + +# How long filebeat waits on shutdown for the publisher to finish. +# Default is 0, not waiting. +#filebeat.shutdown_timeout: 0 + +# Enable filebeat config reloading +#filebeat.config: + #inputs: + #enabled: false + #path: inputs.d/*.yml + #reload.enabled: true + #reload.period: 10s + #modules: + #enabled: true + #path: modules.d/*.yml + #reload.enabled: true + #reload.period: 10s + + +# ================================== General =================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this option is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a field +# sub-dictionary. Default is false. +#fields_under_root: false + +# Configure the precision of all timestamps in Filebeat. +# Available options: millisecond, microsecond, nanosecond +#timestamp.precision: millisecond + +# Internal queue configuration for buffering events to be published. +# Queue settings may be overridden by performance presets in the +# Elasticsearch output. To configure them manually use "preset: custom". +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to serve + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 3200 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 1600 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < `flush.min_events`. + #flush.timeout: 10s + + # The disk queue stores incoming events on disk until the output is + # ready for them. 
This allows a higher event limit than the memory-only + # queue and lets pending events persist through a restart. + #disk: + # The directory path to store the queue's data. + #path: "${path.data}/diskqueue" + + # The maximum space the queue should occupy on disk. Depending on + # input settings, events that exceed this limit are delayed or discarded. + #max_size: 10GB + + # The maximum size of a single queue data file. Data in the queue is + # stored in smaller segments that are deleted after all their events + # have been processed. + #segment_size: 1GB + + # The number of events to read from disk to memory while waiting for + # the output to request them. + #read_ahead: 512 + + # The number of events to accept from inputs while waiting for them + # to be written to disk. If event data arrives faster than it + # can be written to disk, this setting prevents it from overflowing + # main memory. + #write_ahead: 2048 + + # The duration to wait before retrying when the queue encounters a disk + # write error. + #retry_interval: 1s + + # The maximum length of time to wait before retrying on a disk write + # error. If the queue encounters repeated errors, it will double the + # length of its retry interval each time, up to this maximum. + #max_retry_interval: 30s + +# Sets the maximum number of CPUs that can be executed simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +# ================================= Processors ================================= + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +# - include_fields: +# fields: ["cpu"] +# - drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +# - drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +# - rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +# - dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +# - add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. 
+# +#processors: +# - add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +# - add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.parent.pid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +# - add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +# - add_host_metadata: ~ +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +# - add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +# - decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +# +#processors: +# - decompress_gzip_field: +# from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true +# +# The following example copies the value of the message to message_copied +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: message_copied +# fail_on_error: true +# ignore_missing: false +# +# The following example truncates the value of the message to 1024 bytes +# +#processors: +# - truncate_fields: +# fields: +# - message +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example preserves the raw message under event.original +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: event.original +# fail_on_error: false +# ignore_missing: true +# - truncate_fields: +# fields: +# - event.original +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example URL-decodes the value of field1 to field2 +# +#processors: +# - urldecode: +# fields: +# - from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true + +# =============================== Elastic Cloud ================================ + +# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +# ================================== Outputs =================================== + +# Configure what output to use when sending the data collected by the beat. + +# ---------------------------- Elasticsearch Output ---------------------------- +output.elasticsearch: + # Boolean flag to enable or disable the output module. 
+ #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["localhost:9200"] + + # Performance presets configure other output fields to recommended values + # based on a performance priority. + # Options are "balanced", "throughput", "scale", "latency" and "custom". + # Default if unspecified: "custom" + preset: balanced + + # Set gzip compression level. Set to 0 to disable compression. + # This field may conflict with performance presets. To set it + # manually use "preset: custom". + # The default is 1. + #compression_level: 1 + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the URL with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + # This field may conflict with performance presets. To set it + # manually use "preset: custom". + #worker: 1 + + # If set to true and multiple hosts are configured, the output plugin load + # balances published events onto all Elasticsearch hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # Optional data stream or index name. The default is "filebeat-%{[agent.version]}". + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "filebeat-%{[agent.version]}" + + # Optional ingest pipeline. By default, no pipeline will be used. + #pipeline: "" + + # Optional HTTP path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server URL + #proxy_url: http://proxy:3128 + + # Whether to disable proxy settings for outgoing connections. If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # This field may conflict with performance presets. To set it + # manually use "preset: custom". + # The default is 1600. + #bulk_max_size: 1600 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. 
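+  # For example, with `backoff.init: 1s` and `backoff.max: 60s`, successive
+  # failed connection attempts wait roughly 1s, 2s, 4s, 8s, and so on, capped
+  # at 60s, until a connection succeeds again (illustrative sequence).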
+ #backoff.max: 60s + + # The maximum amount of time an idle connection will remain idle + # before closing itself. Zero means use the default of 60s. The + # format is a Go language duration (example 60s is 60 seconds). + # This field may conflict with performance presets. To set it + # manually use "preset: custom". + # The default is 3s. + # idle_connection_timeout: 3s + + # Configure HTTP request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Prevents filebeat from connecting to older Elasticsearch versions when set to `false` + #allow_older_versions: true + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # A root CA HEX encoded fingerprint. During the SSL handshake if the + # fingerprint matches the root CA certificate, it will be added to + # the provided list of root CAs (`certificate_authorities`), if the + # list is empty or not defined, the matching certificate will be the + # only one in the list. Then the normal SSL validation happens. + #ssl.ca_trusted_fingerprint: "" + + + # Enables restarting filebeat if any file listed by `key`, + # `certificate`, or `certificate_authorities` is modified. + # This feature IS NOT supported on Windows. 
+ #ssl.restart_on_cert_change.enabled: false + + # Period to scan for changes on CA certificate files + #ssl.restart_on_cert_change.period: 1m + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + + +# ------------------------------ Logstash Output ------------------------------- +#output.logstash: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. with the "pipelining" option set) + #ttl: 30s + + # Optionally load-balance events between Logstash hosts. Default is false. + #loadbalance: false + + # Number of batches to be sent asynchronously to Logstash while processing + # new batches. + #pipelining: 2 + + # If enabled only a subset of events in a batch of events is transferred per + # transaction. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + #slow_start: false + + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + + # Optional index name. The default index name is set to filebeat + # in all lowercase. + #index: 'filebeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. 
+ # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # A root CA HEX encoded fingerprint. During the SSL handshake if the + # fingerprint matches the root CA certificate, it will be added to + # the provided list of root CAs (`certificate_authorities`), if the + # list is empty or not defined, the matching certificate will be the + # only one in the list. Then the normal SSL validation happens. + #ssl.ca_trusted_fingerprint: "" + + # Enables restarting filebeat if any file listed by `key`, + # `certificate`, or `certificate_authorities` is modified. + # This feature IS NOT supported on Windows. + #ssl.restart_on_cert_change.enabled: false + + # Period to scan for changes on CA certificate files + #ssl.restart_on_cert_change.period: 1m + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting + # and retry until all events are published. Set max_retries to a value less + # than 0 to retry until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Logstash request. The + # default is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Logstash server before + # timing out. The default is 30s. + #timeout: 30s + +# -------------------------------- Kafka Output -------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from which to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. 
Use format string to create a unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512. + # Defaults to PLAIN when `username` and `password` are configured. + #sasl.mechanism: '' + + # Kafka version Filebeat is assumed to run against. Defaults to the "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Metadata update configuration. Metadata contains leader information + # used to decide which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Wait time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # Strategy for fetching the topics metadata from the broker. Default is false. + #full: false + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to republish to Kafka + # after a network error. After waiting backoff.init seconds, the Beat + # tries to republish. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful publish, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to republish to + # Kafka after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # Duration to wait before sending bulk Kafka request. 0 is no delay. The default + # is 0. + #bulk_flush_frequency: 0s + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # Set the compression level. 
Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # A root CA HEX encoded fingerprint. During the SSL handshake if the + # fingerprint matches the root CA certificate, it will be added to + # the provided list of root CAs (`certificate_authorities`), if the + # list is empty or not defined, the matching certificate will be the + # only one in the list. Then the normal SSL validation happens. 
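+  # The fingerprint can typically be obtained from the CA certificate with a
+  # command such as `openssl x509 -noout -fingerprint -sha256 -in ca.pem`
+  # (illustrative file name), removing the colons from the printed value.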
+ #ssl.ca_trusted_fingerprint: "" + + # Enables restarting filebeat if any file listed by `key`, + # `certificate`, or `certificate_authorities` is modified. + # This feature IS NOT supported on Windows. + #ssl.restart_on_cert_change.enabled: false + + # Period to scan for changes on CA certificate files + #ssl.restart_on_cert_change.period: 1m + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/security/keytabs/kafka.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # The service name. Service principal name is contructed from + # service_name/hostname@realm. + #kerberos.service_name: kafka + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + + # Enables Kerberos FAST authentication. This may + # conflict with certain Active Directory configurations. + #kerberos.enable_krb5_fast: false + +# -------------------------------- Redis Output -------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # The list of Redis servers to connect to. If load-balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + # The hosts setting supports redis and rediss urls with custom password like + # redis://:password@localhost:6379. + #hosts: ["localhost:6379"] + + # The name of the Redis list or channel the events are published to. The + # default is filebeat. + #key: filebeat + + # The password to authenticate to Redis with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. 
+ # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. 
+ # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # A root CA HEX encoded fingerprint. During the SSL handshake if the + # fingerprint matches the root CA certificate, it will be added to + # the provided list of root CAs (`certificate_authorities`), if the + # list is empty or not defined, the matching certificate will be the + # only one in the list. Then the normal SSL validation happens. + #ssl.ca_trusted_fingerprint: "" + + +# -------------------------------- File Output --------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/filebeat" + + # Name of the generated files. The default is `filebeat` and it generates + # files: `filebeat-{datetime}.ndjson`, `filebeat-{datetime}-1.ndjson`, etc. + #filename: filebeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every Filebeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + # Configure automatic file rotation on every startup. The default is true. + #rotate_on_startup: true + +# ------------------------------- Console Output ------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + +# =================================== Paths ==================================== + +# The home path for the Filebeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the Filebeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the Filebeat installation. This is the default base path +# for all the files in which Filebeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a Filebeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +# ================================== Keystore ================================== + +# Location of the Keystore containing the keys and their sensitive values. 
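+# A keystore can be created and populated with the Filebeat CLI, for example
+# `filebeat keystore create` followed by `filebeat keystore add MY_SECRET`
+# (MY_SECRET is an illustrative key name); stored values can then be
+# referenced in this configuration as `${MY_SECRET}`.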
+#keystore.path: "${path.config}/beats.keystore" + +# ================================= Dashboards ================================= + +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards are disabled by default and can be enabled either by setting the +# options here or by using the `-setup` CLI flag or the `setup` command. +#setup.dashboards.enabled: false + +# The directory from where to read the dashboards. The default is the `kibana` +# folder in the home path. +#setup.dashboards.directory: ${path.home}/kibana + +# The URL from where to download the dashboard archive. It is used instead of +# the directory if it has a value. +#setup.dashboards.url: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the directory when it has a value. +#setup.dashboards.file: + +# In case the archive contains the dashboards from multiple Beats, this lets you +# select which one to load. You can load all the dashboards in the archive by +# setting this to the empty string. +#setup.dashboards.beat: filebeat + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#setup.dashboards.kibana_index: .kibana + +# The Elasticsearch index name. This overwrites the index name defined in the +# dashboards and index pattern. Example: testbeat-* +#setup.dashboards.index: + +# Always use the Kibana API for loading the dashboards instead of autodetecting +# how to install the dashboards by first querying Elasticsearch. +#setup.dashboards.always_kibana: false + +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 + +# ================================== Template ================================== + +# A template is used to set the mapping in Elasticsearch +# By default template loading is enabled and the template is loaded. +# These settings can be adjusted to load your own template or overwrite existing ones. + +# Set to false to disable template loading. +#setup.template.enabled: true + +# Template name. By default the template name is "filebeat-%{[agent.version]}" +# The template name and pattern has to be set in case the Elasticsearch index pattern is modified. +#setup.template.name: "filebeat-%{[agent.version]}" + +# Template pattern. By default the template pattern is "filebeat-%{[agent.version]}" to apply to the default index settings. +# The template name and pattern has to be set in case the Elasticsearch index pattern is modified. +#setup.template.pattern: "filebeat-%{[agent.version]}" + +# Path to fields.yml file to generate the template +#setup.template.fields: "${path.config}/fields.yml" + +# A list of fields to be added to the template and Kibana index pattern. Also +# specify setup.template.overwrite: true to overwrite the existing template. +#setup.template.append_fields: +#- name: field_name +# type: field_type + +# Enable JSON template loading. If this is enabled, the fields.yml is ignored. 
+#setup.template.json.enabled: false + +# Path to the JSON template file +#setup.template.json.path: "${path.config}/template.json" + +# Name under which the template is stored in Elasticsearch +#setup.template.json.name: "" + +# Set this option if the JSON template is a data stream. +#setup.template.json.data_stream: false + +# Overwrite existing template +# Do not enable this option for more than one instance of filebeat as it might +# overload your Elasticsearch with too many update requests. +#setup.template.overwrite: false + +# Elasticsearch template settings +setup.template.settings: + + # A dictionary of settings to place into the settings.index dictionary + # of the Elasticsearch template. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html + #index: + #number_of_shards: 1 + #codec: best_compression + + # A dictionary of settings for the _source field. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html + #_source: + #enabled: false + +# ====================== Index Lifecycle Management (ILM) ====================== + +# Configure index lifecycle management (ILM) to manage the backing indices +# of your data streams. + +# Enable ILM support. Valid values are true, or false. +#setup.ilm.enabled: true + +# Set the lifecycle policy name. The default policy name is +# 'beatname'. +#setup.ilm.policy_name: "mypolicy" + +# The path to a JSON file that contains a lifecycle policy configuration. Used +# to load your own lifecycle policy. +#setup.ilm.policy_file: + +# Disable the check for an existing lifecycle policy. The default is true. +# If you set this option to false, lifecycle policy will not be installed, +# even if setup.ilm.overwrite is set to true. +#setup.ilm.check_exists: true + +# Overwrite the lifecycle policy at startup. The default is false. +#setup.ilm.overwrite: false + +# ======================== Data Stream Lifecycle (DSL) ========================= + +# Configure Data Stream Lifecycle to manage data streams while connected to Serverless elasticsearch. +# These settings are mutually exclusive with ILM settings which are not supported in Serverless projects. + +# Enable DSL support. Valid values are true, or false. +#setup.dsl.enabled: true + +# Set the lifecycle policy name or pattern. For DSL, this name must match the data stream that the lifecycle is for. +# The default data stream pattern is filebeat-%{[agent.version]}" +# The template string `%{[agent.version]}` will resolve to the current stack version. +# The other possible template value is `%{[beat.name]}`. +#setup.dsl.data_stream_pattern: "filebeat-%{[agent.version]}" + +# The path to a JSON file that contains a lifecycle policy configuration. Used +# to load your own lifecycle policy. +# If no custom policy is specified, a default policy with a lifetime of 7 days will be created. +#setup.dsl.policy_file: + +# Disable the check for an existing lifecycle policy. The default is true. If +# you disable this check, set setup.dsl.overwrite: true so the lifecycle policy +# can be installed. +#setup.dsl.check_exists: true + +# Overwrite the lifecycle policy at startup. The default is false. +#setup.dsl.overwrite: false + +# =================================== Kibana =================================== + +# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. +# This requires a Kibana endpoint configuration. 
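+# For example, a minimal endpoint configuration (illustrative host and
+# credentials, replace them with your own) might look like:
+#
+#setup.kibana:
+#  host: "https://kibana.example.internal:5601"
+#  username: "elastic"
+#  password: "changeme"
+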
+setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + #host: "localhost:5601" + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Optional HTTP path + #path: "" + + # Optional Kibana space ID. + #space.id: "" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * strict, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. If the Subject Alternative + # Name is empty, it returns an error. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # A root CA HEX encoded fingerprint. During the SSL handshake if the + # fingerprint matches the root CA certificate, it will be added to + # the provided list of root CAs (`certificate_authorities`), if the + # list is empty or not defined, the matching certificate will be the + # only one in the list. Then the normal SSL validation happens. + #ssl.ca_trusted_fingerprint: "" + + +# ================================== Logging =================================== + +# There are four options for the log output: file, stderr, syslog, eventlog +# The file output is the default. 
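+# For example, to route debug-level output for the publisher component to
+# stderr while troubleshooting (an illustrative combination of the options
+# documented below):
+#
+#logging.level: debug
+#logging.selectors: ["publisher"]
+#logging.to_stderr: true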
+ +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publisher", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to stderr. The default is false. +#logging.to_stderr: false + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. +#logging.to_eventlog: false + +# If enabled, Filebeat periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# A list of metrics namespaces to report in the logs. Defaults to [stats]. +# `stats` contains general Beat metrics. `dataset` may be present in some +# Beats and contains module or input metrics. +#logging.metrics.namespaces: [stats] + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. 
+#=============================== Events Logging ===============================
+# Some outputs log raw events on errors, such as indexing errors in the
+# Elasticsearch output. To prevent logging raw events (which may contain
+# sensitive information) together with other log messages, a separate
+# log file is used only for log entries containing raw events. It uses
+# the same level, selectors, and all other settings as the default
+# logger, but it has its own file configuration.
+#
+# Having a different log file for raw events also prevents event data
+# from drowning out the regular log files.
+#
+# IMPORTANT: No matter the default logger output configuration, raw events
+# will **always** be logged to a file configured by `logging.event_data.files`.
+
+# logging.event_data:
+# Logging to rotating files. Set logging.event_data.to_files to false to disable
+# logging to files.
+#logging.event_data.to_files: true
+#logging.event_data:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
+  #path: /var/log/filebeat
+
+  # The name of the files where the logs are written to.
+  #name: filebeat-event-data
+
+  # Configure log file size limit. If the limit is reached, log file will be
+  # automatically rotated.
+  #rotateeverybytes: 5242880 # = 5MB
+
+  # Number of rotated log files to keep. The oldest files will be deleted first.
+  #keepfiles: 2
+
+  # The permissions mask to apply when rotating log files. The default value is 0600.
+  # Must be a valid Unix-style file permissions mask expressed in octal notation.
+  #permissions: 0600
+
+  # Enable log file rotation on time intervals in addition to the size-based rotation.
+  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
+  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
+  # reported by the local system clock. All other intervals are calculated from the
+  # Unix epoch. Defaults to disabled.
+  #interval: 0
+
+  # Rotate existing logs on startup rather than appending to the existing
+  # file. Defaults to false.
+  # rotateonstartup: false
+
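Illustrative only, and essentially a lightly edited copy of the commented defaults above: the separate raw-event log could be kept small by lowering its rotation limits independently of the main log. The path is a placeholder, and the key layout simply mirrors the commented block above.

logging.event_data.to_files: true
logging.event_data:
  path: /var/log/filebeat          # placeholder log directory
  name: filebeat-event-data
  rotateeverybytes: 5242880        # rotate the raw-event log at roughly 5 MB
  keepfiles: 2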
+# ============================= X-Pack Monitoring ==============================
+# Filebeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#monitoring.enabled: false
+
+# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
+# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
+# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
+#monitoring.cluster_uuid:
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well.
+# Note that the settings should point to your Elasticsearch *monitoring* cluster.
+# Any setting that is not set is automatically inherited from the Elasticsearch
+# output configuration, so if you have the Elasticsearch output configured such
+# that it is pointing to your Elasticsearch monitoring cluster, you can simply
+# uncomment the following line.
+#monitoring.elasticsearch:
+
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (http and 9200)
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+  #hosts: ["localhost:9200"]
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Protocol - either `http` (default) or `https`.
+  #protocol: "https"
+
+  # Authentication credentials - either API key or username/password.
+  #api_key: "id:api_key"
+  #username: "beats_system"
+  #password: "changeme"
+
+  # Dictionary of HTTP parameters to pass within the URL with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Custom HTTP headers to add to each request
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server URL
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # The number of seconds to wait before trying to reconnect to Elasticsearch
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Elasticsearch after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Use SSL settings for HTTPS.
+  #ssl.enabled: true
+
+  # Controls the verification of certificates. Valid values are:
+  # * full, which verifies that the provided certificate is signed by a trusted
+  # authority (CA) and also verifies that the server's hostname (or IP address)
+  # matches the names identified within the certificate.
+  # * strict, which verifies that the provided certificate is signed by a trusted
+  # authority (CA) and also verifies that the server's hostname (or IP address)
+  # matches the names identified within the certificate. If the Subject Alternative
+  # Name is empty, it returns an error.
+  # * certificate, which verifies that the provided certificate is signed by a
+  # trusted authority (CA), but does not perform any hostname verification.
+  # * none, which performs no verification of the server's certificate. This
+  # mode disables many of the security benefits of SSL/TLS and should only be used
+  # after very careful consideration. It is primarily intended as a temporary
+  # diagnostic mechanism when attempting to resolve TLS errors; its use in
+  # production environments is strongly discouraged.
+  # The default value is full.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions from 1.1
+  # up to 1.3 are enabled.
+  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
+
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client certificate key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the certificate key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE-based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  # Configure a pin that can be used to do extra validation of the verified certificate chain;
+  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
+  #
+  # The pin is a base64 encoded string of the SHA-256 fingerprint.
+  #ssl.ca_sha256: ""
+
+  # A root CA HEX encoded fingerprint. During the SSL handshake, if the
+  # fingerprint matches the root CA certificate, it will be added to
+  # the provided list of root CAs (`certificate_authorities`). If the
+  # list is empty or not defined, the matching certificate will be the
+  # only one in the list. Then the normal SSL validation happens.
+  #ssl.ca_trusted_fingerprint: ""
+
+  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
+  #kerberos.enabled: true
+
+  # Authentication type to use with Kerberos. Available options: keytab, password.
+  #kerberos.auth_type: password
+
+  # Path to the keytab file. It is used when auth_type is set to keytab.
+  #kerberos.keytab: /etc/elastic.keytab
+
+  # Path to the Kerberos configuration.
+  #kerberos.config_path: /etc/krb5.conf
+
+  # Name of the Kerberos user.
+  #kerberos.username: elastic
+
+  # Password of the Kerberos user. It is used when auth_type is set to password.
+  #kerberos.password: changeme
+
+  # Kerberos realm.
+  #kerberos.realm: ELASTIC
+
+  #metrics.period: 10s
+  #state.period: 1m
+
+# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`
+# setting. You can find the value for this setting in the Elastic Cloud web UI.
+#monitoring.cloud.id:
+
+# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`
+# and `monitoring.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#monitoring.cloud.auth:
+
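A sketch, not part of the patch: pointing the monitoring reporter at a dedicated monitoring cluster with an API key, as the comments above describe. The host and key are placeholders; any setting left out is inherited from the Elasticsearch output configuration.

monitoring.enabled: true
monitoring.elasticsearch:
  hosts: ["https://monitoring.example.internal:9200"]   # placeholder monitoring cluster
  api_key: "id:api_key"                                 # placeholder API key
  ssl.enabled: true
  ssl.verification_mode: full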
+# =============================== HTTP Endpoint ================================
+
+# Each beat can expose internal metrics through an HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
+
+# Define which user should own the named pipe.
+#http.named_pipe.user:
+
+# Define which permissions should be applied to the named pipe; use the Security
+# Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with
+# `http.user`.
+#http.named_pipe.security_descriptor:
+
+# Defines if the HTTP pprof endpoints are enabled.
+# It is recommended that this is only enabled on localhost as these endpoints may leak data.
+#http.pprof.enabled: false
+
+# Controls the fraction of goroutine blocking events that are reported in the
+# blocking profile.
+#http.pprof.block_profile_rate: 0
+
+# Controls the fraction of memory allocations that are recorded and reported in
+# the memory profile.
+#http.pprof.mem_profile_rate: 524288
+
+# Controls the fraction of mutex contention events that are reported in the
+# mutex profile.
+#http.pprof.mutex_profile_rate: 0
+
+# ============================== Process Security ==============================
+
+# Enable or disable seccomp system call filtering on Linux. Default is enabled.
+#seccomp.enabled: true
+
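For illustration only: enabling the stats endpoint on localhost, as described above, needs just the three settings below (5066 is the documented default port). The metrics could then be read with, for example, curl http://localhost:5066/stats?pretty.

http.enabled: true
http.host: localhost
http.port: 5066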
+# ============================== Instrumentation ===============================
+
+# Instrumentation support for filebeat.
+#instrumentation:
+    # Set to true to enable instrumentation of filebeat.
+    #enabled: false
+
+    # Environment in which filebeat is running (e.g. staging, production).
+    #environment: ""
+
+    # APM Server hosts to report instrumentation results to.
+    #hosts:
+    #  - http://localhost:8200
+
+    # API Key for the APM Server(s).
+    # If api_key is set then secret_token will be ignored.
+    #api_key:
+
+    # Secret token for the APM Server(s).
+    #secret_token:
+
+    # Enable profiling of the server, recording profile samples as events.
+    #
+    # This feature is experimental.
+    #profiling:
+        #cpu:
+            # Set to true to enable CPU profiling.
+            #enabled: false
+            #interval: 60s
+            #duration: 10s
+        #heap:
+            # Set to true to enable heap profiling.
+            #enabled: false
+            #interval: 60s
+
+# ================================= Migration ==================================
+
+# This allows enabling 6.7 migration aliases.
+#migration.6_to_7.enabled: false
+
+# =============================== Feature Flags ================================
+
+# Enable and configure feature flags.
+#features:
+#  fqdn:
+#    enabled: true
+
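As a final sketch (not part of the patch): the instrumentation block above, uncommented to report traces and metrics to a local APM Server; the environment name and token are placeholders.

instrumentation:
  enabled: true
  environment: "staging"        # placeholder environment label
  hosts:
    - http://localhost:8200     # default local APM Server address from the comments above
  secret_token: "changeme"      # placeholder token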