diff --git a/.github/workflows/build_and_test.yaml b/.github/workflows/build_and_test.yaml index bafd90d8351..76a81fe9b45 100644 --- a/.github/workflows/build_and_test.yaml +++ b/.github/workflows/build_and_test.yaml @@ -114,7 +114,19 @@ jobs: strategy: fail-fast: false matrix: - version: [ v1.28.13, v1.29.8, v1.30.4, v1.31.0 ] + target: + - version: v1.28.13 + ipFamily: ipv4 + - version: v1.29.8 + ipFamily: ipv4 + - version: v1.30.4 + ipFamily: ipv4 + - version: v1.31.0 + ipFamily: ipv4 + - version: v1.31.0 + ipFamily: ipv6 # only run ipv6 test on latest version to save time + - version: v1.31.0 + ipFamily: dual # only run dual test on latest version to save time steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: ./tools/github-actions/setup-deps @@ -133,8 +145,9 @@ jobs: # E2E - name: Run E2E Tests env: - KIND_NODE_TAG: ${{ matrix.version }} + KIND_NODE_TAG: ${{ matrix.target.version }} IMAGE_PULL_POLICY: IfNotPresent + IP_FAMILY: ${{ matrix.target.ipFamily }} run: make e2e benchmark-test: diff --git a/charts/gateway-addons-helm/Chart.lock b/charts/gateway-addons-helm/Chart.lock index 228a952fdc1..4e15b355cb5 100644 --- a/charts/gateway-addons-helm/Chart.lock +++ b/charts/gateway-addons-helm/Chart.lock @@ -8,6 +8,9 @@ dependencies: - name: fluent-bit repository: https://fluent.github.io/helm-charts version: 0.30.4 +- name: alloy + repository: https://grafana.github.io/helm-charts + version: 0.9.2 - name: loki repository: https://grafana.github.io/helm-charts version: 4.8.0 @@ -17,5 +20,5 @@ dependencies: - name: opentelemetry-collector repository: https://open-telemetry.github.io/opentelemetry-helm-charts version: 0.108.0 -digest: sha256:ea6663bb1358123b96b69d2c5b0b8c20650a43dc39b24c482f0560201fd2cc3a -generated: "2024-10-19T12:59:47.251089661+02:00" +digest: sha256:bc634c59972bfd4a01e0f4310a4949095752e659a9b5cb1d9c0fbe9a86f37011 +generated: "2024-10-25T10:55:26.755739+08:00" diff --git a/charts/gateway-addons-helm/Chart.yaml b/charts/gateway-addons-helm/Chart.yaml index 2571ccec51e..3a2303ef8c9 100644 --- a/charts/gateway-addons-helm/Chart.yaml +++ b/charts/gateway-addons-helm/Chart.yaml @@ -37,6 +37,10 @@ dependencies: repository: https://fluent.github.io/helm-charts version: 0.30.4 condition: fluent-bit.enabled + - name: alloy + repository: https://grafana.github.io/helm-charts + version: 0.9.2 + condition: alloy.enabled - name: loki version: 4.8.0 repository: https://grafana.github.io/helm-charts diff --git a/charts/gateway-addons-helm/README.md b/charts/gateway-addons-helm/README.md index 11eab2edf84..d86eb5c126a 100644 --- a/charts/gateway-addons-helm/README.md +++ b/charts/gateway-addons-helm/README.md @@ -22,6 +22,7 @@ An Add-ons Helm chart for Envoy Gateway | Repository | Name | Version | |------------|------|---------| | https://fluent.github.io/helm-charts | fluent-bit | 0.30.4 | +| https://grafana.github.io/helm-charts | alloy | 0.9.2 | | https://grafana.github.io/helm-charts | grafana | 8.0.0 | | https://grafana.github.io/helm-charts | loki | 4.8.0 | | https://grafana.github.io/helm-charts | tempo | 1.3.1 | @@ -55,6 +56,9 @@ To uninstall the chart: | Key | Type | Default | Description | |-----|------|---------|-------------| +| alloy.alloy.configMap.content | string | `"// Write your Alloy config here:\nlogging {\n level = \"info\"\n format = \"logfmt\"\n}\nloki.write \"alloy\" {\n endpoint {\n url = \"http://loki.monitoring.svc:3100/loki/api/v1/push\"\n }\n}\n// discovery.kubernetes allows you to find scrape targets from Kubernetes 
resources.\n// It watches cluster state and ensures targets are continually synced with what is currently running in your cluster.\ndiscovery.kubernetes \"pod\" {\n role = \"pod\"\n}\n\n// discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules.\n// If no rules are defined, then the input targets are exported as-is.\ndiscovery.relabel \"pod_logs\" {\n targets = discovery.kubernetes.pod.targets\n\n // Label creation - \"namespace\" field from \"__meta_kubernetes_namespace\"\n rule {\n source_labels = [\"__meta_kubernetes_namespace\"]\n action = \"replace\"\n target_label = \"namespace\"\n }\n\n // Label creation - \"pod\" field from \"__meta_kubernetes_pod_name\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_name\"]\n action = \"replace\"\n target_label = \"pod\"\n }\n\n // Label creation - \"container\" field from \"__meta_kubernetes_pod_container_name\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_container_name\"]\n action = \"replace\"\n target_label = \"container\"\n }\n\n // Label creation - \"app\" field from \"__meta_kubernetes_pod_label_app_kubernetes_io_name\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_label_app_kubernetes_io_name\"]\n action = \"replace\"\n target_label = \"app\"\n }\n\n // Label creation - \"job\" field from \"__meta_kubernetes_namespace\" and \"__meta_kubernetes_pod_container_name\"\n // Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name\n rule {\n source_labels = [\"__meta_kubernetes_namespace\", \"__meta_kubernetes_pod_container_name\"]\n action = \"replace\"\n target_label = \"job\"\n separator = \"/\"\n replacement = \"$1\"\n }\n\n // Label creation - \"container\" field from \"__meta_kubernetes_pod_uid\" and \"__meta_kubernetes_pod_container_name\"\n // Concatenate values __meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name.log\n rule {\n source_labels = [\"__meta_kubernetes_pod_uid\", \"__meta_kubernetes_pod_container_name\"]\n action = \"replace\"\n target_label = \"__path__\"\n separator = \"/\"\n replacement = \"/var/log/pods/*$1/*.log\"\n }\n\n // Label creation - \"container_runtime\" field from \"__meta_kubernetes_pod_container_id\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_container_id\"]\n action = \"replace\"\n target_label = \"container_runtime\"\n regex = \"^(\\\\S+):\\\\/\\\\/.+$\"\n replacement = \"$1\"\n }\n}\n\n// loki.source.kubernetes tails logs from Kubernetes containers using the Kubernetes API.\nloki.source.kubernetes \"pod_logs\" {\n targets = discovery.relabel.pod_logs.output\n forward_to = [loki.process.pod_logs.receiver]\n}\n// loki.process receives log entries from other Loki components, applies one or more processing stages,\n// and forwards the results to the list of receivers in the component’s arguments.\nloki.process \"pod_logs\" {\n stage.static_labels {\n values = {\n cluster = \"envoy-gateway\",\n }\n }\n\n forward_to = [loki.write.alloy.receiver]\n}"` | | +| alloy.enabled | bool | `true` | | +| alloy.fullnameOverride | string | `"alloy"` | | | fluent-bit.config.filters | string | `"[FILTER]\n Name kubernetes\n Match kube.*\n Merge_Log On\n Keep_Log Off\n K8S-Logging.Parser On\n K8S-Logging.Exclude On\n\n[FILTER]\n Name grep\n Match kube.*\n Regex $kubernetes['container_name'] ^envoy$\n\n[FILTER]\n Name parser\n Match kube.*\n Key_Name log\n Parser envoy\n Reserve_Data True\n"` | | | fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n multiline.parser docker, 
cri\n Tag kube.*\n Mem_Buf_Limit 5MB\n Skip_Long_Lines On\n"` | | | fluent-bit.config.outputs | string | `"[OUTPUT]\n Name loki\n Match kube.*\n Host loki.monitoring.svc.cluster.local\n Port 3100\n Labels job=fluentbit, app=$kubernetes['labels']['app'], k8s_namespace_name=$kubernetes['namespace_name'], k8s_pod_name=$kubernetes['pod_name'], k8s_container_name=$kubernetes['container_name']\n"` | | diff --git a/charts/gateway-addons-helm/values.yaml b/charts/gateway-addons-helm/values.yaml index 8ee3cbfaea9..5921559ef16 100644 --- a/charts/gateway-addons-helm/values.yaml +++ b/charts/gateway-addons-helm/values.yaml @@ -60,6 +60,7 @@ prometheus: # Values for Fluent-bit dependency +# TODO: remove fluent-bit dependency fluent-bit: enabled: true image: @@ -167,6 +168,109 @@ loki: gateway: enabled: false +# Values for Alloy dependency +alloy: + enabled: true + fullnameOverride: alloy + alloy: + configMap: + content: |- + // Write your Alloy config here: + logging { + level = "info" + format = "logfmt" + } + loki.write "alloy" { + endpoint { + url = "http://loki.monitoring.svc:3100/loki/api/v1/push" + } + } + // discovery.kubernetes allows you to find scrape targets from Kubernetes resources. + // It watches cluster state and ensures targets are continually synced with what is currently running in your cluster. + discovery.kubernetes "pod" { + role = "pod" + } + + // discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules. + // If no rules are defined, then the input targets are exported as-is. + discovery.relabel "pod_logs" { + targets = discovery.kubernetes.pod.targets + + // Label creation - "namespace" field from "__meta_kubernetes_namespace" + rule { + source_labels = ["__meta_kubernetes_namespace"] + action = "replace" + target_label = "namespace" + } + + // Label creation - "pod" field from "__meta_kubernetes_pod_name" + rule { + source_labels = ["__meta_kubernetes_pod_name"] + action = "replace" + target_label = "pod" + } + + // Label creation - "container" field from "__meta_kubernetes_pod_container_name" + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "container" + } + + // Label creation - "app" field from "__meta_kubernetes_pod_label_app_kubernetes_io_name" + rule { + source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_name"] + action = "replace" + target_label = "app" + } + + // Label creation - "job" field from "__meta_kubernetes_namespace" and "__meta_kubernetes_pod_container_name" + // Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name + rule { + source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "job" + separator = "/" + replacement = "$1" + } + + // Label creation - "container" field from "__meta_kubernetes_pod_uid" and "__meta_kubernetes_pod_container_name" + // Concatenate values __meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name.log + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "__path__" + separator = "/" + replacement = "/var/log/pods/*$1/*.log" + } + + // Label creation - "container_runtime" field from "__meta_kubernetes_pod_container_id" + rule { + source_labels = ["__meta_kubernetes_pod_container_id"] + action = "replace" + target_label = "container_runtime" + regex = "^(\\S+):\\/\\/.+$" + replacement = "$1" + } + } + + // loki.source.kubernetes tails 
logs from Kubernetes containers using the Kubernetes API. + loki.source.kubernetes "pod_logs" { + targets = discovery.relabel.pod_logs.output + forward_to = [loki.process.pod_logs.receiver] + } + // loki.process receives log entries from other Loki components, applies one or more processing stages, + // and forwards the results to the list of receivers in the component’s arguments. + loki.process "pod_logs" { + stage.static_labels { + values = { + cluster = "envoy-gateway", + } + } + + forward_to = [loki.write.alloy.receiver] + } + # Values for Tempo dependency tempo: diff --git a/charts/gateway-helm/templates/envoy-gateway-deployment.yaml b/charts/gateway-helm/templates/envoy-gateway-deployment.yaml index 7746dd2e4ac..638497a07c5 100644 --- a/charts/gateway-helm/templates/envoy-gateway-deployment.yaml +++ b/charts/gateway-helm/templates/envoy-gateway-deployment.yaml @@ -46,6 +46,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/examples/redis/redis.yaml b/examples/redis/redis.yaml index cee4a37c559..98d1c20902b 100644 --- a/examples/redis/redis.yaml +++ b/examples/redis/redis.yaml @@ -60,6 +60,10 @@ data: kind: EnvoyGateway provider: type: Kubernetes + kubernetes: + rateLimitDeployment: + container: + image: ghcr.io/zirain-dev/ratelimit:latest # remove this line when the upstream PR is merged gateway: controllerName: gateway.envoyproxy.io/gatewayclass-controller extensionApis: diff --git a/internal/cmd/egctl/testdata/translate/out/default-resources.all.yaml b/internal/cmd/egctl/testdata/translate/out/default-resources.all.yaml index b965d6d9818..c33708e47a4 100644 --- a/internal/cmd/egctl/testdata/translate/out/default-resources.all.yaml +++ b/internal/cmd/egctl/testdata/translate/out/default-resources.all.yaml @@ -43,7 +43,7 @@ envoyProxyForGatewayClass: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: diff --git a/internal/cmd/envoy/shutdown_manager.go b/internal/cmd/envoy/shutdown_manager.go index 48f624bb67a..8807beae74b 100644 --- a/internal/cmd/envoy/shutdown_manager.go +++ b/internal/cmd/envoy/shutdown_manager.go @@ -170,8 +170,9 @@ func Shutdown(drainTimeout time.Duration, minDrainDuration time.Duration, exitAt // postEnvoyAdminAPI sends a POST request to the Envoy admin API func postEnvoyAdminAPI(path string) error { + // TODO: change bootstrap.AdminAddress() to localhost because they're in the same pod?
if resp, err := http.Post(fmt.Sprintf("http://%s:%d/%s", - bootstrap.EnvoyAdminAddress, bootstrap.EnvoyAdminPort, path), "application/json", nil); err != nil { + bootstrap.AdminAddress(), bootstrap.EnvoyAdminPort, path), "application/json", nil); err != nil { return err } else { defer resp.Body.Close() @@ -187,7 +188,7 @@ func postEnvoyAdminAPI(path string) error { func getTotalConnections() (*int, error) { // Send request to Envoy admin API to retrieve server.total_connections stat if resp, err := http.Get(fmt.Sprintf("http://%s:%d//stats?filter=^server\\.total_connections$&format=json", - bootstrap.EnvoyAdminAddress, bootstrap.EnvoyAdminPort)); err != nil { + bootstrap.AdminAddress(), bootstrap.EnvoyAdminPort)); err != nil { return nil, err } else { defer resp.Body.Close() diff --git a/internal/gatewayapi/listener.go b/internal/gatewayapi/listener.go index 30e75ad6197..0ab2a722048 100644 --- a/internal/gatewayapi/listener.go +++ b/internal/gatewayapi/listener.go @@ -22,6 +22,7 @@ import ( "github.com/envoyproxy/gateway/internal/ir" "github.com/envoyproxy/gateway/internal/utils" "github.com/envoyproxy/gateway/internal/utils/naming" + "github.com/envoyproxy/gateway/internal/utils/net" ) var _ ListenersTranslator = (*Translator)(nil) @@ -99,6 +100,12 @@ func (t *Translator) ProcessListeners(gateways []*GatewayContext, xdsIR resource if !isReady { continue } + + // TODO: find a better way to do this + address := "0.0.0.0" + if net.IsIPv6Pod() { + address = "::" + } // Add the listener to the Xds IR servicePort := &protocolPort{protocol: listener.Protocol, port: int32(listener.Port)} containerPort := servicePortToContainerPort(int32(listener.Port), gateway.envoyProxy) @@ -107,7 +114,7 @@ func (t *Translator) ProcessListeners(gateways []*GatewayContext, xdsIR resource irListener := &ir.HTTPListener{ CoreListenerDetails: ir.CoreListenerDetails{ Name: irListenerName(listener), - Address: "0.0.0.0", + Address: address, Port: uint32(containerPort), Metadata: buildListenerMetadata(listener, gateway), IPFamily: getIPFamily(gateway.envoyProxy), @@ -134,7 +141,7 @@ func (t *Translator) ProcessListeners(gateways []*GatewayContext, xdsIR resource irListener := &ir.TCPListener{ CoreListenerDetails: ir.CoreListenerDetails{ Name: irListenerName(listener), - Address: "0.0.0.0", + Address: address, Port: uint32(containerPort), IPFamily: getIPFamily(gateway.envoyProxy), }, @@ -150,7 +157,7 @@ func (t *Translator) ProcessListeners(gateways []*GatewayContext, xdsIR resource irListener := &ir.UDPListener{ CoreListenerDetails: ir.CoreListenerDetails{ Name: irListenerName(listener), - Address: "0.0.0.0", + Address: address, Port: uint32(containerPort), }, } diff --git a/internal/infrastructure/kubernetes/infra.go b/internal/infrastructure/kubernetes/infra.go index 4285f395967..3d9c3e7b14f 100644 --- a/internal/infrastructure/kubernetes/infra.go +++ b/internal/infrastructure/kubernetes/infra.go @@ -50,9 +50,6 @@ type Infra struct { // Namespace is the Namespace used for managed infra. Namespace string - // DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". - DNSDomain string - // EnvoyGateway is the configuration used to startup Envoy Gateway.
EnvoyGateway *egv1a1.EnvoyGateway @@ -64,7 +61,6 @@ type Infra struct { func NewInfra(cli client.Client, cfg *config.Server) *Infra { return &Infra{ Namespace: cfg.Namespace, - DNSDomain: cfg.DNSDomain, EnvoyGateway: cfg.EnvoyGateway, Client: New(cli), } diff --git a/internal/infrastructure/kubernetes/proxy/resource.go b/internal/infrastructure/kubernetes/proxy/resource.go index aa5a4d64e70..615ec05ba40 100644 --- a/internal/infrastructure/kubernetes/proxy/resource.go +++ b/internal/infrastructure/kubernetes/proxy/resource.go @@ -83,8 +83,6 @@ func expectedProxyContainers(infra *ir.ProxyInfra, containerSpec *egv1a1.KubernetesContainerSpec, shutdownConfig *egv1a1.ShutdownConfig, shutdownManager *egv1a1.ShutdownManager, - namespace string, - dnsDomain string, ) ([]corev1.Container, error) { // Define slice to hold container ports var ports []corev1.ContainerPort @@ -134,7 +132,6 @@ func expectedProxyContainers(infra *ir.ProxyInfra, TrustedCA: filepath.Join("/sds", common.SdsCAFilename), }, MaxHeapSizeBytes: maxHeapSizeBytes, - XdsServerHost: ptr.To(fmt.Sprintf("%s.%s.svc.%s", config.EnvoyGatewayServiceName, namespace, dnsDomain)), } args, err := common.BuildProxyArgs(infra, shutdownConfig, bootstrapConfigOptions, fmt.Sprintf("$(%s)", envoyPodEnvVar)) @@ -347,6 +344,14 @@ func expectedVolumes(name string, pod *egv1a1.KubernetesPodSpec) []corev1.Volume // expectedContainerEnv returns expected proxy container envs. func expectedContainerEnv(containerSpec *egv1a1.KubernetesContainerSpec) []corev1.EnvVar { env := []corev1.EnvVar{ + { + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, { Name: envoyNsEnvVar, ValueFrom: &corev1.EnvVarSource{ diff --git a/internal/infrastructure/kubernetes/proxy/resource_provider.go b/internal/infrastructure/kubernetes/proxy/resource_provider.go index 9830bafad71..b1d47e906d9 100644 --- a/internal/infrastructure/kubernetes/proxy/resource_provider.go +++ b/internal/infrastructure/kubernetes/proxy/resource_provider.go @@ -45,16 +45,12 @@ type ResourceRender struct { // Namespace is the Namespace used for managed infra. Namespace string - // DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". 
- DNSDomain string - ShutdownManager *egv1a1.ShutdownManager } -func NewResourceRender(ns string, dnsDomain string, infra *ir.ProxyInfra, gateway *egv1a1.EnvoyGateway) *ResourceRender { +func NewResourceRender(ns string, infra *ir.ProxyInfra, gateway *egv1a1.EnvoyGateway) *ResourceRender { return &ResourceRender{ Namespace: ns, - DNSDomain: dnsDomain, infra: infra, ShutdownManager: gateway.GetEnvoyGatewayProvider().GetEnvoyGatewayKubeProvider().ShutdownManager, } @@ -262,7 +258,7 @@ func (r *ResourceRender) Deployment() (*appsv1.Deployment, error) { proxyConfig := r.infra.GetProxyConfig() // Get expected bootstrap configurations rendered ProxyContainers - containers, err := expectedProxyContainers(r.infra, deploymentConfig.Container, proxyConfig.Spec.Shutdown, r.ShutdownManager, r.Namespace, r.DNSDomain) + containers, err := expectedProxyContainers(r.infra, deploymentConfig.Container, proxyConfig.Spec.Shutdown, r.ShutdownManager) if err != nil { return nil, err } @@ -364,7 +360,7 @@ func (r *ResourceRender) DaemonSet() (*appsv1.DaemonSet, error) { proxyConfig := r.infra.GetProxyConfig() // Get expected bootstrap configurations rendered ProxyContainers - containers, err := expectedProxyContainers(r.infra, daemonSetConfig.Container, proxyConfig.Spec.Shutdown, r.ShutdownManager, r.Namespace, r.DNSDomain) + containers, err := expectedProxyContainers(r.infra, daemonSetConfig.Container, proxyConfig.Spec.Shutdown, r.ShutdownManager) if err != nil { return nil, err } diff --git a/internal/infrastructure/kubernetes/proxy/resource_provider_test.go b/internal/infrastructure/kubernetes/proxy/resource_provider_test.go index 0cf54a40427..8c4138a3825 100644 --- a/internal/infrastructure/kubernetes/proxy/resource_provider_test.go +++ b/internal/infrastructure/kubernetes/proxy/resource_provider_test.go @@ -564,7 +564,7 @@ func TestDeployment(t *testing.T) { tc.infra.Proxy.Config.Spec.ExtraArgs = tc.extraArgs } - r := NewResourceRender(cfg.Namespace, cfg.DNSDomain, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) + r := NewResourceRender(cfg.Namespace, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) dp, err := r.Deployment() require.NoError(t, err) @@ -993,7 +993,7 @@ func TestDaemonSet(t *testing.T) { tc.infra.Proxy.Config.Spec.ExtraArgs = tc.extraArgs } - r := NewResourceRender(cfg.Namespace, cfg.DNSDomain, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) + r := NewResourceRender(cfg.Namespace, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) ds, err := r.DaemonSet() require.NoError(t, err) @@ -1143,7 +1143,7 @@ func TestService(t *testing.T) { provider.EnvoyService = tc.service } - r := NewResourceRender(cfg.Namespace, cfg.DNSDomain, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) + r := NewResourceRender(cfg.Namespace, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) svc, err := r.Service() require.NoError(t, err) @@ -1186,7 +1186,7 @@ func TestConfigMap(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - r := NewResourceRender(cfg.Namespace, cfg.DNSDomain, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) + r := NewResourceRender(cfg.Namespace, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) cm, err := r.ConfigMap() require.NoError(t, err) @@ -1229,7 +1229,7 @@ func TestServiceAccount(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - r := NewResourceRender(cfg.Namespace, cfg.DNSDomain, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) + r := NewResourceRender(cfg.Namespace, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) sa, err := r.ServiceAccount() require.NoError(t, err) @@ -1285,7 
+1285,7 @@ func TestPDB(t *testing.T) { provider.GetEnvoyProxyKubeProvider() - r := NewResourceRender(cfg.Namespace, cfg.DNSDomain, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) + r := NewResourceRender(cfg.Namespace, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) pdb, err := r.PodDisruptionBudget() require.NoError(t, err) @@ -1371,7 +1371,7 @@ func TestHorizontalPodAutoscaler(t *testing.T) { } provider.GetEnvoyProxyKubeProvider() - r := NewResourceRender(cfg.Namespace, cfg.DNSDomain, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) + r := NewResourceRender(cfg.Namespace, tc.infra.GetProxyInfra(), cfg.EnvoyGateway) hpa, err := r.HorizontalPodAutoscaler() require.NoError(t, err) diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/component-level.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/component-level.yaml index 4f9107fb4d7..0b5874fd16c 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/component-level.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/component-level.yaml @@ -46,6 +46,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -122,6 +126,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/custom.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/custom.yaml index 5683159e6c5..50b1438559a 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/custom.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/custom.yaml @@ -75,7 +75,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -131,7 +131,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -229,6 +229,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -299,6 +303,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/default-env.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/default-env.yaml index 75db2fc35a8..8646fcd8836 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/default-env.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/default-env.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -228,6 +228,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ 
-298,6 +302,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/default.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/default.yaml index e2d420a0407..6343e40c1a3 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/default.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/default.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -213,6 +213,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -289,6 +293,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/disable-prometheus.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/disable-prometheus.yaml index a351838340a..f171b03de2b 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/disable-prometheus.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/disable-prometheus.yaml @@ -70,7 +70,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -104,7 +104,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -187,6 +187,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -260,6 +264,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/extension-env.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/extension-env.yaml index 1ed87f21c2c..59db1e52ba8 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/extension-env.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/extension-env.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -228,6 +228,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -302,6 +306,10 @@ spec: command: 
- envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/override-labels-and-annotations.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/override-labels-and-annotations.yaml index 70534adc4cc..a0a94c7834c 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/override-labels-and-annotations.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/override-labels-and-annotations.yaml @@ -83,7 +83,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -139,7 +139,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -222,6 +222,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -298,6 +302,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/patch-daemonset.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/patch-daemonset.yaml index b3ed37d3241..4345875cb3e 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/patch-daemonset.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/patch-daemonset.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -213,6 +213,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -289,6 +293,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/shutdown-manager.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/shutdown-manager.yaml index 97debe2f4e8..7a6359c89cc 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/shutdown-manager.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/shutdown-manager.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -213,6 +213,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: 
ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -294,6 +298,10 @@ spec: value: env_a_value - name: env_b value: env_b_value + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/volumes.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/volumes.yaml index 64d66281152..f82d66d6b2c 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/volumes.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/volumes.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -228,6 +228,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -302,6 +306,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-annotations.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-annotations.yaml index b5fadea1445..1e73b9ca326 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-annotations.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-annotations.yaml @@ -79,7 +79,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -135,7 +135,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -218,6 +218,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -294,6 +298,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-concurrency.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-concurrency.yaml index aceabd683a3..76abb19e9af 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-concurrency.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-concurrency.yaml @@ -46,6 +46,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -122,6 +126,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-extra-args.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-extra-args.yaml index 6cc85628272..365738a38c1 100644 --- 
a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-extra-args.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-extra-args.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -215,6 +215,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -291,6 +295,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-image-pull-secrets.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-image-pull-secrets.yaml index 3f6090692bb..e1d825552ad 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-image-pull-secrets.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-image-pull-secrets.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -213,6 +213,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -289,6 +293,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-name.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-name.yaml index 01a84c9e25f..53ddc9f8418 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-name.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-name.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -213,6 +213,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -289,6 +293,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-node-selector.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-node-selector.yaml index c32b7625ae7..e7a2a135009 
100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-node-selector.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-node-selector.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -213,6 +213,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -289,6 +293,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-topology-spread-constraints.yaml b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-topology-spread-constraints.yaml index 06e48c355a3..302bea29ada 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-topology-spread-constraints.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/daemonsets/with-topology-spread-constraints.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -130,7 +130,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -213,6 +213,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -289,6 +293,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/bootstrap.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/bootstrap.yaml index edbcf01d06f..bc725332478 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/bootstrap.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/bootstrap.yaml @@ -49,6 +49,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -125,6 +129,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/component-level.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/component-level.yaml index 6ac7da41299..61dce22e4bc 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/component-level.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/component-level.yaml @@ -50,6 +50,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -126,6 +130,10 @@ spec: command: - 
envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/custom.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/custom.yaml index 3a080205897..c53a3d049c4 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/custom.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/custom.yaml @@ -80,7 +80,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -136,7 +136,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -234,6 +234,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -304,6 +308,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/custom_with_initcontainers.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/custom_with_initcontainers.yaml index c26ec592cd9..da48336938e 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/custom_with_initcontainers.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/custom_with_initcontainers.yaml @@ -80,7 +80,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -136,7 +136,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -234,6 +234,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -306,6 +310,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/default-env.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/default-env.yaml index 4a7a1b1c1c0..7262b2e3971 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/default-env.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/default-env.yaml @@ -79,7 +79,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -135,7 +135,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -233,6 +233,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -303,6 +307,10 @@ spec: command: - 
envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/default.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/default.yaml index 13d12e3fa40..793c1cf808a 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/default.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/default.yaml @@ -78,7 +78,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -134,7 +134,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -217,6 +217,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -293,6 +297,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/disable-prometheus.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/disable-prometheus.yaml index 29207aaa3f8..5779a5338cb 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/disable-prometheus.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/disable-prometheus.yaml @@ -74,7 +74,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -108,7 +108,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -191,6 +191,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -264,6 +268,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/extension-env.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/extension-env.yaml index 7c724bce6ce..8782ca79080 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/extension-env.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/extension-env.yaml @@ -79,7 +79,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -135,7 +135,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -233,6 +233,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -307,6 +311,10 @@ spec: command: - envoy-gateway env: + 
- name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/override-labels-and-annotations.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/override-labels-and-annotations.yaml index aa87ba5b43c..e47e181fdff 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/override-labels-and-annotations.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/override-labels-and-annotations.yaml @@ -87,7 +87,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -143,7 +143,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -226,6 +226,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -302,6 +306,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/patch-deployment.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/patch-deployment.yaml index f4bf7a49f83..61cb11e1fd1 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/patch-deployment.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/patch-deployment.yaml @@ -78,7 +78,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -134,7 +134,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -217,6 +217,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -293,6 +297,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/shutdown-manager.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/shutdown-manager.yaml index 4281e7c3697..c209a034409 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/shutdown-manager.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/shutdown-manager.yaml @@ -78,7 +78,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -134,7 +134,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -217,6 +217,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE 
valueFrom: fieldRef: @@ -298,6 +302,10 @@ spec: value: env_a_value - name: env_b value: env_b_value + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/volumes.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/volumes.yaml index ff84e18cdf2..d0474c474e9 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/volumes.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/volumes.yaml @@ -79,7 +79,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -135,7 +135,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -233,6 +233,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -307,6 +311,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-annotations.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-annotations.yaml index 2f44c8853d9..5b75a28c94a 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-annotations.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-annotations.yaml @@ -83,7 +83,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -139,7 +139,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -222,6 +222,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -298,6 +302,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-concurrency.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-concurrency.yaml index 3cafba40548..417399236e1 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-concurrency.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-concurrency.yaml @@ -50,6 +50,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -126,6 +130,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-empty-memory-limits.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-empty-memory-limits.yaml index 432b8f31188..2a7d130304a 100644 --- 
a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-empty-memory-limits.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-empty-memory-limits.yaml @@ -78,7 +78,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -134,7 +134,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -217,6 +217,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -292,6 +296,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-extra-args.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-extra-args.yaml index bd87ca6b3a6..c7383e8fe48 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-extra-args.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-extra-args.yaml @@ -78,7 +78,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -134,7 +134,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -219,6 +219,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -295,6 +299,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-image-pull-secrets.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-image-pull-secrets.yaml index 8033aa516af..38e67df731a 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-image-pull-secrets.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-image-pull-secrets.yaml @@ -78,7 +78,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -134,7 +134,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -217,6 +217,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -293,6 +297,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-name.yaml 
b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-name.yaml index 5c7da4fcdd3..4e237138ea3 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-name.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-name.yaml @@ -78,7 +78,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -134,7 +134,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -217,6 +217,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -293,6 +297,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-node-selector.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-node-selector.yaml index 93c65430254..a0bb5ef883e 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-node-selector.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-node-selector.yaml @@ -78,7 +78,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -134,7 +134,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -217,6 +217,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -293,6 +297,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-topology-spread-constraints.yaml b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-topology-spread-constraints.yaml index 93f0cd1456a..3bf8ab2aead 100644 --- a/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-topology-spread-constraints.yaml +++ b/internal/infrastructure/kubernetes/proxy/testdata/deployments/with-topology-spread-constraints.yaml @@ -78,7 +78,7 @@ spec: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: @@ -134,7 +134,7 @@ spec: endpoint: address: socket_address: - address: envoy-gateway.envoy-gateway-system.svc.cluster.local + address: envoy-gateway port_value: 18000 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: @@ -217,6 +217,10 @@ spec: command: - envoy env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: @@ -293,6 +297,10 @@ spec: command: - envoy-gateway env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git 
a/internal/infrastructure/kubernetes/proxy_configmap_test.go b/internal/infrastructure/kubernetes/proxy_configmap_test.go index ec4c0ec74e7..ef461fdde0a 100644 --- a/internal/infrastructure/kubernetes/proxy_configmap_test.go +++ b/internal/infrastructure/kubernetes/proxy_configmap_test.go @@ -111,7 +111,7 @@ func TestCreateOrUpdateProxyConfigMap(t *testing.T) { Build() } kube := NewInfra(cli, cfg) - r := proxy.NewResourceRender(kube.Namespace, kube.DNSDomain, infra.GetProxyInfra(), kube.EnvoyGateway) + r := proxy.NewResourceRender(kube.Namespace, infra.GetProxyInfra(), kube.EnvoyGateway) err := kube.createOrUpdateConfigMap(context.Background(), r) require.NoError(t, err) actual := &corev1.ConfigMap{ @@ -169,7 +169,7 @@ func TestDeleteConfigProxyMap(t *testing.T) { infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNamespaceLabel] = "default" infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNameLabel] = infra.Proxy.Name - r := proxy.NewResourceRender(kube.Namespace, kube.DNSDomain, infra.GetProxyInfra(), kube.EnvoyGateway) + r := proxy.NewResourceRender(kube.Namespace, infra.GetProxyInfra(), kube.EnvoyGateway) cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: kube.Namespace, diff --git a/internal/infrastructure/kubernetes/proxy_daemonset_test.go b/internal/infrastructure/kubernetes/proxy_daemonset_test.go index 2c126586247..e9fef86470c 100644 --- a/internal/infrastructure/kubernetes/proxy_daemonset_test.go +++ b/internal/infrastructure/kubernetes/proxy_daemonset_test.go @@ -66,7 +66,7 @@ func TestCreateOrUpdateProxyDaemonSet(t *testing.T) { }, } - r := proxy.NewResourceRender(cfg.Namespace, cfg.DNSDomain, infra.GetProxyInfra(), cfg.EnvoyGateway) + r := proxy.NewResourceRender(cfg.Namespace, infra.GetProxyInfra(), cfg.EnvoyGateway) ds, err := r.DaemonSet() require.NoError(t, err) @@ -245,7 +245,7 @@ func TestCreateOrUpdateProxyDaemonSet(t *testing.T) { } kube := NewInfra(cli, cfg) - r := proxy.NewResourceRender(kube.Namespace, kube.DNSDomain, tc.in.GetProxyInfra(), cfg.EnvoyGateway) + r := proxy.NewResourceRender(kube.Namespace, tc.in.GetProxyInfra(), cfg.EnvoyGateway) err := kube.createOrUpdateDaemonSet(context.Background(), r) if tc.wantErr { require.Error(t, err) diff --git a/internal/infrastructure/kubernetes/proxy_deployment_test.go b/internal/infrastructure/kubernetes/proxy_deployment_test.go index 188c92961b3..616101e18a2 100644 --- a/internal/infrastructure/kubernetes/proxy_deployment_test.go +++ b/internal/infrastructure/kubernetes/proxy_deployment_test.go @@ -59,7 +59,7 @@ func TestCreateOrUpdateProxyDeployment(t *testing.T) { infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNamespaceLabel] = "default" infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNameLabel] = infra.Proxy.Name - r := proxy.NewResourceRender(cfg.Namespace, cfg.DNSDomain, infra.GetProxyInfra(), cfg.EnvoyGateway) + r := proxy.NewResourceRender(cfg.Namespace, infra.GetProxyInfra(), cfg.EnvoyGateway) deploy, err := r.Deployment() require.NoError(t, err) @@ -238,7 +238,7 @@ func TestCreateOrUpdateProxyDeployment(t *testing.T) { } kube := NewInfra(cli, cfg) - r := proxy.NewResourceRender(kube.Namespace, kube.DNSDomain, tc.in.GetProxyInfra(), cfg.EnvoyGateway) + r := proxy.NewResourceRender(kube.Namespace, tc.in.GetProxyInfra(), cfg.EnvoyGateway) err := kube.createOrUpdateDeployment(context.Background(), r) if tc.wantErr { require.Error(t, err) @@ -284,7 +284,7 @@ func TestDeleteProxyDeployment(t *testing.T) { infra := ir.NewInfra() 
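// NewResourceRender no longer takes the cluster DNS domain: the rendered bootstrap
// testdata above now targets the bare "envoy-gateway" Service name rather than the
// envoy-gateway.envoy-gateway-system.svc.cluster.local FQDN, so the renderer no longer
// needs the domain to build proxy resources. A minimal sketch of the updated call shape
// used throughout these tests (fixtures as defined above):
//
//	r := proxy.NewResourceRender(kube.Namespace, infra.GetProxyInfra(), kube.EnvoyGateway)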
infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNamespaceLabel] = "default" infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNameLabel] = infra.Proxy.Name - r := proxy.NewResourceRender(kube.Namespace, kube.DNSDomain, infra.GetProxyInfra(), kube.EnvoyGateway) + r := proxy.NewResourceRender(kube.Namespace, infra.GetProxyInfra(), kube.EnvoyGateway) err := kube.createOrUpdateDeployment(context.Background(), r) require.NoError(t, err) diff --git a/internal/infrastructure/kubernetes/proxy_infra.go b/internal/infrastructure/kubernetes/proxy_infra.go index b7d96f3bb3c..e0b1fc5f9cc 100644 --- a/internal/infrastructure/kubernetes/proxy_infra.go +++ b/internal/infrastructure/kubernetes/proxy_infra.go @@ -23,7 +23,7 @@ func (i *Infra) CreateOrUpdateProxyInfra(ctx context.Context, infra *ir.Infra) e return errors.New("infra proxy ir is nil") } - r := proxy.NewResourceRender(i.Namespace, i.DNSDomain, infra.GetProxyInfra(), i.EnvoyGateway) + r := proxy.NewResourceRender(i.Namespace, infra.GetProxyInfra(), i.EnvoyGateway) return i.createOrUpdate(ctx, r) } @@ -33,6 +33,6 @@ func (i *Infra) DeleteProxyInfra(ctx context.Context, infra *ir.Infra) error { return errors.New("infra ir is nil") } - r := proxy.NewResourceRender(i.Namespace, i.DNSDomain, infra.GetProxyInfra(), i.EnvoyGateway) + r := proxy.NewResourceRender(i.Namespace, infra.GetProxyInfra(), i.EnvoyGateway) return i.delete(ctx, r) } diff --git a/internal/infrastructure/kubernetes/proxy_service_test.go b/internal/infrastructure/kubernetes/proxy_service_test.go index dab16d5b981..ffc8e4912e6 100644 --- a/internal/infrastructure/kubernetes/proxy_service_test.go +++ b/internal/infrastructure/kubernetes/proxy_service_test.go @@ -32,7 +32,7 @@ func TestDeleteProxyService(t *testing.T) { infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNamespaceLabel] = "default" infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNameLabel] = infra.Proxy.Name - r := proxy.NewResourceRender(kube.Namespace, kube.DNSDomain, infra.GetProxyInfra(), kube.EnvoyGateway) + r := proxy.NewResourceRender(kube.Namespace, infra.GetProxyInfra(), kube.EnvoyGateway) err := kube.createOrUpdateService(context.Background(), r) require.NoError(t, err) diff --git a/internal/infrastructure/kubernetes/proxy_serviceaccount_test.go b/internal/infrastructure/kubernetes/proxy_serviceaccount_test.go index 44732bf6b48..9aed62b6e6b 100644 --- a/internal/infrastructure/kubernetes/proxy_serviceaccount_test.go +++ b/internal/infrastructure/kubernetes/proxy_serviceaccount_test.go @@ -187,7 +187,7 @@ func TestCreateOrUpdateProxyServiceAccount(t *testing.T) { kube := NewInfra(cli, cfg) - r := proxy.NewResourceRender(kube.Namespace, kube.DNSDomain, tc.in.GetProxyInfra(), cfg.EnvoyGateway) + r := proxy.NewResourceRender(kube.Namespace, tc.in.GetProxyInfra(), cfg.EnvoyGateway) err = kube.createOrUpdateServiceAccount(context.Background(), r) require.NoError(t, err) @@ -220,7 +220,7 @@ func TestDeleteProxyServiceAccount(t *testing.T) { infra := ir.NewInfra() infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNamespaceLabel] = "default" infra.Proxy.GetProxyMetadata().Labels[gatewayapi.OwningGatewayNameLabel] = infra.Proxy.Name - r := proxy.NewResourceRender(kube.Namespace, kube.DNSDomain, infra.GetProxyInfra(), kube.EnvoyGateway) + r := proxy.NewResourceRender(kube.Namespace, infra.GetProxyInfra(), kube.EnvoyGateway) err := kube.createOrUpdateServiceAccount(context.Background(), r) require.NoError(t, err) diff --git 
a/internal/infrastructure/kubernetes/ratelimit/resource.go b/internal/infrastructure/kubernetes/ratelimit/resource.go index 4785a700d40..0d35196d629 100644 --- a/internal/infrastructure/kubernetes/ratelimit/resource.go +++ b/internal/infrastructure/kubernetes/ratelimit/resource.go @@ -277,6 +277,22 @@ func expectedRateLimitContainerEnv(rateLimit *egv1a1.RateLimit, rateLimitDeploym namespace string, ) []corev1.EnvVar { env := []corev1.EnvVar{ + { + Name: "PROTO", + Value: "tcp6", + }, + { + Name: "HOST", + Value: "", + }, + { + Name: "DEBUG_HOST", + Value: "", + }, + { + Name: "GRPC_HOST", + Value: "", + }, { Name: RuntimeRootEnvVar, Value: "/data", diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/custom.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/custom.yaml index 0c1be549e83..fc06ce0e6ba 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/custom.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/custom.yaml @@ -41,6 +41,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/default-env.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/default-env.yaml index 0c1be549e83..fc06ce0e6ba 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/default-env.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/default-env.yaml @@ -41,6 +41,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/default.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/default.yaml index 32e56a1ea5c..b4de59b7fc5 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/default.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/default.yaml @@ -40,6 +40,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/disable-prometheus.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/disable-prometheus.yaml index 449ed2c1d70..0295426a38d 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/disable-prometheus.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/disable-prometheus.yaml @@ -36,6 +36,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/enable-tracing-custom.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/enable-tracing-custom.yaml index 320497017f7..00bac21b020 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/enable-tracing-custom.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/enable-tracing-custom.yaml @@ -40,6 +40,11 @@ spec: - command: - /bin/ratelimit 
env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/enable-tracing.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/enable-tracing.yaml index 5b01b9ef319..8610989b103 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/enable-tracing.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/enable-tracing.yaml @@ -40,6 +40,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/extension-env.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/extension-env.yaml index 65c68972f9d..290e9f656ab 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/extension-env.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/extension-env.yaml @@ -41,6 +41,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/merge-annotations.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/merge-annotations.yaml index 4bc241198c6..7ae57f84ab9 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/merge-annotations.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/merge-annotations.yaml @@ -42,6 +42,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/merge-labels.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/merge-labels.yaml index 6681232eeb8..e8d5445a482 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/merge-labels.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/merge-labels.yaml @@ -42,6 +42,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/override-env.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/override-env.yaml index 0c0f73f3c83..915e44b0893 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/override-env.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/override-env.yaml @@ -41,6 +41,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/patch-deployment.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/patch-deployment.yaml index 773607b225a..ecca9ce7c69 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/patch-deployment.yaml +++ 
b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/patch-deployment.yaml @@ -40,6 +40,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/redis-tls-settings.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/redis-tls-settings.yaml index 29428fc447b..a5406052825 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/redis-tls-settings.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/redis-tls-settings.yaml @@ -41,6 +41,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/tolerations.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/tolerations.yaml index a2478222625..41ac3eef2ca 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/tolerations.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/tolerations.yaml @@ -41,6 +41,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/volumes.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/volumes.yaml index 30d8852d642..be42c7e0f8d 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/volumes.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/volumes.yaml @@ -41,6 +41,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/with-node-selector.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/with-node-selector.yaml index 9cf4e01fbff..046ad33ea36 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/with-node-selector.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/with-node-selector.yaml @@ -40,6 +40,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/with-topology-spread-constraints.yaml b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/with-topology-spread-constraints.yaml index 5625daf61a0..74eb16a010c 100644 --- a/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/with-topology-spread-constraints.yaml +++ b/internal/infrastructure/kubernetes/ratelimit/testdata/deployments/with-topology-spread-constraints.yaml @@ -40,6 +40,11 @@ spec: - command: - /bin/ratelimit env: + - name: PROTO + value: tcp6 + - name: HOST + - name: DEBUG_HOST + - name: GRPC_HOST - name: RUNTIME_ROOT value: /data - name: RUNTIME_SUBDIRECTORY diff --git a/internal/utils/net/ip.go b/internal/utils/net/ip.go new file mode 100644 index 
00000000000..af0166b097b --- /dev/null +++ b/internal/utils/net/ip.go @@ -0,0 +1,25 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. + +package net + +import ( + "net" + "os" +) + +func IsIPv6(s string) bool { + ip := net.ParseIP(s) + if ip == nil { + return false + } + return ip.To4() == nil +} + +// IsIPv6Pod returns true if the POD_IP environment variable is an IPv6 address. +// WARNING: This function is only intended to be used in the context of Kubernetes. +func IsIPv6Pod() bool { + return IsIPv6(os.Getenv("POD_IP")) +} diff --git a/internal/utils/net/ip_test.go b/internal/utils/net/ip_test.go new file mode 100644 index 00000000000..f3c285e27ff --- /dev/null +++ b/internal/utils/net/ip_test.go @@ -0,0 +1,37 @@ +// Copyright Envoy Gateway Authors +// SPDX-License-Identifier: Apache-2.0 +// The full text of the Apache license is available in the LICENSE file at +// the root of the repo. + +package net + +import "testing" + +func TestIsIPv6(t *testing.T) { + cases := []struct { + ip string + expected bool + }{ + { + ip: "", + expected: false, + }, + { + ip: "127.0.0.1", + expected: false, + }, + { + ip: "::1", + expected: true, + }, + } + + for _, tc := range cases { + t.Run(tc.ip, func(t *testing.T) { + actual := IsIPv6(tc.ip) + if actual != tc.expected { + t.Errorf("IsIPv6(%s) = %t; expected %t", tc.ip, actual, tc.expected) + } + }) + } +} diff --git a/internal/xds/bootstrap/bootstrap.go b/internal/xds/bootstrap/bootstrap.go index 0efad8c314f..2a5789e68dc 100644 --- a/internal/xds/bootstrap/bootstrap.go +++ b/internal/xds/bootstrap/bootstrap.go @@ -15,7 +15,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" egv1a1 "github.com/envoyproxy/gateway/api/v1alpha1" - "github.com/envoyproxy/gateway/internal/utils/net" + netutils "github.com/envoyproxy/gateway/internal/utils/net" "github.com/envoyproxy/gateway/internal/utils/regex" ) @@ -26,7 +26,8 @@ const ( // It defaults to the Envoy Gateway Kubernetes service. envoyGatewayXdsServerHost = "envoy-gateway" // EnvoyAdminAddress is the listening address of the envoy admin interface. - EnvoyAdminAddress = "127.0.0.1" + envoyAdminAddress = "127.0.0.1" + envoyAdminAddressIPv6 = "::1" // EnvoyAdminPort is the port used to expose admin interface. EnvoyAdminPort = 19000 // envoyAdminAccessLogPath is the path used to expose admin access log. @@ -39,14 +40,29 @@ const ( // DefaultWasmServerPort is the default listening port of the wasm HTTP server. 
wasmServerPort = 18002 - envoyReadinessAddress = "0.0.0.0" - EnvoyReadinessPort = 19001 - EnvoyReadinessPath = "/ready" + envoyReadinessAddress = "0.0.0.0" + envoyReadinessAddressIPv6 = "::" + EnvoyReadinessPort = 19001 + EnvoyReadinessPath = "/ready" defaultSdsTrustedCAPath = "/sds/xds-trusted-ca.json" defaultSdsCertificatePath = "/sds/xds-certificate.json" ) +func AdminAddress() string { + if netutils.IsIPv6Pod() { + return envoyAdminAddressIPv6 + } + return envoyAdminAddress +} + +func readinessAddress() string { + if netutils.IsIPv6Pod() { + return envoyReadinessAddressIPv6 + } + return envoyReadinessAddress +} + //go:embed bootstrap.yaml.tpl var bootstrapTmplStr string @@ -199,7 +215,7 @@ func GetRenderedBootstrapConfig(opts *RenderBootstrapConfigOptions) (string, err host, port = *sink.OpenTelemetry.Host, uint32(sink.OpenTelemetry.Port) } if len(sink.OpenTelemetry.BackendRefs) > 0 { - host, port = net.BackendHostAndPort(sink.OpenTelemetry.BackendRefs[0].BackendObjectReference, "") + host, port = netutils.BackendHostAndPort(sink.OpenTelemetry.BackendRefs[0].BackendObjectReference, "") } addr := fmt.Sprintf("%s:%d", host, port) if addresses.Has(addr) { @@ -249,12 +265,12 @@ func GetRenderedBootstrapConfig(opts *RenderBootstrapConfigOptions) (string, err Port: wasmServerPort, }, AdminServer: adminServerParameters{ - Address: EnvoyAdminAddress, + Address: AdminAddress(), Port: EnvoyAdminPort, AccessLogPath: envoyAdminAccessLogPath, }, ReadyServer: readyServerParameters{ - Address: envoyReadinessAddress, + Address: readinessAddress(), Port: EnvoyReadinessPort, ReadinessPath: EnvoyReadinessPath, }, diff --git a/internal/xds/bootstrap/bootstrap.yaml.tpl b/internal/xds/bootstrap/bootstrap.yaml.tpl index d243b7777ec..10eb76c75fe 100644 --- a/internal/xds/bootstrap/bootstrap.yaml.tpl +++ b/internal/xds/bootstrap/bootstrap.yaml.tpl @@ -65,7 +65,7 @@ static_resources: - name: envoy-gateway-proxy-ready-{{ .ReadyServer.Address }}-{{ .ReadyServer.Port }} address: socket_address: - address: {{ .ReadyServer.Address }} + address: '{{ .ReadyServer.Address }}' port_value: {{ .ReadyServer.Port }} protocol: TCP filter_chains: diff --git a/internal/xds/bootstrap/testdata/render/custom-server-port.yaml b/internal/xds/bootstrap/testdata/render/custom-server-port.yaml index 23cd059a2a6..cc3b56b399c 100644 --- a/internal/xds/bootstrap/testdata/render/custom-server-port.yaml +++ b/internal/xds/bootstrap/testdata/render/custom-server-port.yaml @@ -34,7 +34,7 @@ static_resources: - name: envoy-gateway-proxy-ready-0.0.0.0-3333 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 3333 protocol: TCP filter_chains: diff --git a/internal/xds/bootstrap/testdata/render/custom-stats-matcher.yaml b/internal/xds/bootstrap/testdata/render/custom-stats-matcher.yaml index 370b66914e3..27258e741ea 100644 --- a/internal/xds/bootstrap/testdata/render/custom-stats-matcher.yaml +++ b/internal/xds/bootstrap/testdata/render/custom-stats-matcher.yaml @@ -45,7 +45,7 @@ static_resources: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: diff --git a/internal/xds/bootstrap/testdata/render/disable-prometheus.yaml b/internal/xds/bootstrap/testdata/render/disable-prometheus.yaml index 1b5be570ce3..1e3ba1994dd 100644 --- a/internal/xds/bootstrap/testdata/render/disable-prometheus.yaml +++ b/internal/xds/bootstrap/testdata/render/disable-prometheus.yaml @@ -34,7 +34,7 @@ static_resources: - name: 
envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: diff --git a/internal/xds/bootstrap/testdata/render/enable-prometheus-gzip-compression.yaml b/internal/xds/bootstrap/testdata/render/enable-prometheus-gzip-compression.yaml index 93829b713f1..20eedcb3be8 100644 --- a/internal/xds/bootstrap/testdata/render/enable-prometheus-gzip-compression.yaml +++ b/internal/xds/bootstrap/testdata/render/enable-prometheus-gzip-compression.yaml @@ -34,7 +34,7 @@ static_resources: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: diff --git a/internal/xds/bootstrap/testdata/render/enable-prometheus.yaml b/internal/xds/bootstrap/testdata/render/enable-prometheus.yaml index 5d17a89534f..162569bcaf9 100644 --- a/internal/xds/bootstrap/testdata/render/enable-prometheus.yaml +++ b/internal/xds/bootstrap/testdata/render/enable-prometheus.yaml @@ -34,7 +34,7 @@ static_resources: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: diff --git a/internal/xds/bootstrap/testdata/render/otel-metrics-backendref.yaml b/internal/xds/bootstrap/testdata/render/otel-metrics-backendref.yaml index 3f6c0259a7e..27521b3c3fa 100644 --- a/internal/xds/bootstrap/testdata/render/otel-metrics-backendref.yaml +++ b/internal/xds/bootstrap/testdata/render/otel-metrics-backendref.yaml @@ -41,7 +41,7 @@ static_resources: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: diff --git a/internal/xds/bootstrap/testdata/render/otel-metrics.yaml b/internal/xds/bootstrap/testdata/render/otel-metrics.yaml index 3f6c0259a7e..27521b3c3fa 100644 --- a/internal/xds/bootstrap/testdata/render/otel-metrics.yaml +++ b/internal/xds/bootstrap/testdata/render/otel-metrics.yaml @@ -41,7 +41,7 @@ static_resources: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: diff --git a/internal/xds/bootstrap/testdata/render/with-max-heap-size-bytes.yaml b/internal/xds/bootstrap/testdata/render/with-max-heap-size-bytes.yaml index 854b8a28988..a50a221b48f 100644 --- a/internal/xds/bootstrap/testdata/render/with-max-heap-size-bytes.yaml +++ b/internal/xds/bootstrap/testdata/render/with-max-heap-size-bytes.yaml @@ -34,7 +34,7 @@ static_resources: - name: envoy-gateway-proxy-ready-0.0.0.0-19001 address: socket_address: - address: 0.0.0.0 + address: '0.0.0.0' port_value: 19001 protocol: TCP filter_chains: diff --git a/internal/xds/translator/listener.go b/internal/xds/translator/listener.go index 9a68c5f3c1f..2072757e081 100644 --- a/internal/xds/translator/listener.go +++ b/internal/xds/translator/listener.go @@ -150,13 +150,12 @@ func setAddressByIPFamily(socketAddress *corev3.SocketAddress, ipFamily *ir.IPFa if ipFamily == nil { return nil } - switch *ipFamily { - case ir.IPv4: - socketAddress.Address = "0.0.0.0" - case ir.IPv6: - socketAddress.Address = "::" - case ir.Dualstack: - socketAddress.Address = "0.0.0.0" + if *ipFamily == ir.Dualstack { + // case ir.IPv4: + // socketAddress.Address = "0.0.0.0" + // case ir.IPv6: + // socketAddress.Address = "::" + // socketAddress.Address = "0.0.0.0" return []*listenerv3.AdditionalAddress{ { Address: 
&corev3.Address{ diff --git a/site/content/en/latest/install/gateway-addons-helm-api.md b/site/content/en/latest/install/gateway-addons-helm-api.md index ba60a7d51b0..5e2459da863 100644 --- a/site/content/en/latest/install/gateway-addons-helm-api.md +++ b/site/content/en/latest/install/gateway-addons-helm-api.md @@ -24,6 +24,7 @@ An Add-ons Helm chart for Envoy Gateway | Repository | Name | Version | |------------|------|---------| | https://fluent.github.io/helm-charts | fluent-bit | 0.30.4 | +| https://grafana.github.io/helm-charts | alloy | 0.9.2 | | https://grafana.github.io/helm-charts | grafana | 8.0.0 | | https://grafana.github.io/helm-charts | loki | 4.8.0 | | https://grafana.github.io/helm-charts | tempo | 1.3.1 | @@ -34,6 +35,9 @@ An Add-ons Helm chart for Envoy Gateway | Key | Type | Default | Description | |-----|------|---------|-------------| +| alloy.alloy.configMap.content | string | `"// Write your Alloy config here:\nlogging {\n level = \"info\"\n format = \"logfmt\"\n}\nloki.write \"alloy\" {\n endpoint {\n url = \"http://loki.monitoring.svc:3100/loki/api/v1/push\"\n }\n}\n// discovery.kubernetes allows you to find scrape targets from Kubernetes resources.\n// It watches cluster state and ensures targets are continually synced with what is currently running in your cluster.\ndiscovery.kubernetes \"pod\" {\n role = \"pod\"\n}\n\n// discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules.\n// If no rules are defined, then the input targets are exported as-is.\ndiscovery.relabel \"pod_logs\" {\n targets = discovery.kubernetes.pod.targets\n\n // Label creation - \"namespace\" field from \"__meta_kubernetes_namespace\"\n rule {\n source_labels = [\"__meta_kubernetes_namespace\"]\n action = \"replace\"\n target_label = \"namespace\"\n }\n\n // Label creation - \"pod\" field from \"__meta_kubernetes_pod_name\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_name\"]\n action = \"replace\"\n target_label = \"pod\"\n }\n\n // Label creation - \"container\" field from \"__meta_kubernetes_pod_container_name\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_container_name\"]\n action = \"replace\"\n target_label = \"container\"\n }\n\n // Label creation - \"app\" field from \"__meta_kubernetes_pod_label_app_kubernetes_io_name\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_label_app_kubernetes_io_name\"]\n action = \"replace\"\n target_label = \"app\"\n }\n\n // Label creation - \"job\" field from \"__meta_kubernetes_namespace\" and \"__meta_kubernetes_pod_container_name\"\n // Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name\n rule {\n source_labels = [\"__meta_kubernetes_namespace\", \"__meta_kubernetes_pod_container_name\"]\n action = \"replace\"\n target_label = \"job\"\n separator = \"/\"\n replacement = \"$1\"\n }\n\n // Label creation - \"container\" field from \"__meta_kubernetes_pod_uid\" and \"__meta_kubernetes_pod_container_name\"\n // Concatenate values __meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name.log\n rule {\n source_labels = [\"__meta_kubernetes_pod_uid\", \"__meta_kubernetes_pod_container_name\"]\n action = \"replace\"\n target_label = \"__path__\"\n separator = \"/\"\n replacement = \"/var/log/pods/*$1/*.log\"\n }\n\n // Label creation - \"container_runtime\" field from \"__meta_kubernetes_pod_container_id\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_container_id\"]\n action = \"replace\"\n target_label = \"container_runtime\"\n regex = 
\"^(\\\\S+):\\\\/\\\\/.+$\"\n replacement = \"$1\"\n }\n}\n\n// loki.source.kubernetes tails logs from Kubernetes containers using the Kubernetes API.\nloki.source.kubernetes \"pod_logs\" {\n targets = discovery.relabel.pod_logs.output\n forward_to = [loki.process.pod_logs.receiver]\n}\n// loki.process receives log entries from other Loki components, applies one or more processing stages,\n// and forwards the results to the list of receivers in the component’s arguments.\nloki.process \"pod_logs\" {\n stage.static_labels {\n values = {\n cluster = \"envoy-gateway\",\n }\n }\n\n forward_to = [loki.write.alloy.receiver]\n}"` | | +| alloy.enabled | bool | `true` | | +| alloy.fullnameOverride | string | `"alloy"` | | | fluent-bit.config.filters | string | `"[FILTER]\n Name kubernetes\n Match kube.*\n Merge_Log On\n Keep_Log Off\n K8S-Logging.Parser On\n K8S-Logging.Exclude On\n\n[FILTER]\n Name grep\n Match kube.*\n Regex $kubernetes['container_name'] ^envoy$\n\n[FILTER]\n Name parser\n Match kube.*\n Key_Name log\n Parser envoy\n Reserve_Data True\n"` | | | fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n multiline.parser docker, cri\n Tag kube.*\n Mem_Buf_Limit 5MB\n Skip_Long_Lines On\n"` | | | fluent-bit.config.outputs | string | `"[OUTPUT]\n Name loki\n Match kube.*\n Host loki.monitoring.svc.cluster.local\n Port 3100\n Labels job=fluentbit, app=$kubernetes['labels']['app'], k8s_namespace_name=$kubernetes['namespace_name'], k8s_pod_name=$kubernetes['pod_name'], k8s_container_name=$kubernetes['container_name']\n"` | | diff --git a/site/content/zh/latest/install/gateway-addons-helm-api.md b/site/content/zh/latest/install/gateway-addons-helm-api.md index ba60a7d51b0..5e2459da863 100644 --- a/site/content/zh/latest/install/gateway-addons-helm-api.md +++ b/site/content/zh/latest/install/gateway-addons-helm-api.md @@ -24,6 +24,7 @@ An Add-ons Helm chart for Envoy Gateway | Repository | Name | Version | |------------|------|---------| | https://fluent.github.io/helm-charts | fluent-bit | 0.30.4 | +| https://grafana.github.io/helm-charts | alloy | 0.9.2 | | https://grafana.github.io/helm-charts | grafana | 8.0.0 | | https://grafana.github.io/helm-charts | loki | 4.8.0 | | https://grafana.github.io/helm-charts | tempo | 1.3.1 | @@ -34,6 +35,9 @@ An Add-ons Helm chart for Envoy Gateway | Key | Type | Default | Description | |-----|------|---------|-------------| +| alloy.alloy.configMap.content | string | `"// Write your Alloy config here:\nlogging {\n level = \"info\"\n format = \"logfmt\"\n}\nloki.write \"alloy\" {\n endpoint {\n url = \"http://loki.monitoring.svc:3100/loki/api/v1/push\"\n }\n}\n// discovery.kubernetes allows you to find scrape targets from Kubernetes resources.\n// It watches cluster state and ensures targets are continually synced with what is currently running in your cluster.\ndiscovery.kubernetes \"pod\" {\n role = \"pod\"\n}\n\n// discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules.\n// If no rules are defined, then the input targets are exported as-is.\ndiscovery.relabel \"pod_logs\" {\n targets = discovery.kubernetes.pod.targets\n\n // Label creation - \"namespace\" field from \"__meta_kubernetes_namespace\"\n rule {\n source_labels = [\"__meta_kubernetes_namespace\"]\n action = \"replace\"\n target_label = \"namespace\"\n }\n\n // Label creation - \"pod\" field from \"__meta_kubernetes_pod_name\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_name\"]\n action = 
\"replace\"\n target_label = \"pod\"\n }\n\n // Label creation - \"container\" field from \"__meta_kubernetes_pod_container_name\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_container_name\"]\n action = \"replace\"\n target_label = \"container\"\n }\n\n // Label creation - \"app\" field from \"__meta_kubernetes_pod_label_app_kubernetes_io_name\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_label_app_kubernetes_io_name\"]\n action = \"replace\"\n target_label = \"app\"\n }\n\n // Label creation - \"job\" field from \"__meta_kubernetes_namespace\" and \"__meta_kubernetes_pod_container_name\"\n // Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name\n rule {\n source_labels = [\"__meta_kubernetes_namespace\", \"__meta_kubernetes_pod_container_name\"]\n action = \"replace\"\n target_label = \"job\"\n separator = \"/\"\n replacement = \"$1\"\n }\n\n // Label creation - \"container\" field from \"__meta_kubernetes_pod_uid\" and \"__meta_kubernetes_pod_container_name\"\n // Concatenate values __meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name.log\n rule {\n source_labels = [\"__meta_kubernetes_pod_uid\", \"__meta_kubernetes_pod_container_name\"]\n action = \"replace\"\n target_label = \"__path__\"\n separator = \"/\"\n replacement = \"/var/log/pods/*$1/*.log\"\n }\n\n // Label creation - \"container_runtime\" field from \"__meta_kubernetes_pod_container_id\"\n rule {\n source_labels = [\"__meta_kubernetes_pod_container_id\"]\n action = \"replace\"\n target_label = \"container_runtime\"\n regex = \"^(\\\\S+):\\\\/\\\\/.+$\"\n replacement = \"$1\"\n }\n}\n\n// loki.source.kubernetes tails logs from Kubernetes containers using the Kubernetes API.\nloki.source.kubernetes \"pod_logs\" {\n targets = discovery.relabel.pod_logs.output\n forward_to = [loki.process.pod_logs.receiver]\n}\n// loki.process receives log entries from other Loki components, applies one or more processing stages,\n// and forwards the results to the list of receivers in the component’s arguments.\nloki.process \"pod_logs\" {\n stage.static_labels {\n values = {\n cluster = \"envoy-gateway\",\n }\n }\n\n forward_to = [loki.write.alloy.receiver]\n}"` | | +| alloy.enabled | bool | `true` | | +| alloy.fullnameOverride | string | `"alloy"` | | | fluent-bit.config.filters | string | `"[FILTER]\n Name kubernetes\n Match kube.*\n Merge_Log On\n Keep_Log Off\n K8S-Logging.Parser On\n K8S-Logging.Exclude On\n\n[FILTER]\n Name grep\n Match kube.*\n Regex $kubernetes['container_name'] ^envoy$\n\n[FILTER]\n Name parser\n Match kube.*\n Key_Name log\n Parser envoy\n Reserve_Data True\n"` | | | fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n multiline.parser docker, cri\n Tag kube.*\n Mem_Buf_Limit 5MB\n Skip_Long_Lines On\n"` | | | fluent-bit.config.outputs | string | `"[OUTPUT]\n Name loki\n Match kube.*\n Host loki.monitoring.svc.cluster.local\n Port 3100\n Labels job=fluentbit, app=$kubernetes['labels']['app'], k8s_namespace_name=$kubernetes['namespace_name'], k8s_pod_name=$kubernetes['pod_name'], k8s_container_name=$kubernetes['container_name']\n"` | | diff --git a/test/e2e/tests/accesslog.go b/test/e2e/tests/accesslog.go index b2c9a28ac94..4edc12f7c55 100644 --- a/test/e2e/tests/accesslog.go +++ b/test/e2e/tests/accesslog.go @@ -30,9 +30,9 @@ var FileAccessLogTest = suite.ConformanceTest{ Manifests: []string{"testdata/accesslog-file.yaml"}, Test: func(t *testing.T, suite *suite.ConformanceTestSuite) { labels := map[string]string{ - 
"job": "fluentbit", - "k8s_namespace_name": "envoy-gateway-system", - "k8s_container_name": "envoy", + "job": "envoy-gateway-system/envoy", + "namespace": "envoy-gateway-system", + "container": "envoy", } match := "test-annotation-value" diff --git a/test/helm/gateway-addons-helm/default.out.yaml b/test/helm/gateway-addons-helm/default.out.yaml index 614a2d22454..0588a9bde95 100644 --- a/test/helm/gateway-addons-helm/default.out.yaml +++ b/test/helm/gateway-addons-helm/default.out.yaml @@ -1,4 +1,20 @@ --- +# Source: gateway-addons-helm/charts/alloy/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alloy + namespace: monitoring + labels: + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: gateway-addons-helm + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- # Source: gateway-addons-helm/charts/fluent-bit/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount @@ -89,6 +105,118 @@ data: admin-password: "YWRtaW4=" ldap-toml: "" --- +# Source: gateway-addons-helm/charts/alloy/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: alloy + labels: + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: gateway-addons-helm + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: config +data: + config.alloy: |- + // Write your Alloy config here: + logging { + level = "info" + format = "logfmt" + } + loki.write "alloy" { + endpoint { + url = "http://loki.monitoring.svc:3100/loki/api/v1/push" + } + } + // discovery.kubernetes allows you to find scrape targets from Kubernetes resources. + // It watches cluster state and ensures targets are continually synced with what is currently running in your cluster. + discovery.kubernetes "pod" { + role = "pod" + } + + // discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules. + // If no rules are defined, then the input targets are exported as-is. 
+ discovery.relabel "pod_logs" { + targets = discovery.kubernetes.pod.targets + + // Label creation - "namespace" field from "__meta_kubernetes_namespace" + rule { + source_labels = ["__meta_kubernetes_namespace"] + action = "replace" + target_label = "namespace" + } + + // Label creation - "pod" field from "__meta_kubernetes_pod_name" + rule { + source_labels = ["__meta_kubernetes_pod_name"] + action = "replace" + target_label = "pod" + } + + // Label creation - "container" field from "__meta_kubernetes_pod_container_name" + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "container" + } + + // Label creation - "app" field from "__meta_kubernetes_pod_label_app_kubernetes_io_name" + rule { + source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_name"] + action = "replace" + target_label = "app" + } + + // Label creation - "job" field from "__meta_kubernetes_namespace" and "__meta_kubernetes_pod_container_name" + // Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name + rule { + source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "job" + separator = "/" + replacement = "$1" + } + + // Label creation - "container" field from "__meta_kubernetes_pod_uid" and "__meta_kubernetes_pod_container_name" + // Concatenate values __meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name.log + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "__path__" + separator = "/" + replacement = "/var/log/pods/*$1/*.log" + } + + // Label creation - "container_runtime" field from "__meta_kubernetes_pod_container_id" + rule { + source_labels = ["__meta_kubernetes_pod_container_id"] + action = "replace" + target_label = "container_runtime" + regex = "^(\\S+):\\/\\/.+$" + replacement = "$1" + } + } + + // loki.source.kubernetes tails logs from Kubernetes containers using the Kubernetes API. + loki.source.kubernetes "pod_logs" { + targets = discovery.relabel.pod_logs.output + forward_to = [loki.process.pod_logs.receiver] + } + // loki.process receives log entries from other Loki components, applies one or more processing stages, + // and forwards the results to the list of receivers in the component’s arguments. + loki.process "pod_logs" { + stage.static_labels { + values = { + cluster = "envoy-gateway", + } + } + + forward_to = [loki.write.alloy.receiver] + } +--- # Source: gateway-addons-helm/charts/fluent-bit/templates/configmap.yaml apiVersion: v1 kind: ConfigMap @@ -9263,6 +9391,106 @@ data: "uid": "f7aeb41676b7865cf31ae49691325f91" } --- +# Source: gateway-addons-helm/charts/alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alloy + labels: + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: gateway-addons-helm + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. 
+ - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- # Source: gateway-addons-helm/charts/fluent-bit/templates/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -9350,6 +9578,29 @@ rules: verbs: - get --- +# Source: gateway-addons-helm/charts/alloy/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alloy + labels: + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: gateway-addons-helm + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alloy +subjects: + - kind: ServiceAccount + name: alloy + namespace: monitoring +--- # Source: gateway-addons-helm/charts/fluent-bit/templates/clusterrolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -9447,6 +9698,32 @@ subjects: name: grafana namespace: monitoring --- +# Source: gateway-addons-helm/charts/alloy/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: alloy + labels: + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: gateway-addons-helm + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: gateway-addons-helm + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- # Source: gateway-addons-helm/charts/fluent-bit/templates/service.yaml apiVersion: v1 kind: Service @@ -9658,6 +9935,83 @@ spec: app.kubernetes.io/name: tempo app.kubernetes.io/instance: gateway-addons-helm --- +# Source: gateway-addons-helm/charts/alloy/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: alloy + labels: + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: gateway-addons-helm + + app.kubernetes.io/version: "v1.4.3" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy + app.kubernetes.io/instance: gateway-addons-helm + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + labels: + 
app.kubernetes.io/name: alloy + app.kubernetes.io/instance: gateway-addons-helm + spec: + serviceAccountName: alloy + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.4.3 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + volumes: + - name: config + configMap: + name: alloy +--- # Source: gateway-addons-helm/charts/fluent-bit/templates/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet diff --git a/test/helm/gateway-addons-helm/e2e.in.yaml b/test/helm/gateway-addons-helm/e2e.in.yaml index 93ce0d8d622..324800dc8a2 100644 --- a/test/helm/gateway-addons-helm/e2e.in.yaml +++ b/test/helm/gateway-addons-helm/e2e.in.yaml @@ -2,3 +2,5 @@ grafana: enabled: false opentelemetry-collector: enabled: true +fluent-bit: + enabled: false diff --git a/test/helm/gateway-addons-helm/e2e.out.yaml b/test/helm/gateway-addons-helm/e2e.out.yaml index 84a0691299a..5a2f32ed2c8 100644 --- a/test/helm/gateway-addons-helm/e2e.out.yaml +++ b/test/helm/gateway-addons-helm/e2e.out.yaml @@ -1,16 +1,19 @@ --- -# Source: gateway-addons-helm/charts/fluent-bit/templates/serviceaccount.yaml +# Source: gateway-addons-helm/charts/alloy/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: fluent-bit + name: alloy namespace: monitoring labels: - helm.sh/chart: fluent-bit-0.30.4 - app.kubernetes.io/name: fluent-bit + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy app.kubernetes.io/instance: gateway-addons-helm - app.kubernetes.io/version: "2.1.4" + + app.kubernetes.io/version: "v1.4.3" app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac --- # Source: gateway-addons-helm/charts/loki/templates/serviceaccount.yaml apiVersion: v1 @@ -69,73 +72,117 @@ metadata: app.kubernetes.io/managed-by: Helm automountServiceAccountToken: true --- -# Source: gateway-addons-helm/charts/fluent-bit/templates/configmap.yaml +# Source: gateway-addons-helm/charts/alloy/templates/configmap.yaml apiVersion: v1 kind: ConfigMap metadata: - name: fluent-bit - namespace: monitoring + name: alloy labels: - helm.sh/chart: fluent-bit-0.30.4 - app.kubernetes.io/name: fluent-bit + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy app.kubernetes.io/instance: gateway-addons-helm - app.kubernetes.io/version: "2.1.4" + + app.kubernetes.io/version: "v1.4.3" app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: config data: - custom_parsers.conf: | - [PARSER] - Name docker_no_time - Format json - Time_Keep Off - Time_Key time - Time_Format %Y-%m-%dT%H:%M:%S.%L + config.alloy: |- + // Write your Alloy config here: + logging { + level = "info" + format = "logfmt" + } + loki.write "alloy" { + 
endpoint { + url = "http://loki.monitoring.svc:3100/loki/api/v1/push" + } + } + // discovery.kubernetes allows you to find scrape targets from Kubernetes resources. + // It watches cluster state and ensures targets are continually synced with what is currently running in your cluster. + discovery.kubernetes "pod" { + role = "pod" + } - fluent-bit.conf: | - [SERVICE] - Daemon Off - Flush 1 - Log_Level info - Parsers_File parsers.conf - Parsers_File custom_parsers.conf - HTTP_Server On - HTTP_Listen 0.0.0.0 - HTTP_Port 2020 - Health_Check On + // discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules. + // If no rules are defined, then the input targets are exported as-is. + discovery.relabel "pod_logs" { + targets = discovery.kubernetes.pod.targets - [INPUT] - Name tail - Path /var/log/containers/*.log - multiline.parser docker, cri - Tag kube.* - Mem_Buf_Limit 5MB - Skip_Long_Lines On + // Label creation - "namespace" field from "__meta_kubernetes_namespace" + rule { + source_labels = ["__meta_kubernetes_namespace"] + action = "replace" + target_label = "namespace" + } - [FILTER] - Name kubernetes - Match kube.* - Merge_Log On - Keep_Log Off - K8S-Logging.Parser On - K8S-Logging.Exclude On + // Label creation - "pod" field from "__meta_kubernetes_pod_name" + rule { + source_labels = ["__meta_kubernetes_pod_name"] + action = "replace" + target_label = "pod" + } - [FILTER] - Name grep - Match kube.* - Regex $kubernetes['container_name'] ^envoy$ + // Label creation - "container" field from "__meta_kubernetes_pod_container_name" + rule { + source_labels = ["__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "container" + } - [FILTER] - Name parser - Match kube.* - Key_Name log - Parser envoy - Reserve_Data True + // Label creation - "app" field from "__meta_kubernetes_pod_label_app_kubernetes_io_name" + rule { + source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_name"] + action = "replace" + target_label = "app" + } + + // Label creation - "job" field from "__meta_kubernetes_namespace" and "__meta_kubernetes_pod_container_name" + // Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name + rule { + source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "job" + separator = "/" + replacement = "$1" + } - [OUTPUT] - Name loki - Match kube.* - Host loki.monitoring.svc.cluster.local - Port 3100 - Labels job=fluentbit, app=$kubernetes['labels']['app'], k8s_namespace_name=$kubernetes['namespace_name'], k8s_pod_name=$kubernetes['pod_name'], k8s_container_name=$kubernetes['container_name'] + // Label creation - "container" field from "__meta_kubernetes_pod_uid" and "__meta_kubernetes_pod_container_name" + // Concatenate values __meta_kubernetes_pod_uid/__meta_kubernetes_pod_container_name.log + rule { + source_labels = ["__meta_kubernetes_pod_uid", "__meta_kubernetes_pod_container_name"] + action = "replace" + target_label = "__path__" + separator = "/" + replacement = "/var/log/pods/*$1/*.log" + } + + // Label creation - "container_runtime" field from "__meta_kubernetes_pod_container_id" + rule { + source_labels = ["__meta_kubernetes_pod_container_id"] + action = "replace" + target_label = "container_runtime" + regex = "^(\\S+):\\/\\/.+$" + replacement = "$1" + } + } + + // loki.source.kubernetes tails logs from Kubernetes containers using the Kubernetes API. 
+ loki.source.kubernetes "pod_logs" { + targets = discovery.relabel.pod_logs.output + forward_to = [loki.process.pod_logs.receiver] + } + // loki.process receives log entries from other Loki components, applies one or more processing stages, + // and forwards the results to the list of receivers in the component’s arguments. + loki.process "pod_logs" { + stage.static_labels { + values = { + cluster = "envoy-gateway", + } + } + + forward_to = [loki.write.alloy.receiver] + } --- # Source: gateway-addons-helm/charts/loki/templates/configmap.yaml apiVersion: v1 @@ -9298,27 +9345,105 @@ data: "uid": "f7aeb41676b7865cf31ae49691325f91" } --- -# Source: gateway-addons-helm/charts/fluent-bit/templates/clusterrole.yaml +# Source: gateway-addons-helm/charts/alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: fluent-bit + name: alloy labels: - helm.sh/chart: fluent-bit-0.30.4 - app.kubernetes.io/name: fluent-bit + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy app.kubernetes.io/instance: gateway-addons-helm - app.kubernetes.io/version: "2.1.4" + + app.kubernetes.io/version: "v1.4.3" app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac rules: + # Rules which allow discovery.kubernetes to function. - apiGroups: - "" + - "discovery.k8s.io" + - "networking.k8s.io" resources: - - namespaces + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events verbs: - get - list - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] --- # Source: gateway-addons-helm/charts/prometheus/templates/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -9372,24 +9497,27 @@ rules: verbs: - get --- -# Source: gateway-addons-helm/charts/fluent-bit/templates/clusterrolebinding.yaml +# Source: gateway-addons-helm/charts/alloy/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: fluent-bit + name: alloy labels: - helm.sh/chart: fluent-bit-0.30.4 - app.kubernetes.io/name: fluent-bit + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy app.kubernetes.io/instance: gateway-addons-helm - app.kubernetes.io/version: "2.1.4" + + app.kubernetes.io/version: "v1.4.3" app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: fluent-bit + name: alloy subjects: - kind: ServiceAccount - name: fluent-bit + name: alloy namespace: monitoring --- # Source: gateway-addons-helm/charts/prometheus/templates/clusterrolebinding.yaml @@ -9414,28 +9542,31 @@ roleRef: kind: ClusterRole name: prometheus --- -# Source: gateway-addons-helm/charts/fluent-bit/templates/service.yaml +# Source: gateway-addons-helm/charts/alloy/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: fluent-bit - namespace: monitoring + name: alloy labels: - helm.sh/chart: fluent-bit-0.30.4 - app.kubernetes.io/name: fluent-bit + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy app.kubernetes.io/instance: gateway-addons-helm - app.kubernetes.io/version: "2.1.4" + + app.kubernetes.io/version: "v1.4.3" app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking spec: type: ClusterIP - ports: - - port: 2020 - targetPort: http - protocol: TCP - name: http selector: - app.kubernetes.io/name: fluent-bit + app.kubernetes.io/name: alloy app.kubernetes.io/instance: gateway-addons-helm + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" --- # Source: gateway-addons-helm/charts/loki/templates/service-memberlist.yaml apiVersion: v1 @@ -9651,84 +9782,82 @@ spec: app.kubernetes.io/name: tempo app.kubernetes.io/instance: gateway-addons-helm --- -# Source: gateway-addons-helm/charts/fluent-bit/templates/daemonset.yaml +# Source: gateway-addons-helm/charts/alloy/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: fluent-bit - namespace: monitoring + name: alloy labels: - helm.sh/chart: fluent-bit-0.30.4 - app.kubernetes.io/name: fluent-bit + helm.sh/chart: alloy-0.9.2 + app.kubernetes.io/name: alloy app.kubernetes.io/instance: gateway-addons-helm - app.kubernetes.io/version: "2.1.4" + + app.kubernetes.io/version: "v1.4.3" app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy spec: + minReadySeconds: 10 selector: matchLabels: - app.kubernetes.io/name: fluent-bit + app.kubernetes.io/name: alloy app.kubernetes.io/instance: gateway-addons-helm template: metadata: annotations: - checksum/config: 
03d122555879033ccf6443369f73463490b100f195550b1483d337f497c749e3 - checksum/luascripts: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - fluentbit.io/exclude: "true" - prometheus.io/path: /api/v1/metrics/prometheus - prometheus.io/port: "2020" - prometheus.io/scrape: "true" + kubectl.kubernetes.io/default-container: alloy labels: - app.kubernetes.io/name: fluent-bit + app.kubernetes.io/name: alloy app.kubernetes.io/instance: gateway-addons-helm spec: - serviceAccountName: fluent-bit - hostNetwork: false - dnsPolicy: ClusterFirst + serviceAccountName: alloy containers: - - name: fluent-bit - image: "fluent/fluent-bit:2.1.4" - imagePullPolicy: Always + - name: alloy + image: docker.io/grafana/alloy:v1.4.3 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName ports: - - name: http - containerPort: 2020 - protocol: TCP - livenessProbe: - httpGet: - path: / - port: http + - containerPort: 12345 + name: http-metrics readinessProbe: httpGet: - path: /api/v1/health - port: http + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 volumeMounts: - - mountPath: /fluent-bit/etc/fluent-bit.conf - name: config - subPath: fluent-bit.conf - - mountPath: /fluent-bit/etc/custom_parsers.conf - name: config - subPath: custom_parsers.conf - - mountPath: /var/log - name: varlog - - mountPath: /var/lib/docker/containers - name: varlibdockercontainers - readOnly: true - - mountPath: /etc/machine-id - name: etcmachineid - readOnly: true + - name: config + mountPath: /etc/alloy + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst volumes: - name: config configMap: - name: fluent-bit - - hostPath: - path: /var/log - name: varlog - - hostPath: - path: /var/lib/docker/containers - name: varlibdockercontainers - - hostPath: - path: /etc/machine-id - type: File - name: etcmachineid + name: alloy --- # Source: gateway-addons-helm/charts/opentelemetry-collector/templates/deployment.yaml apiVersion: apps/v1 diff --git a/test/helm/gateway-helm/certjen-custom-scheduling.out.yaml b/test/helm/gateway-helm/certjen-custom-scheduling.out.yaml index 37d0212f719..73dd3114451 100644 --- a/test/helm/gateway-helm/certjen-custom-scheduling.out.yaml +++ b/test/helm/gateway-helm/certjen-custom-scheduling.out.yaml @@ -389,6 +389,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/test/helm/gateway-helm/control-plane-with-pdb.out.yaml b/test/helm/gateway-helm/control-plane-with-pdb.out.yaml index 69f08e1dbb7..1af6260f38a 100644 --- a/test/helm/gateway-helm/control-plane-with-pdb.out.yaml +++ b/test/helm/gateway-helm/control-plane-with-pdb.out.yaml @@ -404,6 +404,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/test/helm/gateway-helm/default-config.out.yaml 
b/test/helm/gateway-helm/default-config.out.yaml index 6e1b1846bae..043cc87acaa 100644 --- a/test/helm/gateway-helm/default-config.out.yaml +++ b/test/helm/gateway-helm/default-config.out.yaml @@ -389,6 +389,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/test/helm/gateway-helm/deployment-custom-topology.out.yaml b/test/helm/gateway-helm/deployment-custom-topology.out.yaml index 0bc5809337c..3777ad9af29 100644 --- a/test/helm/gateway-helm/deployment-custom-topology.out.yaml +++ b/test/helm/gateway-helm/deployment-custom-topology.out.yaml @@ -417,6 +417,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/test/helm/gateway-helm/deployment-images-config.out.yaml b/test/helm/gateway-helm/deployment-images-config.out.yaml index f99a89039d8..5acd24f187a 100644 --- a/test/helm/gateway-helm/deployment-images-config.out.yaml +++ b/test/helm/gateway-helm/deployment-images-config.out.yaml @@ -389,6 +389,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/test/helm/gateway-helm/deployment-priorityclass.out.yaml b/test/helm/gateway-helm/deployment-priorityclass.out.yaml index 3757e360d95..23b6995e1e4 100644 --- a/test/helm/gateway-helm/deployment-priorityclass.out.yaml +++ b/test/helm/gateway-helm/deployment-priorityclass.out.yaml @@ -389,6 +389,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/test/helm/gateway-helm/envoy-gateway-config.out.yaml b/test/helm/gateway-helm/envoy-gateway-config.out.yaml index fb1e51f2209..8458f976388 100644 --- a/test/helm/gateway-helm/envoy-gateway-config.out.yaml +++ b/test/helm/gateway-helm/envoy-gateway-config.out.yaml @@ -391,6 +391,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/test/helm/gateway-helm/global-images-config.out.yaml b/test/helm/gateway-helm/global-images-config.out.yaml index ebcda594b19..4ce46484753 100644 --- a/test/helm/gateway-helm/global-images-config.out.yaml +++ b/test/helm/gateway-helm/global-images-config.out.yaml @@ -393,6 +393,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/test/helm/gateway-helm/service-annotations.out.yaml b/test/helm/gateway-helm/service-annotations.out.yaml index 9d37bdffcde..72cc8f6afe4 100644 --- a/test/helm/gateway-helm/service-annotations.out.yaml +++ b/test/helm/gateway-helm/service-annotations.out.yaml @@ -391,6 +391,10 @@ spec: - server - --config-path=/config/envoy-gateway.yaml env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: ENVOY_GATEWAY_NAMESPACE valueFrom: fieldRef: diff --git a/tools/make/kube.mk b/tools/make/kube.mk index 430084dc544..fc3dde70afd 100644 --- a/tools/make/kube.mk +++ b/tools/make/kube.mk @@ -132,7 +132,7 @@ experimental-conformance: create-cluster 
kube-install-image kube-deploy run-expe benchmark: create-cluster kube-install-image kube-deploy-for-benchmark-test run-benchmark delete-cluster ## Create a kind cluster, deploy EG into it, run Envoy Gateway benchmark test, and clean up. .PHONY: e2e -e2e: create-cluster kube-install-image kube-deploy install-ratelimit install-e2e-telemetry run-e2e delete-cluster +e2e: create-cluster kube-install-image kube-deploy install-ratelimit install-eg-addons run-e2e delete-cluster .PHONY: install-ratelimit install-ratelimit: @@ -188,10 +188,10 @@ uninstall-benchmark-server: ## Uninstall nighthawk server for benchmark test kubectl delete configmap test-server-config -n benchmark-test kubectl delete namespace benchmark-test -.PHONY: install-e2e-telemetry -install-e2e-telemetry: helm-generate.gateway-addons-helm +.PHONY: install-eg-addons +install-eg-addons: helm-generate.gateway-addons-helm @$(LOG_TARGET) - helm upgrade -i eg-addons charts/gateway-addons-helm --set grafana.enabled=false,opentelemetry-collector.enabled=true -n monitoring --create-namespace --timeout='$(WAIT_TIMEOUT)' --wait --wait-for-jobs + helm upgrade -i eg-addons charts/gateway-addons-helm -f test/helm/gateway-addons-helm/e2e.in.yaml -n monitoring --create-namespace --timeout='$(WAIT_TIMEOUT)' --wait --wait-for-jobs # Change loki service type from ClusterIP to LoadBalancer kubectl patch service loki -n monitoring -p '{"spec": {"type": "LoadBalancer"}}' # Wait service Ready @@ -202,8 +202,8 @@ install-e2e-telemetry: helm-generate.gateway-addons-helm kubectl rollout restart -n monitoring deployment/otel-collector kubectl rollout status --watch --timeout=5m -n monitoring deployment/otel-collector -.PHONY: uninstall-e2e-telemetry -uninstall-e2e-telemetry: +.PHONY: uninstall-eg-addons +uninstall-eg-addons: @$(LOG_TARGET) helm delete $(shell helm list -n monitoring -q) -n monitoring
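# --- Reference note (not part of the diff): a minimal sketch of how the renamed addon targets
# would be exercised locally, assuming a running kind cluster with helm and kubectl configured.
# The make targets, chart path, and values file are taken from the hunks above; everything else
# (omitted --timeout, shell framing) is illustrative.
#
#   # Install the e2e addons the way CI now does (grafana and fluent-bit disabled,
#   # otel-collector enabled via test/helm/gateway-addons-helm/e2e.in.yaml; alloy ships pod logs to loki).
#   make install-eg-addons
#
#   # Equivalent direct helm invocation, per the updated recipe (the make target additionally
#   # patches the loki Service to LoadBalancer and restarts the otel-collector deployment):
#   helm upgrade -i eg-addons charts/gateway-addons-helm \
#     -f test/helm/gateway-addons-helm/e2e.in.yaml \
#     -n monitoring --create-namespace --wait --wait-for-jobs
#
#   # Tear the addons down again:
#   make uninstall-eg-addons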