From 480ccb536ecb2790962bf1b713aee5426f008813 Mon Sep 17 00:00:00 2001
From: cintoSunny <67714887+cintoSunny@users.noreply.github.com>
Date: Wed, 19 Oct 2022 08:37:02 -0700
Subject: [PATCH] Sidecar container spec for druid (#296)

* Sidecar container spec for druid
* Fix PR comments on additionalcontainer
* Add envFrom and add comments
* Add example spec to example.md
* Add multi container deployment to features.md

Co-authored-by: cinto
---
 apis/druid/v1alpha1/druid_types.go            |  41 ++
 apis/druid/v1alpha1/zz_generated.deepcopy.go  |  60 ++-
 .../crds/druid.apache.org_druids.yaml         | 204 +++++++++
 controllers/druid/handler.go                  |  71 +++-
 controllers/druid/handler_test.go             |  18 +
 .../testdata/broker-statefulset-sidecar.yaml  | 119 ++++++
 .../druid/testdata/druid-test-cr-sidecar.yaml | 394 ++++++++++++++++++
 deploy/crds/druid.apache.org_druids.yaml      | 204 +++++++++
 docs/examples.md                              |  32 ++
 docs/features.md                              |   7 +
 10 files changed, 1126 insertions(+), 24 deletions(-)
 create mode 100755 controllers/druid/testdata/broker-statefulset-sidecar.yaml
 create mode 100644 controllers/druid/testdata/druid-test-cr-sidecar.yaml

diff --git a/apis/druid/v1alpha1/druid_types.go b/apis/druid/v1alpha1/druid_types.go
index 2af0fd2c..6adc5e4e 100644
--- a/apis/druid/v1alpha1/druid_types.go
+++ b/apis/druid/v1alpha1/druid_types.go
@@ -24,6 +24,44 @@ import (
 // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
 // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
 
+// AdditionalContainer defines the additional sidecar container
+type AdditionalContainer struct {
+    // Settings for the sidecar that are not covered by, or that override, the default container configuration
+
+    // This is the image for the additional container to run.
+    // This is a required field
+    Image string `json:"image"`
+
+    // This is the name of the additional container.
+    // This is a required field
+    ContainerName string `json:"containerName"`
+
+    // This is the command for the additional container to run.
+    // This is a required field
+    Command []string `json:"command"`
+
+    // Optional: If not present, will be taken from top level spec
+    ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"`
+
+    // Optional: Arguments to pass to the command
+    Args []string `json:"args,omitempty"`
+
+    // Optional: ContainerSecurityContext. If not present, will be taken from top level pod
+    ContainerSecurityContext *v1.SecurityContext `json:"securityContext,omitempty"`
+
+    // Optional: CPU/Memory Resources
+    Resources v1.ResourceRequirements `json:"resources,omitempty"`
+
+    // Optional: Volume mounts for the additional container
+    VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"`
+
+    // Optional: Environment variables for the additional container
+    Env []v1.EnvVar `json:"env,omitempty"`
+
+    // Optional: Extra environment variables sourced from ConfigMaps or Secrets
+    EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty"`
+}
+
 // DruidSpec defines the desired state of Druid
 type DruidSpec struct {
     // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
@@ -132,6 +170,9 @@ type DruidSpec struct {
     // that is, it must match regex '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*'
     Nodes map[string]DruidNodeSpec `json:"nodes"`
 
+    // Operator deploys the sidecar containers based on these properties. The sidecars will be deployed in all the Druid pods.
+ AdditionalContainer []AdditionalContainer `json:"additionalContainer,omitempty"` + // Operator deploys above list of nodes in the Druid prescribed order of Historical, Overlord, MiddleManager, // Broker, Coordinator etc. // Optional: If set to true then operator checks the rollout status of previous version StateSets before updating next. diff --git a/apis/druid/v1alpha1/zz_generated.deepcopy.go b/apis/druid/v1alpha1/zz_generated.deepcopy.go index b8473e09..6bf13d96 100644 --- a/apis/druid/v1alpha1/zz_generated.deepcopy.go +++ b/apis/druid/v1alpha1/zz_generated.deepcopy.go @@ -1,4 +1,3 @@ -//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -19,6 +18,58 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalContainer) DeepCopyInto(out *AdditionalContainer) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerSecurityContext != nil { + in, out := &in.ContainerSecurityContext, &out.ContainerSecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalContainer. +func (in *AdditionalContainer) DeepCopy() *AdditionalContainer { + if in == nil { + return nil + } + out := new(AdditionalContainer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DeepStorageSpec) DeepCopyInto(out *DeepStorageSpec) { *out = *in @@ -481,6 +532,13 @@ func (in *DruidSpec) DeepCopyInto(out *DruidSpec) { (*out)[key] = *val.DeepCopy() } } + if in.AdditionalContainer != nil { + in, out := &in.AdditionalContainer, &out.AdditionalContainer + *out = make([]AdditionalContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Zookeeper != nil { in, out := &in.Zookeeper, &out.Zookeeper *out = new(ZookeeperSpec) diff --git a/chart/templates/crds/druid.apache.org_druids.yaml b/chart/templates/crds/druid.apache.org_druids.yaml index 99124341..5a2dbc37 100644 --- a/chart/templates/crds/druid.apache.org_druids.yaml +++ b/chart/templates/crds/druid.apache.org_druids.yaml @@ -29,6 +29,210 @@ spec: type: object spec: properties: + additionalContainer: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + containerName: + type: string + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + 
gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + required: + - command + - containerName + - image + type: object + type: array affinity: properties: nodeAffinity: diff --git a/controllers/druid/handler.go b/controllers/druid/handler.go index 0ae4118a..40d348b4 100644 --- a/controllers/druid/handler.go +++ b/controllers/druid/handler.go @@ -1302,30 +1302,55 @@ func makePodTemplate(nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid, ls map // makePodSpec shall create podSpec common to both deployment and statefulset. func makePodSpec(nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid, nodeSpecUniqueStr, configMapSHA string) v1.PodSpec { - spec := v1.PodSpec{ - NodeSelector: firstNonNilValue(m.Spec.NodeSelector, nodeSpec.NodeSelector).(map[string]string), - TopologySpreadConstraints: getTopologySpreadConstraints(nodeSpec), - Tolerations: getTolerations(nodeSpec, m), - Affinity: getAffinity(nodeSpec, m), - ImagePullSecrets: firstNonNilValue(nodeSpec.ImagePullSecrets, m.Spec.ImagePullSecrets).([]v1.LocalObjectReference), - Containers: []v1.Container{ - { - Image: firstNonEmptyStr(nodeSpec.Image, m.Spec.Image), - Name: fmt.Sprintf("%s", nodeSpecUniqueStr), - Command: []string{firstNonEmptyStr(m.Spec.StartScript, "bin/run-druid.sh"), nodeSpec.NodeType}, - ImagePullPolicy: v1.PullPolicy(firstNonEmptyStr(string(nodeSpec.ImagePullPolicy), string(m.Spec.ImagePullPolicy))), - Ports: nodeSpec.Ports, - Resources: nodeSpec.Resources, - Env: getEnv(nodeSpec, m, configMapSHA), - EnvFrom: getEnvFrom(nodeSpec, m), - VolumeMounts: getVolumeMounts(nodeSpec, m), - LivenessProbe: getLivenessProbe(nodeSpec, m), - ReadinessProbe: getReadinessProbe(nodeSpec, m), - StartupProbe: getStartUpProbe(nodeSpec, m), - Lifecycle: nodeSpec.Lifecycle, - SecurityContext: firstNonNilValue(nodeSpec.ContainerSecurityContext, m.Spec.ContainerSecurityContext).(*v1.SecurityContext), - }, + + var containers []v1.Container + containers = append(containers, + v1.Container{ + Image: firstNonEmptyStr(nodeSpec.Image, m.Spec.Image), + Name: fmt.Sprintf("%s", nodeSpecUniqueStr), + Command: []string{firstNonEmptyStr(m.Spec.StartScript, "bin/run-druid.sh"), nodeSpec.NodeType}, + ImagePullPolicy: v1.PullPolicy(firstNonEmptyStr(string(nodeSpec.ImagePullPolicy), string(m.Spec.ImagePullPolicy))), + Ports: nodeSpec.Ports, + Resources: nodeSpec.Resources, + Env: getEnv(nodeSpec, m, configMapSHA), + EnvFrom: getEnvFrom(nodeSpec, m), + VolumeMounts: getVolumeMounts(nodeSpec, m), + LivenessProbe: getLivenessProbe(nodeSpec, m), + ReadinessProbe: getReadinessProbe(nodeSpec, m), + StartupProbe: getStartUpProbe(nodeSpec, m), + Lifecycle: nodeSpec.Lifecycle, + SecurityContext: firstNonNilValue(nodeSpec.ContainerSecurityContext, m.Spec.ContainerSecurityContext).(*v1.SecurityContext), }, + ) + + if m.Spec.AdditionalContainer != nil { + + for _, containerList := range m.Spec.AdditionalContainer { + + containers = append(containers, + v1.Container{ + Image: containerList.Image, + Name: containerList.ContainerName, + Resources: containerList.Resources, + VolumeMounts: containerList.VolumeMounts, + Command: containerList.Command, + Args: containerList.Args, + ImagePullPolicy: containerList.ImagePullPolicy, + 
SecurityContext: containerList.ContainerSecurityContext, + Env: containerList.Env, + EnvFrom: containerList.EnvFrom, + }, + ) + } + } + + spec := v1.PodSpec{ + NodeSelector: firstNonNilValue(m.Spec.NodeSelector, nodeSpec.NodeSelector).(map[string]string), + TopologySpreadConstraints: getTopologySpreadConstraints(nodeSpec), + Tolerations: getTolerations(nodeSpec, m), + Affinity: getAffinity(nodeSpec, m), + ImagePullSecrets: firstNonNilValue(nodeSpec.ImagePullSecrets, m.Spec.ImagePullSecrets).([]v1.LocalObjectReference), + Containers: containers, TerminationGracePeriodSeconds: nodeSpec.TerminationGracePeriodSeconds, Volumes: getVolume(nodeSpec, m, nodeSpecUniqueStr), SecurityContext: firstNonNilValue(nodeSpec.PodSecurityContext, m.Spec.PodSecurityContext).(*v1.PodSecurityContext), diff --git a/controllers/druid/handler_test.go b/controllers/druid/handler_test.go index 764205d2..7774c8d7 100644 --- a/controllers/druid/handler_test.go +++ b/controllers/druid/handler_test.go @@ -29,6 +29,20 @@ func TestMakeStatefulSetForBroker(t *testing.T) { assertEquals(expected, actual, t) } +func TestMakeStatefulSetForBrokerWithSidecar(t *testing.T) { + clusterSpec := readSampleDruidClusterSpecWithSidecar(t) + + nodeSpecUniqueStr := makeNodeSpecificUniqueString(clusterSpec, "brokers") + nodeSpec := clusterSpec.Spec.Nodes["brokers"] + + actual, _ := makeStatefulSet(&nodeSpec, clusterSpec, makeLabelsForNodeSpec(&nodeSpec, clusterSpec, clusterSpec.Name, nodeSpecUniqueStr), nodeSpecUniqueStr, "blah", nodeSpecUniqueStr) + addHashToObject(actual) + + expected := new(appsv1.StatefulSet) + readAndUnmarshallResource("testdata/broker-statefulset-sidecar.yaml", &expected, t) + assertEquals(expected, actual, t) +} + func TestDeploymentForBroker(t *testing.T) { clusterSpec := readSampleDruidClusterSpec(t) @@ -120,6 +134,10 @@ func readSampleDruidClusterSpec(t *testing.T) *v1alpha1.Druid { return readDruidClusterSpecFromFile(t, "testdata/druid-test-cr.yaml") } +func readSampleDruidClusterSpecWithSidecar(t *testing.T) *v1alpha1.Druid { + return readDruidClusterSpecFromFile(t, "testdata/druid-test-cr-sidecar.yaml") +} + func readDruidClusterSpecFromFile(t *testing.T, filePath string) *v1alpha1.Druid { bytes, err := ioutil.ReadFile(filePath) if err != nil { diff --git a/controllers/druid/testdata/broker-statefulset-sidecar.yaml b/controllers/druid/testdata/broker-statefulset-sidecar.yaml new file mode 100755 index 00000000..956a6f21 --- /dev/null +++ b/controllers/druid/testdata/broker-statefulset-sidecar.yaml @@ -0,0 +1,119 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-druid-test-brokers + namespace: test-namespace + labels: + app: druid + druid_cr: druid-test + nodeSpecUniqueStr: druid-druid-test-brokers + component: broker + annotations: + druidOpResourceHash: qItQSuF2PU0ZPa2ZFyj3DOa7qNc= +spec: + podManagementPolicy: Parallel + replicas: 2 + selector: + matchLabels: + app: druid + druid_cr: druid-test + nodeSpecUniqueStr: druid-druid-test-brokers + component: broker + serviceName: druid-druid-test-brokers + template: + metadata: + labels: + app: druid + druid_cr: druid-test + nodeSpecUniqueStr: druid-druid-test-brokers + component: broker + annotations: + key1: value1 + key2: value2 + spec: + tolerations: [] + affinity: {} + containers: + - command: + - bin/run-druid.sh + - broker + image: apache/druid:0.22.1 + name: druid-druid-test-brokers + env: + - name : configMapSHA + value : blah + ports: + - containerPort: 8083 + name: random + readinessProbe: + httpGet: + path: /status + port: 8080 + 
livenessProbe: + httpGet: + path: /status + port: 8080 + resources: + limits: + cpu: "4" + memory: 2Gi + requests: + cpu: "4" + memory: 2Gi + volumeMounts: + - mountPath: /druid/conf/druid/_common + readOnly: true + name: common-config-volume + - mountPath: /druid/conf/druid/broker + readOnly: true + name: nodetype-config-volume + - mountPath: /druid/data + readOnly: true + name: data-volume + - command: + - /bin/sidekick + image: universalforwarder-sidekick:next + name: forwarder + resources: + requests: + memory: "1Gi" + cpu: "500m" + limits: + memory: "1Gi" + cpu: "500m" + args: + - -loggingEnabled=true + - -dataCenter=dataCenter + - -environment=environment + - -application=application + - -instance=instance + - -logFiles=logFiles + securityContext: + runAsUser: 506 + imagePullPolicy: Always + volumeMounts: + - name: logstore + mountPath: /logstore + env: + - name: SAMPLE_ENV + value: SAMPLE_VALUE + securityContext: + fsGroup: 107 + runAsUser: 106 + volumes: + - configMap: + name: druid-test-druid-common-config + name: common-config-volume + - configMap: + name: druid-druid-test-brokers-config + name: nodetype-config-volume + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 \ No newline at end of file diff --git a/controllers/druid/testdata/druid-test-cr-sidecar.yaml b/controllers/druid/testdata/druid-test-cr-sidecar.yaml new file mode 100644 index 00000000..ad89df9d --- /dev/null +++ b/controllers/druid/testdata/druid-test-cr-sidecar.yaml @@ -0,0 +1,394 @@ +apiVersion: "druid.apache.org/v1alpha1" +kind: "Druid" +metadata: + name: druid-test + namespace: test-namespace +spec: + image: apache/druid:0.22.1 + podAnnotations: + key1: value1 + key2: value2 + securityContext: + fsGroup: 107 + runAsUser: 106 + readinessProbe: + httpGet: + path: /status + additionalContainer: + - image: universalforwarder-sidekick:next + containerName: forwarder + command: + - /bin/sidekick + imagePullPolicy: Always + securityContext: + runAsUser: 506 + volumeMounts: + - name: logstore + mountPath: /logstore + env: + - name: SAMPLE_ENV + value: SAMPLE_VALUE + resources: + requests: + memory: "1Gi" + cpu: "500m" + limits: + memory: "1Gi" + cpu: "500m" + args: + - -loggingEnabled=true + - -dataCenter=dataCenter + - -environment=environment + - -application=application + - -instance=instance + - -logFiles=logFiles + zookeeper: + type: default + spec: + properties: |- + druid.zk.service.host=zookeeper-0.zookeeper,zookeeper-1.zookeeper,zookeeper-2.zookeeper + druid.zk.paths.base=/druid + druid.zk.service.compress=false + metadataStore: + type: default + spec: + properties: |- + druid.metadata.storage.type=postgresql + druid.metadata.storage.connector.connectURI=jdbc:postgresql://rdsaddr.us-west-2.rds.amazonaws.com:5432/druiddb + druid.metadata.storage.connector.user=iamuser + druid.metadata.storage.connector.password=changeme + druid.metadata.storage.connector.createTables=true + deepStorage: + type: default + spec: + properties: |- + druid.storage.type=s3 + druid.storage.bucket=mybucket + druid.storage.baseKey=druid/segments + druid.s3.accessKey=accesskey + druid.s3.secretKey=secretkey + jvm.options: |- + -server + -XX:MaxDirectMemorySize=10240g + -Duser.timezone=UTC + -Dfile.encoding=UTF-8 + -Dlog4j.debug + -XX:+ExitOnOutOfMemoryError + -XX:+HeapDumpOnOutOfMemoryError + -XX:+UseG1GC + -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager + log4j.config: |- + + + + + + + + + + + + + + 
common.runtime.properties: |- + # + # Extensions + # + druid.extensions.loadList=["druid-datasketches", "druid-s3-extensions", "postgresql-metadata-storage"] + + # + # Logging + # + # Log all runtime properties on startup. Disable to avoid logging properties on startup: + druid.startup.logging.logProperties=true + + # + # Indexing service logs + # + # Store indexing logs in an S3 bucket named 'druid-deep-storage' with the + # prefix 'druid/indexing-logs' + druid.indexer.logs.type=s3 + druid.indexer.logs.s3Bucket=mybucket + druid.indexer.logs.s3Prefix=druid/indexing-logs + + # + # Service discovery + # + druid.selectors.indexing.serviceName=druid/overlord + druid.selectors.coordinator.serviceName=druid/coordinator + + # + # Monitoring + # + druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"] + druid.emitter=logging + druid.emitter.logging.logLevel=info + + # Storage type of double columns + # ommiting this will lead to index double as float at the storage layer + druid.indexing.doubleStorage=double + metricDimensions.json: |- + { + "query/time" : { "dimensions" : ["dataSource", "type"], "type" : "timer"} + } + nodes: + brokers: + nodeType: "broker" + services: + - + spec: + type: ClusterIP + clusterIP: None + - + metadata: + name: broker-%s-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + spec: + type: LoadBalancer + ports: + - + name: service-port + port: 8090 + targetPort: 8080 + druid.port: 8080 + replicas: 2 + podDisruptionBudgetSpec: + maxUnavailable: 1 + livenessProbe: + httpGet: + path: /status + ports: + - + name: random + containerPort: 8083 + runtime.properties: |- + druid.service=druid/broker + + # HTTP server threads + druid.broker.http.numConnections=5 + druid.server.http.numThreads=25 + + # Processing threads and buffers + druid.processing.buffer.sizeBytes=1 + druid.processing.numMergeBuffers=1 + druid.processing.numThreads=1 + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + readOnly: true + resources: + requests: + memory: "2Gi" + cpu: "4" + limits: + memory: "2Gi" + cpu: "4" + + coordinators: + nodeType: "coordinator" + druid.port: 8080 + replicas: 1 + ports: + - + name: random + containerPort: 8083 + runtime.properties: |- + druid.service=druid/coordinator + + # HTTP server threads + druid.coordinator.startDelay=PT30S + druid.coordinator.period=PT30S + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: + requests: + memory: "2Gi" + cpu: "4" + limits: + memory: "2Gi" + cpu: "4" + + historicals: + nodeType: "historical" + druid.port: 8080 + replicas: 2 + ports: + - + name: random + containerPort: 8084 + runtime.properties: |- + druid.service=druid/historical + druid.server.http.numThreads=10 + druid.processing.buffer.sizeBytes=268435456 + druid.processing.numMergeBuffers=1 + druid.processing.numThreads=1 + # Segment storage + druid.segmentCache.locations=[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}] + druid.server.maxSize=10737418240 + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - 
ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: + requests: + memory: "2Gi" + cpu: "4" + limits: + memory: "2Gi" + cpu: "4" + + overlords: + nodeType: "overlord" + druid.port: 8080 + replicas: 1 + ports: + - + name: random + containerPort: 8083 + runtime.properties: |- + druid.service=druid/overlord + + # HTTP server threads + druid.indexer.queue.startDelay=PT30S + druid.indexer.runner.type=remote + druid.indexer.storage.type=metadata + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: + requests: + memory: "2Gi" + cpu: "4" + limits: + memory: "2Gi" + cpu: "4" + + middlemanagers: + nodeType: "middleManager" + druid.port: 8080 + replicas: 1 + ports: + - + name: peon-0-pt + containerPort: 8100 + - + name: peon-1-pt + containerPort: 8101 + - + name: peon-2-pt + containerPort: 8102 + - + name: peon-3-pt + containerPort: 8103 + - + name: peon-4-pt + containerPort: 8104 + - + name: peon-5-pt + containerPort: 8105 + - + name: peon-6-pt + containerPort: 8106 + - + name: peon-7-pt + containerPort: 8107 + - + name: peon-8-pt + containerPort: 8108 + - + name: peon-9-pt + containerPort: 8109 + + runtime.properties: |- + druid.service=druid/middleManager + druid.worker.capacity=1 + druid.indexer.runner.javaOpts=-server -XX:MaxDirectMemorySize=10240g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.io.tmpdir=/druid/data/tmp -Dlog4j.debug -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCApplicationConcurrentTime -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=50 -XX:GCLogFileSize=10m -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:+UseG1GC -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Xloggc:/druid/data/logs/peon.gc.%t.%p.log -XX:HeapDumpPath=/druid/data/logs/peon.%t.%p.hprof -Xms1G -Xmx1G + druid.indexer.task.baseTaskDir=/druid/data/baseTaskDir + druid.server.http.numThreads=10 + druid.indexer.fork.property.druid.processing.buffer.sizeBytes=268435456 + druid.indexer.fork.property.druid.processing.numMergeBuffers=1 + druid.indexer.fork.property.druid.processing.numThreads=1 + druid.indexer.task.hadoopWorkingPath=/druid/data/hadoop-working-path + druid.indexer.task.defaultHadoopCoordinates=[\"org.apache.hadoop:hadoop-client:2.7.3\"] + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: + requests: + memory: "3Gi" + cpu: "4" + limits: + memory: "3Gi" + cpu: "4" diff --git a/deploy/crds/druid.apache.org_druids.yaml b/deploy/crds/druid.apache.org_druids.yaml index 5127c12b..d7ec5d34 100644 --- a/deploy/crds/druid.apache.org_druids.yaml +++ b/deploy/crds/druid.apache.org_druids.yaml @@ -28,6 +28,210 @@ spec: type: object spec: properties: + additionalContainer: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + containerName: + type: string + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: 
+ type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + required: + - command + - containerName + - image + type: object + type: array affinity: properties: nodeAffinity: diff --git a/docs/examples.md b/docs/examples.md index 9f31304a..23ec6c89 100644 --- a/docs/examples.md +++ b/docs/examples.md @@ -238,3 +238,35 @@ ... ... 
 ```
+## Configure Additional Containers
+
+```
+  additionalContainer:
+    - image: universalforwarder-sidekick:next
+      containerName: forwarder
+      command:
+        - /bin/sidekick
+      imagePullPolicy: Always
+      securityContext:
+        runAsUser: 506
+      volumeMounts:
+        - name: logstore
+          mountPath: /logstore
+      env:
+        - name: SAMPLE_ENV
+          value: SAMPLE_VALUE
+      resources:
+        requests:
+          memory: "1Gi"
+          cpu: "500m"
+        limits:
+          memory: "1Gi"
+          cpu: "500m"
+      args:
+        - -loggingEnabled=true
+        - -dataCenter=dataCenter
+        - -environment=environment
+        - -application=application
+        - -instance=instance
+        - -logFiles=logFiles
+```
\ No newline at end of file
diff --git a/docs/features.md b/docs/features.md
index 5fd6de64..d1002367 100644
--- a/docs/features.md
+++ b/docs/features.md
@@ -8,6 +8,7 @@
 * [Force Delete of Sts Pods](#Force-Delete-of-Sts-Pods)
 * [Scaling of Druid Nodes](#Scaling-of-Druid-Nodes)
 * [Volume Expansion of Druid Nodes Running As StatefulSets](#Scaling-of-Druid-Nodes)
+* [Add Additional Containers in Druid Nodes](#Add-Additional-Containers-in-Druid-Nodes)
 
 ## Deny List in Operator
 
@@ -60,3 +61,9 @@
 - Shrinkage of pvc's isnt supported, desiredSize cannot be less than currentSize as well as counts.
 - To enable this feature ```scalePvcSts``` needs to be enabled to ```true```.
 - By default, this feature is disabled.
+
+## Add Additional Containers in Druid Nodes
+- The Druid operator supports running additional containers alongside the Druid services. This helps support co-located, co-managed helper processes for the primary Druid application.
+- This can be used for sidecars such as proxies, log forwarders, and other helper processes.
+- To enable this feature, users just need to add the desired containers to the ```additionalContainer``` list in the cluster spec (see the minimal sketch after this section).
+- This is scoped at the cluster level only, which means the additional containers will be common to all the Druid nodes.
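
For orientation, below is a minimal sketch of where the cluster-scoped ```additionalContainer``` list sits in a Druid custom resource. The container name, image, and command are placeholders, and the optional sidecar fields shown in the docs/examples.md snippet above are omitted.

```
apiVersion: "druid.apache.org/v1alpha1"
kind: "Druid"
metadata:
  name: druid-test
spec:
  image: apache/druid:0.22.1
  # additionalContainer is defined once at the cluster (spec) level,
  # so the sidecar is added to the pods of every node group under nodes.
  additionalContainer:
    - containerName: my-sidecar          # placeholder container name
      image: my-sidecar-image:latest     # placeholder image
      command:
        - /bin/my-sidecar                # placeholder command
  nodes:
    brokers:
      nodeType: "broker"
      druid.port: 8080
      replicas: 1
```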