diff --git a/helm/clickhouse-eks/README.md b/helm/clickhouse-eks/README.md
new file mode 100644
index 0000000..a0688b6
--- /dev/null
+++ b/helm/clickhouse-eks/README.md
@@ -0,0 +1,3 @@
+# clickhouse-eks
+
+> TBA
\ No newline at end of file
diff --git a/helm/keeper-ss/Chart.yaml b/helm/keeper-ss/Chart.yaml
new file mode 100644
index 0000000..cbc6674
--- /dev/null
+++ b/helm/keeper-ss/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: keeper-ss
+description: A Helm chart for setting up ClickHouse Keeper using a StatefulSet
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
diff --git a/helm/keeper-ss/README.md b/helm/keeper-ss/README.md
new file mode 100644
index 0000000..a546de4
--- /dev/null
+++ b/helm/keeper-ss/README.md
@@ -0,0 +1,3 @@
+# keeper-ss
+
+> TBA
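+
+A minimal install sketch (release name, namespace, and replica count below are examples, not chart defaults):
+
+```bash
+# Install the Keeper StatefulSet chart from this repository
+helm install keeper ./helm/keeper-ss \
+  --namespace keeper --create-namespace \
+  --set keeper.replicas=3
+
+# Inspect the pods and Services (app label is keeper-<keeper.name>, i.e. keeper-keeper by default)
+kubectl -n keeper get pods,svc -l app=keeper-keeper
+```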
\ No newline at end of file
diff --git a/helm/keeper-ss/templates/chk.yaml b/helm/keeper-ss/templates/chk.yaml
new file mode 100644
index 0000000..47d1b9d
--- /dev/null
+++ b/helm/keeper-ss/templates/chk.yaml
@@ -0,0 +1,414 @@
+---
+# Setup Service to provide access to ClickHouse keeper for clients
+apiVersion: v1
+kind: Service
+metadata:
+ # Client DNS will be like keeper-<name>.<namespace>.svc
+ name: keeper-{{ .Values.keeper.name }}
+ labels:
+ app: keeper-{{ .Values.keeper.name }}
+spec:
+ ports:
+ - port: {{ .Values.keeper.tcp_port }}
+ name: client
+ - port: 7000
+ name: prometheus
+ selector:
+ app: keeper-{{ .Values.keeper.name }}
+ what: node
+---
+# Setup Headless Service for StatefulSet
+apiVersion: v1
+kind: Service
+metadata:
+ # Pod DNS will be like keeper-<name>-0.keeper-<name>s.<namespace>.svc
+ name: keeper-{{ .Values.keeper.name }}s
+ labels:
+ app: keeper-{{ .Values.keeper.name }}
+spec:
+ ports:
+ - port: 9444
+ name: raft
+ clusterIP: None
+ selector:
+ app: keeper-{{ .Values.keeper.name }}
+ what: node
+---
+# Setup max number of unavailable pods in StatefulSet
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: keeper-{{ .Values.keeper.name }}-pod-disruption-budget
+spec:
+ selector:
+ matchLabels:
+ app: keeper-{{ .Values.keeper.name }}
+ maxUnavailable: 1
+---
+# Setup ClickHouse Keeper settings
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: keeper-{{ .Values.keeper.name }}-settings
+data:
+ keeper_config.xml: |
+ <clickhouse>
+   <include_from>/tmp/clickhouse-keeper/config.d/generated-keeper-settings.xml</include_from>
+   <logger>
+     <level>trace</level>
+     <console>true</console>
+   </logger>
+   <listen_host>{{ .Values.keeper.listen_host }}</listen_host>
+   <keeper_server incl="keeper_server">
+     <enable_reconfiguration>true</enable_reconfiguration>
+     <path>/var/lib/clickhouse-keeper</path>
+     <tcp_port>{{ .Values.keeper.tcp_port }}</tcp_port>
+     <four_letter_word_white_list>*</four_letter_word_white_list>
+     <coordination_settings>
+       <raft_logs_level>information</raft_logs_level>
+     </coordination_settings>
+   </keeper_server>
+   <prometheus>
+     <endpoint>/metrics</endpoint>
+     <port>7000</port>
+     <metrics>true</metrics>
+     <events>true</events>
+     <asynchronous_metrics>true</asynchronous_metrics>
+     <status_info>true</status_info>
+   </prometheus>
+ </clickhouse>
+---
+# Setup ClickHouse Keeper scripts
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: keeper-{{ .Values.keeper.name }}-scripts
+ labels:
+ app: keeper-{{ .Values.keeper.name }}
+data:
+ env.sh: |
+ #!/usr/bin/env bash
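+ # Endpoints shared by the scripts in this ConfigMap: CLIENT_HOST/CLIENT_PORT target
+ # the client Service defined above, RAFT_PORT the raft port of the headless Service.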
+ export DOMAIN=`hostname -d`
+ export CLIENT_HOST=keeper-{{ .Values.keeper.name }}
+ export CLIENT_PORT={{ .Values.keeper.tcp_port }}
+ export RAFT_PORT=9444
+ keeperFunctions.sh: |
+ #!/usr/bin/env bash
+ set -ex
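+ # Prints this node's entry in the reconfig format "host:port;role;weight",
+ # as passed to "clickhouse-keeper-client -q 'reconfig add ...'" in keeperReady.sh.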
+ function keeperConfig() {
+ echo "$HOST.$DOMAIN:$RAFT_PORT;$ROLE;$WEIGHT"
+ }
+ function keeperConnectionString() {
+ # If the client service address is not yet available, then return localhost
+ set +e
+ getent hosts "${CLIENT_HOST}" 2>/dev/null 1>/dev/null
+ if [[ $? -ne 0 ]]; then
+ set -e
+ echo "-h localhost -p ${CLIENT_PORT}"
+ else
+ set -e
+ echo "-h ${CLIENT_HOST} -p ${CLIENT_PORT}"
+ fi
+ }
+
+ keeperStart.sh: |
+ #!/usr/bin/env bash
+ set -ex
+ source /conf/env.sh
+ source /conf/keeperFunctions.sh
+
+ HOST=`hostname -s`
+ if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then
+ NAME=${BASH_REMATCH[1]}
+ ORD=${BASH_REMATCH[2]}
+ else
+ echo Failed to parse name and ordinal of Pod
+ exit 1
+ fi
+ export MY_ID=$((ORD+1))
+ set +e
+ getent hosts $DOMAIN
+ if [[ $? -eq 0 ]]; then
+ ACTIVE_ENSEMBLE=true
+ else
+ ACTIVE_ENSEMBLE=false
+ fi
+ set -e
+ mkdir -p /tmp/clickhouse-keeper/config.d/
+ if [[ "true" == "${ACTIVE_ENSEMBLE}" ]]; then
+ # get current config from clickhouse-keeper
+ CURRENT_KEEPER_CONFIG=$(clickhouse-keeper-client --history-file=/dev/null -h ${CLIENT_HOST} -p ${CLIENT_PORT} -q "get /keeper/config" || true)
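+ # /keeper/config holds one line per server, e.g. "server.1=host.domain:9444;participant;1";
+ # the loop below parses those lines into id/hostname/port/priority.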
+ # generate dynamic config, add current server to xml
+ {
+ echo ""
+ echo "${MY_ID}"
+ echo ""
+ if [[ "0" == $(echo "${CURRENT_KEEPER_CONFIG}" | grep -c "${HOST}.${DOMAIN}") ]]; then
+ echo "${MY_ID}${HOST}.${DOMAIN}${RAFT_PORT}1true"
+ fi
+ while IFS= read -r line; do
+ id=$(echo "$line" | cut -d '=' -f 1 | cut -d '.' -f 2)
+ if [[ "" != "${id}" ]]; then
+ hostname=$(echo "$line" | cut -d '=' -f 2 | cut -d ';' -f 1 | cut -d ':' -f 1)
+ port=$(echo "$line" | cut -d '=' -f 2 | cut -d ';' -f 1 | cut -d ':' -f 2)
+ priority=$(echo "$line" | cut -d ';' -f 3)
+ priority=${priority:-1}
+ port=${port:-$RAFT_PORT}
+ echo "$id$hostname$port$priority"
+ fi
+ done <<< "$CURRENT_KEEPER_CONFIG"
+ echo ""
+ echo ""
+ } > /tmp/clickhouse-keeper/config.d/generated-keeper-settings.xml
+ else
+ # generate dynamic config, add current server to xml
+ {
+ echo ""
+ echo "${MY_ID}"
+ echo ""
+ echo "${MY_ID}${HOST}.${DOMAIN}${RAFT_PORT}1"
+ echo ""
+ echo ""
+ } > /tmp/clickhouse-keeper/config.d/generated-keeper-settings.xml
+ fi
+
+ # run clickhouse-keeper
+ cat /tmp/clickhouse-keeper/config.d/generated-keeper-settings.xml
+ rm -rfv /var/lib/clickhouse-keeper/terminated
+ clickhouse-keeper --config-file=/etc/clickhouse-keeper/keeper_config.xml
+
+ keeperTeardown.sh: |
+ #!/usr/bin/env bash
+ set -ex
+ exec > /proc/1/fd/1
+ exec 2> /proc/1/fd/2
+ source /conf/env.sh
+ source /conf/keeperFunctions.sh
+ set +e
+ KEEPER_URL=$(keeperConnectionString)
+ set -e
+ HOST=`hostname -s`
+ if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then
+ NAME=${BASH_REMATCH[1]}
+ ORD=${BASH_REMATCH[2]}
+ else
+ echo Failed to parse name and ordinal of Pod
+ exit 1
+ fi
+ export MY_ID=$((ORD+1))
+
+ CURRENT_KEEPER_CONFIG=$(clickhouse-keeper-client --history-file=/dev/null -h localhost -p ${CLIENT_PORT} -q "get /keeper/config")
+ CLUSTER_SIZE=$(echo -e "${CURRENT_KEEPER_CONFIG}" | grep -c -E '^server\.[0-9]+=')
+ echo "CLUSTER_SIZE=$CLUSTER_SIZE, MyId=$MY_ID"
+ # If CLUSTER_SIZE > 1, this server is being permanently removed from raft_configuration.
+ if [[ "$CLUSTER_SIZE" -gt "1" ]]; then
+ clickhouse-keeper-client --history-file=/dev/null -q "reconfig remove $MY_ID" ${KEEPER_URL}
+ fi
+
+ # Wait to remove $MY_ID from quorum
+ # for (( i = 0; i < 6; i++ )); do
+ # CURRENT_KEEPER_CONFIG=$(clickhouse-keeper-client --history-file=/dev/null -h localhost -p ${CLIENT_PORT} -q "get /keeper/config")
+ # if [[ "0" == $(echo -e "${CURRENT_KEEPER_CONFIG}" | grep -c -E "^server.${MY_ID}=$HOST.+participant;[0-1]$") ]]; then
+ # echo "$MY_ID removed from quorum"
+ # break
+ # else
+ # echo "$MY_ID still present in quorum"
+ # fi
+ # sleep 1
+ # done
+
+ # Wait for client connections to drain. Kubernetes will wait until the configured
+ # "terminationGracePeriodSeconds" before forcibly killing the container
+ for (( i = 0; i < 3; i++ )); do
+ CONN_COUNT=$(echo "$(exec 3<>/dev/tcp/127.0.0.1/${CLIENT_PORT} ; printf "cons" >&3 ; IFS=; tee <&3; exec 3<&- ;)" | grep -v "^$" | grep -v "127.0.0.1" | wc -l)
+ if [[ "$CONN_COUNT" -gt "0" ]]; then
+ echo "$CONN_COUNT non-local connections still connected."
+ sleep 1
+ else
+ echo "$CONN_COUNT non-local connections"
+ break
+ fi
+ done
+
+ touch /var/lib/clickhouse-keeper/terminated
+ # Kill the primary process ourselves to circumvent the terminationGracePeriodSeconds
+ ps -ef | grep clickhouse-keeper | grep -v grep | awk '{print $1}' | xargs kill
+
+
+ keeperLive.sh: |
+ #!/usr/bin/env bash
+ set -ex
+ source /conf/env.sh
+ OK=$(exec 3<>/dev/tcp/127.0.0.1/${CLIENT_PORT} ; printf "ruok" >&3 ; IFS=; tee <&3; exec 3<&- ;)
+ # Check to see if keeper service answers
+ if [[ "$OK" == "imok" ]]; then
+ exit 0
+ else
+ exit 1
+ fi
+
+ keeperReady.sh: |
+ #!/usr/bin/env bash
+ set -ex
+ exec > /proc/1/fd/1
+ exec 2> /proc/1/fd/2
+ source /conf/env.sh
+ source /conf/keeperFunctions.sh
+
+ HOST=`hostname -s`
+
+ # Check to see if clickhouse-keeper service answers
+ set +e
+ getent hosts $DOMAIN
+ if [[ $? -ne 0 ]]; then
+ echo "no active DNS records in service, first running pod"
+ exit 0
+ elif [[ -f /var/lib/clickhouse-keeper/terminated ]]; then
+ echo "termination in progress"
+ exit 0
+ else
+ set -e
+ # An ensemble exists, check to see if this node is already a member.
+ # Extract resource name and this members' ordinal value from pod hostname
+ if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then
+ NAME=${BASH_REMATCH[1]}
+ ORD=${BASH_REMATCH[2]}
+ else
+ echo "Failed to parse name and ordinal of Pod"
+ exit 1
+ fi
+ MY_ID=$((ORD+1))
+
+ CURRENT_KEEPER_CONFIG=$(clickhouse-keeper-client --history-file=/dev/null -h ${CLIENT_HOST} -p ${CLIENT_PORT} -q "get /keeper/config" || exit 0)
+ # Check to see if clickhouse-keeper for this node is a participant in raft cluster
+ if [[ "1" == $(echo -e "${CURRENT_KEEPER_CONFIG}" | grep -c -E "^server.${MY_ID}=${HOST}.+participant;1$") ]]; then
+ echo "clickhouse-keeper instance is available and an active participant"
+ exit 0
+ else
+ echo "clickhouse-keeper instance is ready to add as participant with 1 weight."
+
+ ROLE=participant
+ WEIGHT=1
+ KEEPER_URL=$(keeperConnectionString)
+ NEW_KEEPER_CONFIG=$(keeperConfig)
+ clickhouse-keeper-client --history-file=/dev/null -q "reconfig add 'server.$MY_ID=$NEW_KEEPER_CONFIG'" ${KEEPER_URL}
+ exit 0
+ fi
+ fi
+
+---
+# Setup ClickHouse Keeper StatefulSet
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ # Pods will be named keeper-<name>-0, keeper-<name>-1, ...
+ name: keeper-{{ .Values.keeper.name }}
+ labels:
+ app: keeper-{{ .Values.keeper.name }}
+spec:
+ selector:
+ matchLabels:
+ app: keeper-{{ .Values.keeper.name }}
+ serviceName: keeper-{{ .Values.keeper.name }}s
+ replicas: {{ .Values.keeper.replicas }}
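+ # Pods are started one at a time; keeperReady.sh joins each new pod to the raft
+ # configuration via "reconfig add" before the next ordinal is created.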
+ podManagementPolicy: OrderedReady
+ template:
+ metadata:
+ labels:
+ app: keeper-{{ .Values.keeper.name }}
+ what: node
+ annotations:
+ prometheus.io/port: '7000'
+ prometheus.io/scrape: 'true'
+ spec:
+ # Workaround to ensure correct FS permissions.
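+ # 101 is assumed to be the clickhouse user/group in the Keeper image.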
+ securityContext:
+ fsGroup: 101
+ runAsUser: 101
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: "app"
+ operator: In
+ values:
+ - keeper-{{ .Values.keeper.name }}
+ topologyKey: "kubernetes.io/hostname"
+ volumes:
+ - name: clickhouse-keeper-settings
+ configMap:
+ name: keeper-{{ .Values.keeper.name }}-settings
+ items:
+ - key: keeper_config.xml
+ path: keeper_config.xml
+ - name: clickhouse-keeper-scripts
+ configMap:
+ name: keeper-{{ .Values.keeper.name }}-scripts
+ defaultMode: 0755
+ containers:
+ - name: clickhouse-keeper
+ imagePullPolicy: IfNotPresent
+ image: "{{ .Values.keeper.image }}"
+ resources:
+ requests:
+ memory: "256M"
+ cpu: "1"
+ limits:
+ memory: "4Gi"
+ cpu: "2"
+ volumeMounts:
+ - name: clickhouse-keeper-settings
+ mountPath: /etc/clickhouse-keeper/
+ - name: keeper-{{ .Values.keeper.name }}-datadir-volume
+ mountPath: /var/lib/clickhouse-keeper
+ - name: clickhouse-keeper-scripts
+ mountPath: /conf/
+ command:
+ - /conf/keeperStart.sh
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /conf/keeperTeardown.sh
+ livenessProbe:
+ exec:
+ command:
+ - /conf/keeperLive.sh
+ failureThreshold: 3
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 10
+ readinessProbe:
+ exec:
+ command:
+ - /conf/keeperReady.sh
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 20
+ ports:
+ - containerPort: {{ .Values.keeper.tcp_port }}
+ name: client
+ protocol: TCP
+ - containerPort: 9444
+ name: quorum
+ protocol: TCP
+ - containerPort: 7000
+ name: metrics
+ protocol: TCP
+ restartPolicy: Always
+ schedulerName: default-scheduler
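+ # Must give keeperTeardown.sh (preStop) time to run "reconfig remove" and drain connections.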
+ terminationGracePeriodSeconds: 40
+ volumeClaimTemplates:
+ - metadata:
+ name: keeper-{{ .Values.keeper.name }}-datadir-volume
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.keeper.storage }}
\ No newline at end of file
diff --git a/helm/keeper-ss/values.yaml b/helm/keeper-ss/values.yaml
new file mode 100644
index 0000000..448fea8
--- /dev/null
+++ b/helm/keeper-ss/values.yaml
@@ -0,0 +1,13 @@
+all:
+ metadata:
+ labels:
+ application_group: keeper
+
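+# Settings consumed by templates/chk.yaml: tcp_port is the Keeper client port,
+# image pins the Keeper build, storage sizes each replica's PersistentVolumeClaim.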
+keeper:
+ name: keeper
+ replicas: 1
+ listen_host: "0.0.0.0"
+ tcp_port: 2181
+ image: "altinity/clickhouse-keeper:23.8.8.21.altinitystable"
+ storage: 25Gi
+