Bump chart to 0.8.1 #535
name: CI | deploy & test
on:
  push:
    branches:
      - "*"
    paths-ignore:
      - '**.md'
  pull_request:
    branches:
      - "main"
    paths-ignore:
      - '**.md'
env:
  DB: "ejabberdTest1"
  DB_PASS: "ejabberdTest1Pass"
  KIND_VSN: "0.20.0"
  K8S_VSN: "1.29.0"
  METALLB_VSN: "0.14.3"
  CHAOS_VSN: "2.6.2"
jobs:
  deploy_test:
    runs-on: ubuntu-latest
    # concurrency: ci-${{ github.ref }}
    strategy:
      matrix:
        dbApp: [pgsql, mariadb, mssql]
        dbSchema: [default, new]
        dbBaseline: [23.04, none]
      fail-fast: false
    steps:
      ## If more is needed:
      ## https://github.com/actions/runner-images/issues/2840#issuecomment-1540506686
      - name: Free disk space
        run: |
          sudo docker rmi $(docker image ls -aq) || true
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /opt/ghc
          sudo rm -rf "/usr/local/share/boost"
          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
      - name: Check out code
        uses: actions/checkout@v4
      - name: Lint helm chart
        working-directory: ./charts/ejabberd
        run: |
          helm lint .
      - name: Install kubectl version v${{ env.K8S_VSN }}
        uses: azure/setup-kubectl@v4 # exact pinned tag assumed; the reference was obscured in the source
        with:
          version: 'v${{ env.K8S_VSN }}'
      - name: Create KinD v${{ env.KIND_VSN }} cluster
        run: |
          curl -Lo /tmp/kind https://kind.sigs.k8s.io/dl/v${{ env.KIND_VSN }}/kind-linux-amd64
          chmod +x /tmp/kind
          /tmp/kind create cluster --config=$GITHUB_WORKSPACE/.github/ci/kind-conf.yml --image kindest/node:v${{ env.K8S_VSN }}
          kubectl get nodes -o wide
          echo "DB_APP=$(echo ${{ matrix.dbApp }} | sed -e 's|pgsql|postgresql|')" >> $GITHUB_ENV
          echo "XMPP_DOMAIN=${{ github.run_id }}-${{ strategy.job-index }}.example.com" >> $GITHUB_ENV
      - name: ${{ matrix.dbApp }} | w/ db baseline ${{ matrix.dbSchema }} | deploy helm chart
        if: matrix.dbBaseline != 'none' && matrix.dbApp != 'mssql'
        run: |
          echo ">> install ${{ matrix.dbApp }} helm chart"
          kubectl apply -f .github/ci/flyway-baseline/${{ matrix.dbBaseline }}/${{ matrix.dbApp }}-${{ matrix.dbSchema }}.yaml
          if [ "${{ matrix.dbApp }}" = 'pgsql' ]
          then
            initdb="--set primary.initdb.user=${{ env.DB }} \
              --set primary.initdb.password=${{ env.DB_PASS }} \
              --set primary.initdb.scriptsConfigMap=ejabberd-${{ matrix.dbApp }}-${{ matrix.dbSchema }}"
          elif [ "${{ matrix.dbApp }}" = 'mariadb' ] || [ "${{ matrix.dbApp }}" = 'mysql' ]
          then
            initdb="--set initdbScriptsConfigMap=ejabberd-mysql-${{ matrix.dbSchema }}"
          fi
          helm install ${{ matrix.dbApp }} oci://registry-1.docker.io/bitnamicharts/${{ env.DB_APP }} \
            -f .github/ci/values-bitnami-${{ matrix.dbApp }}.yaml ${initdb:-}
      - name: ${{ matrix.dbApp }} | w/o db baseline | deploy helm chart
        if: matrix.dbBaseline == 'none' && matrix.dbApp != 'mssql'
        run: |
          helm install ${{ matrix.dbApp }} oci://registry-1.docker.io/bitnamicharts/${{ env.DB_APP }} \
            -f .github/ci/values-bitnami-${{ matrix.dbApp }}.yaml
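      # mssql is not installed from a Bitnami chart; it is deployed from plain manifests
      # (.github/ci/mssql-ci.yaml) and the initdb/baseline SQL is applied separately and
      # executed later via sqlcmd.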
      - name: ${{ matrix.dbApp }} | deploy resources
        if: matrix.dbApp == 'mssql'
        run: |
          echo ">> install ${{ matrix.dbApp }} CI resources"
          if [ ! "${{ matrix.dbBaseline }}" = 'none' ]
          then
            kubectl apply -f .github/ci/flyway-baseline/${{ matrix.dbBaseline }}/${{ matrix.dbApp }}-${{ matrix.dbSchema }}.yaml
          else
            kubectl apply -f .github/ci/flyway-baseline/${{ matrix.dbApp }}-initdb.yaml
          fi
          kubectl apply -f .github/ci/mssql-ci.yaml
      - name: redis | deploy helm chart
        run: |
          helm install redis oci://registry-1.docker.io/bitnamicharts/redis \
            -f .github/ci/values-bitnami-redis.yaml
      # https://kind.sigs.k8s.io/docs/user/loadbalancer/
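      # MetalLB provides LoadBalancer IPs inside the KinD cluster so the ejabberd
      # service can later be reached from the runner via an external IP.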
      - name: metalLB v${{ env.METALLB_VSN }} | deploy LoadBalancer service
        run: |
          kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v${{ env.METALLB_VSN }}/config/manifests/metallb-native.yaml
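      # Chaos Mesh injects the pod-failure/pod-kill experiments used further below to
      # verify that the ejabberd cluster re-forms after disruptions.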
      - name: Setup chaos mesh v${{ env.CHAOS_VSN }}
        run: |
          curl -sSL https://mirrors.chaos-mesh.org/v${{ env.CHAOS_VSN }}/install.sh \
            | bash -s -- --local kind --kind-version v${{ env.KIND_VSN }}
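      # The label and annotation below mark the secret for the chart's watcher sidecar,
      # which is expected to copy it into the certs/custom-cert directory of the ejabberd pods.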
      - name: Create self-signed certificate
        run: |
          openssl req -x509 -newkey rsa:4096 -nodes -subj "/CN=${{ env.XMPP_DOMAIN }}" -keyout cert.pem -days 365
          kubectl create secret generic custom-cert --from-file=./cert.pem
          kubectl label secret custom-cert "helm-ejabberd/watcher=true"
          kubectl annotate secret custom-cert "k8s-sidecar-target-directory=certs/custom-cert"
      - name: ${{ matrix.dbApp }} | check deployment
        run: |
          echo ">> check rollout status"
          kubectl rollout status sts ${{ matrix.dbApp }}
          echo ">> print resources"
          kubectl get all -o wide
          echo ">> print database log entries"
          kubectl logs sts/${{ matrix.dbApp }}
      - name: ${{ matrix.dbApp }} | create initdb
        if: matrix.dbApp == 'mssql'
        run: |
          echo ">> create initDB for mssql and optionally migrate db baseline"
          kubectl exec sts/mssql -- /opt/mssql-tools/bin/sqlcmd -U SA -P "${{ env.DB_PASS }}" -S localhost -i /tmp/mssql/initdb_mssql.sql
          if [ ! "${{ matrix.dbBaseline }}" = 'none' ]
          then
            kubectl exec sts/mssql -- /opt/mssql-tools/bin/sqlcmd -U SA -P "${{ env.DB_PASS }}" -S localhost -d "${{ env.DB }}" -i /tmp/mssql/V${{ matrix.dbBaseline }}__ejabberd.sql
            kubectl delete -f .github/ci/flyway-baseline/${{ matrix.dbBaseline }}/${{ matrix.dbApp }}-${{ matrix.dbSchema }}.yaml
          fi
      - name: ${{ matrix.dbApp }} | w/ db baseline ${{ matrix.dbSchema }} | remove initdb configmaps
        if: matrix.dbBaseline != 'none' && matrix.dbApp != 'mssql'
        run: |
          kubectl delete -f .github/ci/flyway-baseline/${{ matrix.dbBaseline }}/${{ matrix.dbApp }}-${{ matrix.dbSchema }}.yaml
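      # Derive the first two octets of the KinD docker network and hand MetalLB an
      # address pool in the x.y.255.200-x.y.255.250 range of that network.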
      - name: metalLB v${{ env.METALLB_VSN }} | check readiness, apply config
        run: |
          kubectl wait --namespace metallb-system \
            --for=condition=ready pod \
            --selector=app=metallb \
            --timeout=90s
          ipam="$(docker network inspect -f '{{.IPAM.Config}}' kind | awk '{gsub("[\\[\\{\\.]", " "); print $1"."$2}')"
          kubectl apply -f - <<-EOF
          apiVersion: metallb.io/v1beta1
          kind: IPAddressPool
          metadata:
            name: kind-ipaddresspool
            namespace: metallb-system
          spec:
            addresses:
              - $ipam.255.200-$ipam.255.250
          ---
          apiVersion: metallb.io/v1beta1
          kind: L2Advertisement
          metadata:
            name: kind-l2advertisment
            namespace: metallb-system
          EOF
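      # Build the chart's --set flags from the matrix: optional flyway baseline, optional
      # new SQL schema, and per-database sql_type/sql_server/flyway connection options.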
      - name: ejabberd | w/ sql schema ${{ matrix.dbSchema }} | deploy helm chart
        working-directory: ./charts/ejabberd
        run: |
          echo ">> install ejabberd helm chart"
          if [ ! "${{ matrix.dbBaseline }}" = 'none' ]
          then baseline="--set sqlDatabase.flyway.baselineVersion=${{ matrix.dbBaseline }}"
          fi
          if [ "${{ matrix.dbSchema }}" = 'new' ]
          then sql_schema="--set sqlDatabase.newSqlSchema=true"
          fi
          if [ "${{ matrix.dbApp }}" = 'pgsql' ]
          then
            sql_options="--set sqlDatabase.config.sql_type=${{ matrix.dbApp }} \
              --set sqlDatabase.config.sql_server=${{ matrix.dbApp }} \
              --set sqlDatabase.flyway.keyValue=?sslmode=disable"
          elif [ "${{ matrix.dbApp }}" = 'mysql' ] || [ "${{ matrix.dbApp }}" = 'mariadb' ]
          then
            sql_options="--set sqlDatabase.config.sql_type=mysql \
              --set sqlDatabase.config.sql_server=${{ matrix.dbApp }} \
              --set sqlDatabase.flyway.keyValue=?useSSL=false \
              --set sqlDatabase.flyway.mysqlFlavor=${{ matrix.dbApp }}"
          elif [ "${{ matrix.dbApp }}" = 'mssql' ]
          then
            sql_options="--set sqlDatabase.config.sql_type=${{ matrix.dbApp }} \
              --set sqlDatabase.config.sql_server=${{ matrix.dbApp }} \
              --set sqlDatabase.flyway.keyValue=;integratedSecurity=false;encrypt=false;trustServerCertificate=true;"
          fi
          helm install ejabberd \
            -f ../../.github/ci/values-ejabberd.yaml \
            ${baseline:-} ${sql_schema:-} ${sql_options:-} \
            --set hosts[0]=${{ env.XMPP_DOMAIN }} \
            --set logging.loglevel=info .
          echo ">> Sleep shortly to obtain LoadBalancer IP address"
          sleep 10
          echo "LB_IP=$(kubectl get svc/ejabberd -o=jsonpath='{.status.loadBalancer.ingress[0].ip}')" >> $GITHUB_ENV
      - name: processone/rtb | prepare processone RTB tests
        run: |
          export CAPACITY='120'
          export LB_IP="${{ env.LB_IP }}"
          export XMPP_DOMAIN="${{ env.XMPP_DOMAIN }}"
          export RUNNER_UID="$(id -u)"
          mkdir -p /tmp/rtb/${{ env.XMPP_DOMAIN }}/{stats,www}
          echo "$LB_IP $XMPP_DOMAIN" | sudo tee -a /etc/hosts
          ./.github/ci/rtb/setup-rtb.sh
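      # A healthy deployment reports three nodes in ejabberdctl list_cluster; the retry
      # loops below poll for that count and dump pod logs while the cluster is still forming.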
      - name: ejabberd | check rollout status
        run: |
          kubectl get all -l app.kubernetes.io/name=ejabberd
          for (( i=1; i != 2; i++ ))
          do
            if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
            then
              echo "Check number $i"
              kubectl get pods -l app.kubernetes.io/name=ejabberd
              kubectl logs --all-containers=true sts/ejabberd
              sleep 30s
            else
              break
            fi
          done
      - name: ejabberd | print resources, verify deployments
        run: kubectl get all -o wide -l app.kubernetes.io/name=ejabberd
      - name: ejabberd | print sidecar logs
        run: kubectl logs sts/ejabberd -c watcher
      - name: ejabberd | print ejabberd logs
        run: kubectl logs sts/ejabberd -c ejabberd
      - name: ejabberd | check statefulset, register admin user
        run: |
          kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl status
          kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl register admin ${{ env.XMPP_DOMAIN }} password
      - name: ejabberd | call API, check healthiness
        run: |
          kubectl get pods -l app.kubernetes.io/name=ejabberd
          curl -L -d "{}" -X POST -k http://${{ env.XMPP_DOMAIN }}:5280/api/status
          echo ">> check clustering status"
          if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
          then exit 1
          fi
      - name: processone/rtb | start tests
        run: |
          echo '>> Start RTB container'
          docker run --rm -d --name rtb --net=host \
            -v $PWD/rtb.yml:/rtb/rtb.yml \
            -v $PWD/cert.pem:/rtb/cert.pem \
            -v /tmp/rtb/${{ env.XMPP_DOMAIN }}/stats:/rtb/stats:rw \
            -v /tmp/rtb/${{ env.XMPP_DOMAIN }}/www:/rtb/www:rw \
            localhost/rtb:latest
          echo '>> Print docker container deployments'
          docker ps
          echo '>> Print docker container logs'
          docker logs rtb
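      # A values-only upgrade (loglevel info -> notice) is expected to roll the pods; the
      # cluster should re-form with three nodes while RTB keeps generating XMPP traffic.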
      - name: ejabberd | upgrade helm chart
        working-directory: ./charts/ejabberd
        run: |
          helm upgrade ejabberd --no-hooks --reuse-values \
            --set logging.loglevel=notice .
          echo ">> Check deployment"
          kubectl get all -l app.kubernetes.io/name=ejabberd
          sleep 15s
          for (( i=1; i != 10; i++ ))
          do
            if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
            then
              echo "Check number $i"
              kubectl get pods -l app.kubernetes.io/name=ejabberd
              kubectl logs --all-containers=true sts/ejabberd
              sleep 15s
            else
              break
            fi
          done
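      # chaos-1.yaml applies a scheduled pod-failure experiment; the loop waits until the
      # StatefulSet reports 3/3 ready replicas again.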
      - name: chaos | scheduled ejabberd pod-failure, wait for recovery
        run: |
          kubectl apply -f .github/ci/chaos-mesh/chaos-1.yaml
          echo ">> Check deployment"
          kubectl get all -l app.kubernetes.io/name=ejabberd
          sleep 15s
          while [ ! "$(kubectl get sts ejabberd --no-headers | awk '{print $2}')" = '3/3' ]
          do
            for (( i=1; i != 10; i++ ))
            do
              echo "Check number $i"
              kubectl get pods -l app.kubernetes.io/name=ejabberd
              kubectl logs --all-containers=true sts/ejabberd
              sleep 15s
            done
          done
      - name: ejabberd | call API, check healthiness
        run: |
          kubectl get pods -l app.kubernetes.io/name=ejabberd
          curl -L -d "{}" -X POST -k http://${{ env.XMPP_DOMAIN }}:5280/api/status
          echo ">> check clustering status"
          if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
          then exit 1
          fi
          echo ">> print RTB container logs"
          docker logs rtb
      - name: chaos | stop scheduled ejabberd pod-failure
        run: |
          kubectl delete -f .github/ci/chaos-mesh/chaos-1.yaml
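      # Scale the release down to a single replica; list_cluster should then report
      # exactly one node before scaling back up.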
      - name: ejabberd | scale down helm release
        working-directory: ./charts/ejabberd
        run: |
          helm upgrade ejabberd --no-hooks --reuse-values \
            --set statefulSet.replicas=1 .
          echo ">> Check deployment"
          kubectl get all -l app.kubernetes.io/name=ejabberd
          sleep 15s
          for (( i=1; i != 10; i++ ))
          do
            if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '1' ]
            then
              echo "Check number $i"
              kubectl get pods -l app.kubernetes.io/name=ejabberd
              kubectl logs --all-containers=true sts/ejabberd
              sleep 15s
            else
              break
            fi
          done
      - name: ejabberd | call API, check healthiness
        run: |
          kubectl get pods -l app.kubernetes.io/name=ejabberd
          curl -L -d "{}" -X POST -k http://${{ env.XMPP_DOMAIN }}:5280/api/status
          echo ">> check clustering status"
          if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '1' ]
          then exit 1
          fi
          echo ">> print RTB container logs"
          docker logs rtb
      - name: ejabberd | scale up helm release
        working-directory: ./charts/ejabberd
        run: |
          helm upgrade ejabberd --no-hooks --reuse-values \
            --set statefulSet.replicas=3 .
          echo ">> Check deployment"
          kubectl get all -l app.kubernetes.io/name=ejabberd
          sleep 75s
          for (( i=1; i != 10; i++ ))
          do
            if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
            then
              echo "Check number $i"
              kubectl get pods -l app.kubernetes.io/name=ejabberd
              kubectl logs --all-containers=true sts/ejabberd
              sleep 30s
            else
              break
            fi
          done
      - name: ejabberd | call API, check healthiness
        run: |
          kubectl get pods -l app.kubernetes.io/name=ejabberd
          curl -L -d "{}" -X POST -k http://${{ env.XMPP_DOMAIN }}:5280/api/status
          echo ">> check clustering status"
          if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
          then exit 1
          fi
          echo ">> print RTB container logs"
          docker logs rtb
      - name: chaos | ejabberd pod-kill <= 66%, wait for recovery
        run: |
          kubectl apply -f .github/ci/chaos-mesh/chaos-2.yaml
          echo ">> Check deployment"
          kubectl get all -l app.kubernetes.io/name=ejabberd
          sleep 75s
          for (( i=1; i != 10; i++ ))
          do
            if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
            then
              echo "Check number $i"
              kubectl get pods -l app.kubernetes.io/name=ejabberd
              kubectl logs --all-containers=true sts/ejabberd
              sleep 30s
            else
              break
            fi
          done
      - name: ejabberd | call API, check healthiness
        run: |
          kubectl get pods -l app.kubernetes.io/name=ejabberd
          curl -L -d "{}" -X POST -k http://${{ env.XMPP_DOMAIN }}:5280/api/status
          echo ">> check clustering status"
          if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
          then exit 1
          fi
          echo ">> print RTB container logs"
          docker logs rtb
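      # chaos-3.yaml kills 100% of the ejabberd pods at once; the whole cluster has to come
      # back and re-join before the following health check passes.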
      - name: chaos | ejabberd pod-kill 100%, wait for recovery
        run: |
          kubectl apply -f .github/ci/chaos-mesh/chaos-3.yaml
          echo ">> Check deployment"
          kubectl get all -l app.kubernetes.io/name=ejabberd
          sleep 105s
          for (( i=1; i != 10; i++ ))
          do
            if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
            then
              echo "Check number $i"
              kubectl get pods -l app.kubernetes.io/name=ejabberd
              kubectl logs --all-containers=true sts/ejabberd
              sleep 30s
            else
              break
            fi
          done
      - name: ejabberd | call API, check healthiness
        run: |
          kubectl get pods -l app.kubernetes.io/name=ejabberd
          curl -L -d "{}" -X POST -k http://${{ env.XMPP_DOMAIN }}:5280/api/status
          echo ">> check clustering status"
          if [ ! "$(kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster | wc -l)" = '3' ]
          then exit 1
          fi
          echo ">> print RTB container logs"
          docker logs rtb
      - name: ejabberd | on failure | print logs - ejabberd-0
        if: failure()
        run: kubectl logs ejabberd-0 -c ejabberd
      - name: ejabberd | on failure | print logs - ejabberd-1
        if: failure()
        run: kubectl logs ejabberd-1 -c ejabberd
      - name: ejabberd | on failure | print logs - ejabberd-2
        if: failure()
        run: kubectl logs ejabberd-2 -c ejabberd
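      # Last-resort recovery on failure: export the StatefulSet manifest, delete and
      # re-apply it, then wait for 3/3 ready replicas so the final log collection can run.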
      - name: ejabberd | on failure | delete & recreate statefulset
        if: failure()
        run: |
          echo ">> Delete statefulset"
          kubectl get sts ejabberd -o yaml > sts.yaml
          kubectl delete -f sts.yaml
          kubectl apply -f sts.yaml
          echo ">> Check deployment"
          kubectl get all -l app.kubernetes.io/name=ejabberd
          sleep 105s
          for (( i=1; i != 10; i++ ))
          do
            if [ ! "$(kubectl get sts ejabberd --no-headers | awk '{print $2}')" = '3/3' ]
            then
              echo "Check number $i"
              kubectl get pods -l app.kubernetes.io/name=ejabberd
              kubectl logs --all-containers=true sts/ejabberd
              sleep 30s
            else
              break
            fi
          done
      - name: ejabberd | print final resources
        if: failure() || success()
        run: |
          echo ">> print kubernetes resources"
          kubectl get all -o wide
          kubectl get pvc
          echo ">> print clustering status"
          kubectl exec sts/ejabberd -c ejabberd -- ejabberdctl list_cluster
          echo ">> print RTB container logs"
          docker logs rtb
          echo ">> stop RTB container"
          docker stop rtb
      - name: ejabberd | print logs - ejabberd-0
        if: failure() || success()
        run: kubectl logs ejabberd-0 -c ejabberd
      - name: ejabberd | print logs - ejabberd-1
        if: failure() || success()
        run: kubectl logs ejabberd-1 -c ejabberd
      - name: ejabberd | print logs - ejabberd-2
        if: failure() || success()
        run: kubectl logs ejabberd-2 -c ejabberd
      ## Define an additional job, which depends on deploy_test and
      ## downloads the rtb results as action artifacts
      #
      # - name: Publish rtb results to gh-pages
      #   if: failure() || success()
      #   uses: JamesIves/github-pages-deploy-action@v4
      #   with:
      #     folder: /tmp/rtb
      #     target-folder: rtb/${{ env.XMPP_DOMAIN }}