Skip to content

Commit

Permalink
ROX-27073: script for multi cluster test setup (#2119)
Browse files Browse the repository at this point in the history
* add env variable to only deploy dataplane components
* add script to deploy to 2 clusters
  • Loading branch information
johannes94 authored Dec 10, 2024
1 parent 9d61e3a commit ef28c4b
Show file tree
Hide file tree
Showing 3 changed files with 114 additions and 25 deletions.
56 changes: 31 additions & 25 deletions dev/env/scripts/up.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,6 @@ if [[ -d "${MANIFESTS_DIR}/cluster-type-${CLUSTER_TYPE}" ]]; then
apply "${MANIFESTS_DIR}/cluster-type-${CLUSTER_TYPE}"
fi

# Deploy database.
log "Deploying database"
make -C "$GITROOT" deploy/db
wait_for_container_to_become_ready "$ACSCS_NAMESPACE" "application=fleet-manager-db" "postgresql"
log "Database is ready."

# Deploy Cloud Service components.
log "Deploying secrets"
chamber exec "fleet-manager" -- make -C "$GITROOT" deploy/secrets
Expand All @@ -58,12 +52,21 @@ if ! is_openshift_cluster "$CLUSTER_TYPE"; then
make -C "$GITROOT" deploy/redhat-pull-secret
fi

log "Deploying fleet-manager"
make -C "$GITROOT" deploy/service
DATAPLANE_ONLY=${DATAPLANE_ONLY:-}
if [[ -z "${DATAPLANE_ONLY}" ]]; then
# Deploy database.
log "Deploying database"
make -C "$GITROOT" deploy/db
wait_for_container_to_become_ready "$ACSCS_NAMESPACE" "application=fleet-manager-db" "postgresql"
log "Database is ready."

wait_for_container_to_appear "$ACSCS_NAMESPACE" "app=fleet-manager" "service"
if [[ "$SPAWN_LOGGER" == "true" && -n "${LOG_DIR:-}" ]]; then
log "Deploying fleet-manager"
make -C "$GITROOT" deploy/service
wait_for_container_to_appear "$ACSCS_NAMESPACE" "app=fleet-manager" "service"

if [[ "$SPAWN_LOGGER" == "true" && -n "${LOG_DIR:-}" ]]; then
$KUBECTL -n "$ACSCS_NAMESPACE" logs -l app=fleet-manager --all-containers --pod-running-timeout=1m --since=1m --tail=100 -f >"${LOG_DIR}/pod-logs_fleet-manager.txt" 2>&1 &
fi
fi

log "Deploying fleetshard-sync"
Expand All @@ -90,25 +93,28 @@ fi

# Sanity check.
wait_for_container_to_become_ready "$ACSCS_NAMESPACE" "app=fleetshard-sync" "fleetshard-sync" 500
# Prerequisite for port-forwarding are pods in ready state.
wait_for_container_to_become_ready "$ACSCS_NAMESPACE" "app=fleet-manager" "service"

if [[ "$ENABLE_EMAIL_SENDER" == "true" ]]; then
wait_for_container_to_become_ready "$ACSCS_NAMESPACE" "application=emailsender" "emailsender"
fi
if [[ -z "${DATAPLANE_ONLY}" ]]; then
# Prerequisite for port-forwarding are pods in ready state.
wait_for_container_to_become_ready "$ACSCS_NAMESPACE" "app=fleet-manager" "service"

if [[ "$ENABLE_FM_PORT_FORWARDING" == "true" ]]; then
log "Starting port-forwarding for fleet-manager"
port-forwarding start fleet-manager 8000 8000
else
log "Skipping port-forwarding for fleet-manager"
if [[ "$ENABLE_FM_PORT_FORWARDING" == "true" ]]; then
log "Starting port-forwarding for fleet-manager"
port-forwarding start fleet-manager 8000 8000
else
log "Skipping port-forwarding for fleet-manager"
fi

if [[ "$ENABLE_DB_PORT_FORWARDING" == "true" ]]; then
log "Starting port-forwarding for db"
port-forwarding start fleet-manager-db 5432 5432
else
log "Skipping port-forwarding for db"
fi
fi

if [[ "$ENABLE_DB_PORT_FORWARDING" == "true" ]]; then
log "Starting port-forwarding for db"
port-forwarding start fleet-manager-db 5432 5432
else
log "Skipping port-forwarding for db"
if [[ "$ENABLE_EMAIL_SENDER" == "true" ]]; then
wait_for_container_to_become_ready "$ACSCS_NAMESPACE" "application=emailsender" "emailsender"
fi

log
Expand Down
64 changes: 64 additions & 0 deletions scripts/ci/multicluster_tests/deploy.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
#!/usr/bin/env bash
set -euo pipefail
#
# Sets up a two-cluster test environment for multi-cluster tests:
#   - Cluster 1 acts as control plane AND data plane, running fleet-manager
#     and fleetshard components.
#   - Cluster 2 acts as a data plane only, running just fleetshard components.
#
# This script assumes that two clusters were created before execution.
# It expects that those clusters are accessible through kubeconfig files at the
# path values stored in the following environment variables:
export CLUSTER_1_KUBECONFIG=${CLUSTER_1_KUBECONFIG:-"$HOME/.kube/cluster1"}
export CLUSTER_2_KUBECONFIG=${CLUSTER_2_KUBECONFIG:-"$HOME/.kube/cluster2"}

# Cluster ID registered for cluster 2. Used both in the fleet-manager
# data-plane cluster list and in the fleetshard CR patch below — keep the two
# in sync via this single constant.
readonly CLUSTER_2_ID="1234567890abcdef1234567890abcdeg"

# Bootstrap C1
export KUBECONFIG="$CLUSTER_1_KUBECONFIG"
export INHERIT_IMAGEPULLSECRETS="true" # pragma: allowlist secret

# TODO: Double check how setup is done in OSCI so that we
# get the proper certificates to allow enabling creation of routes and DNS entries,
# get the proper secrets to allow communication of FM to Route 53,
# get the quay configuration to pull images.
# Maybe we want to rely on prebuilt images instead of building them ourselves, which might
# as well need additional / other commands.
make deploy/bootstrap
make deploy/dev

# The service template for dev defines a reencrypt route which requires manual creation
# of a self-signed certificate before starting fleet-manager. We don't want that, which
# is why we're setting termination to edge for this route.
kubectl patch -n rhacs route fleet-manager -p '{"spec":{"tls":{"termination":"edge"}}}'
FM_URL="https://$(kubectl get routes -n rhacs fleet-manager -o yaml | yq .spec.host)"
export FM_URL

# Extract the data-plane cluster list currently known to fleet-manager.
kubectl get cm -n rhacs fleet-manager-dataplane-cluster-scaling-config -o yaml > fm-dataplane-config.yaml
yq '.data."dataplane-cluster-configuration.yaml"' fm-dataplane-config.yaml | yq .clusters > cluster-list.json

# Derive a cluster-list entry for cluster 2 from its own generated list,
# overriding name and cluster_id so it is distinct from cluster 1.
# The ID is passed via --arg rather than interpolated into the jq program.
KUBECONFIG="$CLUSTER_2_KUBECONFIG" make cluster-list \
    | jq --arg id "$CLUSTER_2_ID" '.[0] | .name="dev2" | .cluster_id=$id' \
    | jq --slurp . > cluster-list2.json

# Merge both cluster lists and write the combined configuration back into the
# fleet-manager ConfigMap.
cluster_list_value=$(jq --slurp '. | add' cluster-list.json cluster-list2.json -c)
export new_cluster_config_value="clusters: $cluster_list_value"
yq -i '.data."dataplane-cluster-configuration.yaml" = strenv(new_cluster_config_value)' fm-dataplane-config.yaml

kubectl apply -f fm-dataplane-config.yaml
# Restart fleet-manager to pick up the config
kubectl delete pod -n rhacs -l app=fleet-manager

# Bootstrap C2 with data-plane components only (no fleet-manager / database).
export KUBECONFIG="$CLUSTER_2_KUBECONFIG"
make deploy/bootstrap
DATAPLANE_ONLY="true" make deploy/dev

# Get a static token from cluster1 which will be used for FS -> FM communication
# for the FS running on cluster2
STATIC_TOKEN=$(KUBECONFIG="$CLUSTER_1_KUBECONFIG" kubectl create token -n rhacs fleetshard-sync --audience acs-fleet-manager-private-api --duration 8760h)

# Configure FS on cluster2 to reach out to FM on cluster1
kubectl patch fleetshards -n rhacs rhacs-terraform --type='merge' -p "{\"spec\":{\"fleetshardSync\":{\"authType\":\"STATIC_TOKEN\",\"staticToken\":\"$STATIC_TOKEN\",\"fleetManagerEndpoint\":\"$FM_URL\",\"clusterId\":\"$CLUSTER_2_ID\"}}}"

# TODO: remove this as soon as the feature flag RHACS_CLUSTER_MIGRATION is retired
export KUBECONFIG="$CLUSTER_1_KUBECONFIG"
kubectl patch deploy -n rhacs fleetshard-sync -p '{"spec":{"template":{"spec":{"containers":[{"name":"fleetshard-sync","env":[{"name":"RHACS_CLUSTER_MIGRATION", "value":"true"}]}]}}}}'
kubectl patch deploy -n rhacs fleet-manager -p '{"spec":{"template":{"spec":{"containers":[{"name":"service","env":[{"name":"RHACS_CLUSTER_MIGRATION", "value":"true"}]}]}}}}'

export KUBECONFIG="$CLUSTER_2_KUBECONFIG"
kubectl patch deploy -n rhacs fleetshard-sync -p '{"spec":{"template":{"spec":{"containers":[{"name":"fleetshard-sync","env":[{"name":"RHACS_CLUSTER_MIGRATION", "value":"true"}]}]}}}}'
# Start test execution in Go
19 changes: 19 additions & 0 deletions scripts/ci/multicluster_tests/entrypoint.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
#!/usr/bin/env bash
#
# CI entrypoint for the multi-cluster tests: runs deploy.sh and, regardless of
# its outcome, cleans up any recorded port-forward processes before exiting
# with deploy.sh's exit code.
export CLUSTER_TYPE="infra-openshift"
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/../../.. && pwd)"
SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

cd "$ROOT_DIR"

# shellcheck source=/dev/null
source "$ROOT_DIR/scripts/ci/lib.sh"
source "$ROOT_DIR/scripts/lib/log.sh"
source "$ROOT_DIR/dev/env/scripts/lib.sh"

# Capture the exit code via `|| …` so this stays correct even if strict mode
# (set -e) is enabled here later.
EXIT_CODE=0
bash "$SOURCE_DIR/deploy.sh" || EXIT_CODE="$?"
if [ "$EXIT_CODE" -ne "0" ]; then
    echo "TODO(ROX-27073): add additional logging required here, once tests are actually executed"
fi

# Best-effort cleanup: kill any port-forward PIDs recorded by the dev scripts.
if [ -f /tmp/pids-port-forward ]; then
    xargs kill < /tmp/pids-port-forward
fi
exit "$EXIT_CODE"

0 comments on commit ef28c4b

Please sign in to comment.