feat: implement redis cluster ready state (OT-CONTAINER-KIT#867)
Signed-off-by: drivebyer <[email protected]>
Signed-off-by: Matt Robinson <[email protected]>
drivebyer authored and mattrobinsonsre committed Jul 11, 2024
1 parent 05d4106 commit 0b4f7a4
Showing 5 changed files with 56 additions and 53 deletions.
1 change: 1 addition & 0 deletions api/status/redis-cluster_status.go
@@ -11,6 +11,7 @@ const (

 // Status Field of the Redis Cluster
 const (
+	// RedisClusterReady means the RedisCluster is ready for use; we use `redis-cli --cluster check 127.0.0.1:6379` to check the cluster status
 	RedisClusterReady        RedisClusterState = "Ready"
 	RedisClusterInitializing RedisClusterState = "Initializing"
 	RedisClusterBootstrap    RedisClusterState = "Bootstrap"
11 changes: 6 additions & 5 deletions controllers/rediscluster_controller.go
@@ -230,16 +230,17 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 	if k8sutils.CheckRedisNodeCount(ctx, r.K8sClient, r.Log, instance, "") == totalReplicas {
 		k8sutils.CheckIfEmptyMasters(ctx, r.K8sClient, r.Log, instance)
 	}
-	reqLogger.Info("Will reconcile the redis cluster operator again in 10 seconds")
-
 	// Mark the cluster status as ready if all the leader and follower nodes are ready
 	if instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas {
-		err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient)
-		if err != nil {
-			return ctrl.Result{}, err
+		if k8sutils.RedisClusterStatusHealth(ctx, r.K8sClient, r.Log, instance) {
+			err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient)
+			if err != nil {
+				return ctrl.Result{}, err
+			}
 		}
 	}

+	reqLogger.Info("Will reconcile the redis cluster operator again in 10 seconds")
 	return ctrl.Result{RequeueAfter: time.Second * 10}, nil
 }

48 changes: 42 additions & 6 deletions k8sutils/redis.go
@@ -322,6 +322,34 @@ func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logge
 	return int32(count)
 }
 
+// RedisClusterStatusHealth uses `redis-cli --cluster check 127.0.0.1:6379` to check cluster health
+func RedisClusterStatusHealth(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) bool {
+	redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
+	defer redisClient.Close()
+
+	cmd := []string{"redis-cli", "--cluster", "check", "127.0.0.1:6379"}
+	if cr.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
+		pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
+		if err != nil {
+			logger.Error(err, "Error in getting redis password")
+		}
+		cmd = append(cmd, "-a")
+		cmd = append(cmd, pass)
+	}
+	cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, cr.ObjectMeta.Name+"-leader-0")...)
+	out, err := executeCommand1(client, logger, cr, cmd, cr.ObjectMeta.Name+"-leader-0")
+	if err != nil {
+		return false
+	}
+	// [OK] xxx keys in xxx masters.
+	// [OK] All nodes agree about slots configuration.
+	// [OK] All 16384 slots covered.
+	if strings.Count(out, "[OK]") != 3 {
+		return false
+	}
+	return true
+}
+
// CheckRedisClusterState will check the redis cluster state
func CheckRedisClusterState(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) int {
redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
@@ -369,19 +397,28 @@ func configureRedisClient(client kubernetes.Interface, logger logr.Logger, cr *r

 // executeCommand will execute the commands in pod
 func executeCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) {
+	execOut, execErr := executeCommand1(client, logger, cr, cmd, podName)
+	if execErr != nil {
+		logger.Error(execErr, "Could not execute command", "Command", cmd, "Output", execOut)
+		return
+	}
+	logger.V(1).Info("Successfully executed the command", "Command", cmd, "Output", execOut)
+}
+
+func executeCommand1(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, cmd []string, podName string) (stdout string, stderr error) {
 	var (
 		execOut bytes.Buffer
 		execErr bytes.Buffer
 	)
 	config, err := GenerateK8sConfig()()
 	if err != nil {
 		logger.Error(err, "Could not find pod to execute")
-		return
+		return "", err
 	}
 	targetContainer, pod := getContainerID(client, logger, cr, podName)
 	if targetContainer < 0 {
 		logger.Error(err, "Could not find pod to execute")
-		return
+		return "", err
 	}
 
req := client.CoreV1().RESTClient().Post().Resource("pods").Name(podName).Namespace(cr.Namespace).SubResource("exec")
@@ -394,7 +431,7 @@ func executeCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
logger.Error(err, "Failed to init executor")
-		return
+		return "", err
}

err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
@@ -403,10 +440,9 @@ func executeCommand(client kubernetes.Interface, logger logr.Logger, cr *redisv1
Tty: false,
})
 	if err != nil {
-		logger.Error(err, "Could not execute command", "Command", cmd, "Output", execOut.String(), "Error", execErr.String())
-		return
+		return execOut.String(), fmt.Errorf("execute command with error: %w, stderr: %s", err, execErr.String())
 	}
-	logger.V(1).Info("Successfully executed the command", "Command", cmd, "Output", execOut.String())
+	return execOut.String(), nil
}

// getContainerID will return the id of container from pod
47 changes: 5 additions & 42 deletions tests/e2e-chainsaw/v1beta2/setup/redis-cluster/chainsaw-test.yaml
@@ -24,10 +24,11 @@ spec:
selector: control-plane=redis-operator
container: manager
tail: -1 # tail all logs
-    - name: Sleep for five minutes
-      try:
-        - sleep:
-            duration: 5m
+    # no need to wait for 5 minutes; once we have ready-cluster.yaml, we can proceed
+    # - name: Sleep for five minutes
+    #   try:
+    #     - sleep:
+    #         duration: 3m
- name: Ping Cluster
try:
- script:
@@ -66,44 +67,6 @@ spec:
redis-cli -c -p 6379 ping
check:
($stdout=='PONG'): true
-    - name: Check Cluster
-      try:
-        - script:
-            content: >
-              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 --
-              redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered.'
-            check:
-              ($stdout=='[OK] All 16384 slots covered.'): true
-        - script:
-            content: >
-              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 --
-              redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered'
-            check:
-              ($stdout=='[OK] All 16384 slots covered.'): true
-        - script:
-            content: >
-              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 --
-              redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered'
-            check:
-              ($stdout=='[OK] All 16384 slots covered.'): true
-        - script:
-            content: >
-              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 --
-              redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered'
-            check:
-              ($stdout=='[OK] All 16384 slots covered.'): true
-        - script:
-            content: >
-              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 --
-              redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered'
-            check:
-              ($stdout=='[OK] All 16384 slots covered.'): true
-        - script:
-            content: >
-              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 --
-              redis-cli --cluster check 127.0.0.1:6379 | grep 'All 16384 slots covered'
-            check:
-              ($stdout=='[OK] All 16384 slots covered.'): true
- name: Try saving a key With Password
try:
- script:
Expand Down
2 changes: 2 additions & 0 deletions tests/e2e-chainsaw/v1beta2/setup/redis-cluster/ready-cluster.yaml
@@ -6,3 +6,5 @@ metadata:
status:
readyFollowerReplicas: 3
readyLeaderReplicas: 3
+  state: Ready
+  reason: RedisCluster is ready
