Skip to content

Commit

Permalink
test: Add unit test (OT-CONTAINER-KIT#856)
Browse files Browse the repository at this point in the history
* test: Add unit test

Signed-off-by: Shubham Gupta <[email protected]>

* test: Cover TestCheckRedisCluster

Signed-off-by: Shubham Gupta <[email protected]>

* test: fix coverage

Signed-off-by: Shubham Gupta <[email protected]>

* fix: coverage

Signed-off-by: Shubham Gupta <[email protected]>

* fix: remove print statement

Signed-off-by: Shubham Gupta <[email protected]>

---------

Signed-off-by: Shubham Gupta <[email protected]>
Signed-off-by: wkd-woo <[email protected]>
  • Loading branch information
shubham-cmyk authored and wkd-woo committed Apr 1, 2024
1 parent 4bea59d commit 8373c87
Show file tree
Hide file tree
Showing 5 changed files with 210 additions and 29 deletions.
2 changes: 0 additions & 2 deletions k8sutils/redis-cluster.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package k8sutils

import (
"fmt"
"strconv"
"strings"

Expand Down Expand Up @@ -35,7 +34,6 @@ type RedisClusterService struct {

// generateRedisClusterParams generates Redis cluster information
func generateRedisClusterParams(cr *redisv1beta2.RedisCluster, replicas int32, externalConfig *string, params RedisClusterSTS) statefulSetParameters {
fmt.Println(cr.Spec.Storage.NodeConfVolume)
res := statefulSetParameters{
Replicas: &replicas,
ClusterMode: true,
Expand Down
30 changes: 15 additions & 15 deletions k8sutils/redis.go
Original file line number Diff line number Diff line change
Expand Up @@ -165,8 +165,9 @@ func createRedisReplicationCommand(client kubernetes.Interface, logger logr.Logg
pass, err := getRedisPassword(client, logger, cr.Namespace, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name, *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Key)
if err != nil {
logger.Error(err, "Failed to retrieve Redis password", "Secret", *cr.Spec.KubernetesConfig.ExistingPasswordSecret.Name)
} else {
cmd = append(cmd, "-a", pass)
}
cmd = append(cmd, "-a", pass)
}

cmd = append(cmd, getRedisTLSArgs(cr.Spec.TLS, leaderPod.PodName)...)
Expand All @@ -185,7 +186,10 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter
leaderCounts := cr.Spec.GetReplicaCounts("leader")
followerPerLeader := followerCounts / leaderCounts

nodes := checkRedisCluster(ctx, client, logger, cr)
redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
defer redisClient.Close()

nodes := checkRedisCluster(ctx, redisClient, logger)
for followerIdx := 0; followerIdx <= int(followerCounts)-1; {
for i := 0; i < int(followerPerLeader) && followerIdx <= int(followerCounts)-1; i++ {
followerPod := RedisDetails{
Expand Down Expand Up @@ -222,18 +226,10 @@ func ExecuteRedisReplicationCommand(ctx context.Context, client kubernetes.Inter
}

// checkRedisCluster will check the redis cluster have sufficient nodes or not
func checkRedisCluster(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) [][]string {
redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
defer redisClient.Close()
cmd := redis.NewStringCmd(ctx, "cluster", "nodes")
err := redisClient.Process(ctx, cmd)
func checkRedisCluster(ctx context.Context, redisClient *redis.Client, logger logr.Logger) [][]string {
output, err := redisClient.ClusterNodes(ctx).Result()
if err != nil {
logger.Error(err, "Redis command failed with this error")
}

output, err := cmd.Result()
if err != nil {
logger.Error(err, "Redis command failed with this error")
logger.Error(err, "Error in getting Redis cluster nodes")
}
logger.V(1).Info("Redis cluster nodes are listed", "Output", output)

Expand Down Expand Up @@ -298,8 +294,10 @@ func executeFailoverCommand(ctx context.Context, client kubernetes.Interface, lo

// CheckRedisNodeCount will check the count of redis nodes
func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster, nodeType string) int32 {
redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
defer redisClient.Close()
var redisNodeType string
clusterNodes := checkRedisCluster(ctx, client, logger, cr)
clusterNodes := checkRedisCluster(ctx, redisClient, logger)
count := len(clusterNodes)

switch nodeType {
Expand All @@ -326,7 +324,9 @@ func CheckRedisNodeCount(ctx context.Context, client kubernetes.Interface, logge

// CheckRedisClusterState will check the redis cluster state
func CheckRedisClusterState(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisCluster) int {
clusterNodes := checkRedisCluster(ctx, client, logger, cr)
redisClient := configureRedisClient(client, logger, cr, cr.ObjectMeta.Name+"-leader-0")
defer redisClient.Close()
clusterNodes := checkRedisCluster(ctx, redisClient, logger)
count := 0
for _, node := range clusterNodes {
if strings.Contains(node[2], "fail") || strings.Contains(node[7], "disconnected") {
Expand Down
188 changes: 178 additions & 10 deletions k8sutils/redis_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"strings"
"testing"

"github.com/OT-CONTAINER-KIT/redis-operator/api"
redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
mock_utils "github.com/OT-CONTAINER-KIT/redis-operator/mocks/utils"
"github.com/go-logr/logr"
Expand All @@ -16,6 +17,8 @@ import (
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
k8sClientFake "k8s.io/client-go/kubernetes/fake"
"k8s.io/utils/ptr"
)
Expand Down Expand Up @@ -349,22 +352,84 @@ func TestGetRedisTLSArgs(t *testing.T) {
}

func TestCreateRedisReplicationCommand(t *testing.T) {
logger := logr.Discard()
type secret struct {
name string
namespace string
key string
}
tests := []struct {
name string
redisCluster *redisv1beta2.RedisCluster
name string
redisCluster *redisv1beta2.RedisCluster
secret
leaderPod RedisDetails
followerPod RedisDetails
expectedCommand []string
}{
{
name: "Test case with cluster version v7",
secret: secret{
name: "redis-password-secret",
namespace: "default",
key: "password",
},
redisCluster: &redisv1beta2.RedisCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "redis-cluster",
Namespace: "default",
},
Spec: redisv1beta2.RedisClusterSpec{
Size: ptr.To(int32(3)),
Size: ptr.To(int32(3)),
KubernetesConfig: redisv1beta2.KubernetesConfig{
KubernetesConfig: api.KubernetesConfig{
ExistingPasswordSecret: &api.ExistingPasswordSecret{
Name: ptr.To("redis-password-secret"),
Key: ptr.To("password"),
},
},
},
ClusterVersion: ptr.To("v7"),
Port: ptr.To(6379),
},
},
leaderPod: RedisDetails{
PodName: "redis-cluster-leader-0",
Namespace: "default",
},
followerPod: RedisDetails{
PodName: "redis-cluster-follower-0",
Namespace: "default",
},
expectedCommand: []string{
"redis-cli", "--cluster", "add-node",
"redis-cluster-follower-0.redis-cluster-follower-headless.default.svc:6379",
"redis-cluster-leader-0.redis-cluster-leader-headless.default.svc:6379",
"--cluster-slave",
"-a", "password",
},
},
{
name: "Test case with cluster version v7 failed to get password",
secret: secret{
name: "wrong-name",
namespace: "default",
key: "password",
},
redisCluster: &redisv1beta2.RedisCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "redis-cluster",
Namespace: "default",
},
Spec: redisv1beta2.RedisClusterSpec{
Size: ptr.To(int32(3)),
KubernetesConfig: redisv1beta2.KubernetesConfig{
KubernetesConfig: api.KubernetesConfig{
ExistingPasswordSecret: &api.ExistingPasswordSecret{
Name: ptr.To("redis-password-secret"),
Key: ptr.To("password"),
},
},
},
ClusterVersion: ptr.To("v7"),
Port: ptr.To(6379),
},
Expand Down Expand Up @@ -415,9 +480,17 @@ func TestCreateRedisReplicationCommand(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
client := mock_utils.CreateFakeClientWithPodIPs(tt.redisCluster)
logger := testr.New(t)
pods := mock_utils.CreateFakeObjectWithPodIPs(tt.redisCluster)
var secret []runtime.Object
if tt.redisCluster.Spec.KubernetesConfig.ExistingPasswordSecret != nil {
secret = mock_utils.CreateFakeObjectWithSecret(tt.secret.name, tt.secret.namespace, tt.secret.key)
}

var objects []runtime.Object
objects = append(objects, pods...)
objects = append(objects, secret...)

client := fake.NewSimpleClientset(objects...)
cmd := createRedisReplicationCommand(client, logger, tt.redisCluster, tt.leaderPod, tt.followerPod)

// Assert the command is as expected using testify
Expand Down Expand Up @@ -568,6 +641,22 @@ func Test_checkAttachedSlave(t *testing.T) {
infoErr: redis.ErrClosed,
expectedSlaveCount: -1,
},
{
name: "field not found",
podName: "pod2",
infoReturn: "# Replication\r\n" +
"role:master\r\n" +
"master_failover_state:no-failover\r\n" +
"master_replid:7b634a76ebb7d5f07007f1d5aec8abff8200704e\r\n" +
"master_replid2:0000000000000000000000000000000000000000\r\n" +
"master_repl_offset:0\r\n" +
"second_repl_offset:-1\r\n" +
"repl_backlog_active:0\r\n" +
"repl_backlog_size:1048576\r\n" +
"repl_backlog_first_byte_offset:0\r\n" +
"repl_backlog_histlen:0\r\n",
expectedSlaveCount: 0,
},
}

for _, tt := range tests {
Expand Down Expand Up @@ -598,6 +687,7 @@ func Test_checkRedisServerRole(t *testing.T) {
podName string
infoReturn string
infoErr error
shouldFail bool
expectedResult string
}{
{
Expand Down Expand Up @@ -635,10 +725,26 @@ func Test_checkRedisServerRole(t *testing.T) {
expectedResult: "slave",
},
{
name: "error fetching role info",
podName: "pod3",
infoErr: redis.ErrClosed,
expectedResult: "",
name: "error fetching role info",
podName: "pod3",
infoErr: redis.ErrClosed,
shouldFail: true,
},
{
name: "field not found",
podName: "pod2",
infoReturn: "# Replication\r\n" +
"connected_slaves:0\r\n" +
"master_failover_state:no-failover\r\n" +
"master_replid:7b634a76ebb7d5f07007f1d5aec8abff8200704e\r\n" +
"master_replid2:0000000000000000000000000000000000000000\r\n" +
"master_repl_offset:0\r\n" +
"second_repl_offset:-1\r\n" +
"repl_backlog_active:0\r\n" +
"repl_backlog_size:1048576\r\n" +
"repl_backlog_first_byte_offset:0\r\n" +
"repl_backlog_histlen:0\r\n",
shouldFail: true,
},
}

Expand All @@ -654,10 +760,72 @@ func Test_checkRedisServerRole(t *testing.T) {
}

role := checkRedisServerRole(ctx, client, logger, tt.podName)
assert.Equal(t, tt.expectedResult, role, "Test case: "+tt.name)
if tt.shouldFail {
assert.Empty(t, role, "Test case: "+tt.name)
} else {
assert.Equal(t, tt.expectedResult, role, "Test case: "+tt.name)
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unmet expectations: %s", err)
}
})
}
}

// TestCheckRedisCluster verifies that checkRedisCluster parses the output of
// CLUSTER NODES into a slice of whitespace-split node records, and that it
// returns nil when the underlying Redis command fails.
func TestCheckRedisCluster(t *testing.T) {
	logger := logr.Discard() // tests do not inspect log output

	tests := []struct {
		name               string
		expectError        error
		clusterNodesOutput string
		expectedResult     [][]string
	}{
		{
			name: "Detailed cluster nodes output",
			clusterNodesOutput: `07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:30004@31004,hostname4 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected
67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002@31002,hostname2 master - 0 1426238316232 2 connected 5461-10922
292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003@31003,hostname3 master - 0 1426238318243 3 connected 10923-16383
6ec23923021cf3ffec47632106199cb7f496ce01 127.0.0.1:30005@31005,hostname5 slave 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 0 1426238316232 5 connected
824fe116063bc5fcf9f4ffd895bc17aee7731ac3 127.0.0.1:30006@31006,hostname6 slave 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 0 1426238317741 6 connected
e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001,hostname1 myself,master - 0 0 1 connected 0-5460`,
			expectedResult: [][]string{
				{"07c37dfeb235213a872192d90877d0cd55635b91", "127.0.0.1:30004@31004,hostname4", "slave", "e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca", "0", "1426238317239", "4", "connected"},
				{"67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1", "127.0.0.1:30002@31002,hostname2", "master", "-", "0", "1426238316232", "2", "connected", "5461-10922"},
				{"292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f", "127.0.0.1:30003@31003,hostname3", "master", "-", "0", "1426238318243", "3", "connected", "10923-16383"},
				{"6ec23923021cf3ffec47632106199cb7f496ce01", "127.0.0.1:30005@31005,hostname5", "slave", "67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1", "0", "1426238316232", "5", "connected"},
				{"824fe116063bc5fcf9f4ffd895bc17aee7731ac3", "127.0.0.1:30006@31006,hostname6", "slave", "292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f", "0", "1426238317741", "6", "connected"},
				{"e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca", "127.0.0.1:30001@31001,hostname1", "myself,master", "-", "0", "0", "1", "connected", "0-5460"},
			},
		},
		{
			name:           "Error from ClusterNodes",
			expectError:    redis.ErrClosed,
			expectedResult: nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			db, mock := redismock.NewClientMock()

			// Program the mock: either the raw CLUSTER NODES payload or an error.
			expectation := mock.ExpectClusterNodes()
			if tt.expectError != nil {
				expectation.SetErr(tt.expectError)
			} else {
				expectation.SetVal(tt.clusterNodesOutput)
			}

			got := checkRedisCluster(context.TODO(), db, logger)

			if tt.expectError != nil {
				assert.Nil(t, got)
			} else {
				assert.ElementsMatch(t, tt.expectedResult, got)
			}

			// Every programmed expectation must have been consumed.
			if err := mock.ExpectationsWereMet(); err != nil {
				t.Errorf("there were unfulfilled expectations: %s", err)
			}
		})
	}
}
2 changes: 2 additions & 0 deletions k8sutils/secrets.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,11 @@ func getRedisPassword(client kubernetes.Interface, logger logr.Logger, namespace
}
for key, value := range secretName.Data {
if key == secretKey {
logger.V(1).Info("Secret key found in the secret", "secretKey", secretKey)
return strings.TrimSpace(string(value)), nil
}
}
logger.Error(errors.New("secret key not found"), "Secret key not found in the secret")
return "", nil
}

Expand Down
17 changes: 15 additions & 2 deletions mocks/utils/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ func CreateFakeClientWithPodIPs_LeaderPods(cr *redisv1beta2.RedisCluster) *fake.
return fake.NewSimpleClientset(pods...)
}

func CreateFakeClientWithPodIPs(cr *redisv1beta2.RedisCluster) *fake.Clientset {
func CreateFakeObjectWithPodIPs(cr *redisv1beta2.RedisCluster) []runtime.Object {
leaderReplicas := cr.Spec.GetReplicaCounts("leader")
followerReplicas := cr.Spec.GetReplicaCounts("follower")
pods := make([]runtime.Object, leaderReplicas+followerReplicas)
Expand Down Expand Up @@ -60,7 +60,20 @@ func CreateFakeClientWithPodIPs(cr *redisv1beta2.RedisCluster) *fake.Clientset {
}
}

return fake.NewSimpleClientset(pods...)
return pods
}

// CreateFakeObjectWithSecret returns a one-element runtime.Object slice
// containing a Kubernetes Secret named name in namespace, whose Data maps
// key to the fixed value "password". Intended for seeding fake clientsets
// in unit tests.
func CreateFakeObjectWithSecret(name, namespace, key string) []runtime.Object {
	data := map[string][]byte{key: []byte("password")}
	return []runtime.Object{
		&corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
			Data:       data,
		},
	}
}

func CreateFakeClientWithSecrets(cr *redisv1beta2.RedisCluster, secretName, secretKey, secretValue string) *fake.Clientset {
Expand Down

0 comments on commit 8373c87

Please sign in to comment.