Skip to content

Commit

Permalink
add e2e case R00008
Browse files Browse the repository at this point in the history
Signed-off-by: bzsuni <[email protected]>
  • Loading branch information
bzsuni committed Oct 18, 2023
1 parent ce4ec22 commit f4d0b47
Show file tree
Hide file tree
Showing 8 changed files with 535 additions and 3 deletions.
1 change: 1 addition & 0 deletions test/doc/reliability.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,3 +9,4 @@
| R00005 | When the node where `eip` takes effect is shut down, `eip` will take effect to another node matching `NodeSelector`, and `egressGatewayStatus` and `EgressClusterStatus` are updated as expected, and the `EgressTunnel` corresponding to the shutdown node will be deleted and the egress IP will be accessed as expected | p3 | false | | |
| R00006 | After shutting down all nodes matched by `NodeSelector` in `egressGateway`,<br/>`Pod`’s egress IP will be changed from `eip` to non-`eip`, `egressGatewayStatus.NodeList` will be empty, and the related `EgressIgnoreCIDR.NodeIP` will be deleted and the `EgressTunnel` corresponding to the shutdown node will be deleted. <br/> After one of the `node` is turned on, `egressgateway` will recover in a short time and record the recovery time, and `eip` will be revalidated as the egress IP of `Pod`, and the `nodeIP` will be added to `EgressIgnoreCIDR.NodeIP` and `node` related information in `egressGatewayStatus.NodeList` is updated correctly, <br/>after all boots, `eip` will only take effect on the first recovered `node`, and `EgressIgnoreCIDR.NodeIP` is updated correctly | p3 | false | | |
| R00007 | Restart each component in the cluster (including calico, kube-proxy) `Pod` in turn. During the restart process, the egress IP used to access outside the cluster is the previously set `eip`, and the traffic cannot be interrupted. After the cluster returns to normal, the state of each `cr` of the `egressgateway` component is correct | p1 | false | | |
| R00008 | Create an `egressGateway` with a pool of 100 IPs. Create 120 `policies`. After multiple deletions and creations, expect the `egressGateway` and `Policy` statuses to be correct, and the `pod`'s egress IPs to match expectations | p1 | true | | |
1 change: 1 addition & 0 deletions test/doc/reliability_zh.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,3 +22,4 @@
| R00005 | 当关机 `eip` 生效的节点后,`eip` 会生效到另外匹配 `NodeSelector` 的节点上,<br/>并且 `egressGatewayStatus``EgressClusterStatus` 如预期更新,与被关机的节点对应的 `EgressTunnel` 将被删除,出口 IP 如预期访问 | p3 | false | | |
| R00006 | 当关机 `egressGateway``NodeSelector` 匹配的所有节点后,<br/>`Pod` 的出口 IP 将由 `eip` 改为非 `eip``egressGatewayStatus.NodeList` 将为空,相关的 `EgressIgnoreCIDR.NodeIP` 将被删除,与被关机的节点对应的 `EgressTunnel` 将被删除。<br/> 将其中一个 `node` 开机后,`egressgateway` 会在短时间内恢复并记录恢复时间,并且 `eip` 重新生效为 `Pod` 的出口 IP,`EgressIgnoreCIDR.NodeIP` 将对应的 `nodeIP` 添加并且 `egressGatewayStatus.NodeList``node` 相关信息更新正确,<br/>全部开机最后 `eip` 只会生效在第一个恢复的 `node` 上,`EgressIgnoreCIDR.NodeIP` 更新正确 | p3 | false | | |
| R00007 | 依次重启集群中各个组件(包含 calico,kube-proxy)`Pod`, 重启过程中访问集群外部的出口 IP 为设置好的 `eip`,并且业务不能断流, 等待集群恢复正常后,`egressgateway` 组件的各个 `cr` 状态正确 | p1 | false | | |
| R00008 | 创建 `egressGateway` 分配有 100 个 IP 的池,创建 120 个 policy,做多次删除和创建操作之后,期望 `egressGateway``Policy` 状态正确, `pod` 的出口 IP 符合预期 | p1 | true | | |
38 changes: 38 additions & 0 deletions test/e2e/common/beforeeach.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
// Copyright 2022 Authors of spidernet-io
// SPDX-License-Identifier: Apache-2.0

package common

import (
"context"

corev1 "k8s.io/api/core/v1"

"github.com/go-faker/faker/v4"
egressv1 "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// CreateEgressGatewayAndPodsBeforeEach provisions the fixtures shared by the
// e2e cases: it generates an IP pool of IPNum addresses, labels the given
// nodes with a random marker label, creates an EgressGateway whose
// NodeSelector matches that label, and finally creates IPNum test pods.
// It returns the gateway, the pods, and the first error encountered.
func CreateEgressGatewayAndPodsBeforeEach(ctx context.Context, cli client.Client, nodeNameList []string, podImg string, IPNum int64, increase uint8) (*egressv1.EgressGateway, []*corev1.Pod, error) {
	// build the IP pool used by the gateway
	ipPool, err := GenIPPools(ctx, cli, IPNum, increase)
	if err != nil {
		return nil, nil, err
	}

	// tag the candidate nodes with a random label so the gateway can select them
	nodeLabels := map[string]string{"ip-test": faker.Word()}
	if err = LabelNodes(ctx, cli, nodeNameList, nodeLabels); err != nil {
		return nil, nil, err
	}

	selector := egressv1.NodeSelector{
		Selector: &metav1.LabelSelector{MatchLabels: nodeLabels},
	}
	gateway, err := CreateGatewayNew(ctx, cli, "egw-"+faker.Word(), ipPool, selector)
	if err != nil {
		return nil, nil, err
	}

	// one pod per IP in the pool
	return gateway, CreatePods(ctx, cli, podImg, int(IPNum)), nil
}
27 changes: 24 additions & 3 deletions test/e2e/common/check_eip.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

egressv1 "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1"
)

func CheckDaemonSetEgressIP(
Expand All @@ -38,15 +40,15 @@ func CheckDaemonSetEgressIP(
for _, pod := range list.Items {
// check v4
if egressConfig.EnableIPv4 {
err = checkPodEgressIP(ctx, cfg, pod, ipv4, cfg.ServerAIPv4, expectUsedEip)
err = CheckPodEgressIP(ctx, cfg, pod, ipv4, cfg.ServerAIPv4, expectUsedEip)
if err != nil {
return err
}
}

// check v6
if egressConfig.EnableIPv6 {
err = checkPodEgressIP(ctx, cfg, pod, ipv6, cfg.ServerAIPv6, expectUsedEip)
err = CheckPodEgressIP(ctx, cfg, pod, ipv6, cfg.ServerAIPv6, expectUsedEip)
if err != nil {
return err
}
Expand Down Expand Up @@ -86,7 +88,7 @@ func debugPodList(config *Config) string {
return string(raw)
}

func checkPodEgressIP(ctx context.Context, cfg *Config, pod corev1.Pod, egressIP string, serverIP string, expectUsedEip bool) error {
func CheckPodEgressIP(ctx context.Context, cfg *Config, pod corev1.Pod, egressIP string, serverIP string, expectUsedEip bool) error {
cmd := generateCmd(ctx, cfg, pod, egressIP, serverIP, expectUsedEip)
raw, err := cmd.CombinedOutput()
if err != nil {
Expand Down Expand Up @@ -114,3 +116,22 @@ func generateCmd(ctx context.Context, config *Config, pod corev1.Pod, eip, serve
args := fmt.Sprintf("kubectl --kubeconfig %s exec %s -n %s -- %s", config.KubeConfigPath, pod.Name, pod.Namespace, curlServer)
return exec.CommandContext(ctx, "sh", "-c", args)
}

// CheckPodsEgressIP checks the egress IP of every pod in the pod->egressPolicy
// map p2p against the EIP recorded in its policy status, for each enabled IP
// family. It returns an error if a policy has no allocated EIP for a requested
// family, or if any per-pod check fails.
//
// Fix: the original returned from inside the loop after the FIRST pod's first
// family check, so the remaining pods were never checked and v6 was skipped
// whenever checkv4 was set.
func CheckPodsEgressIP(ctx context.Context, cfg *Config, p2p map[*corev1.Pod]*egressv1.EgressPolicy, checkv4, checkv6 bool, expectUsedEip bool) error {
	for pod, egp := range p2p {
		if checkv4 {
			if len(egp.Status.Eip.Ipv4) == 0 {
				return fmt.Errorf("failed get eipV4")
			}
			if err := CheckPodEgressIP(ctx, cfg, *pod, egp.Status.Eip.Ipv4, cfg.ServerAIPv4, expectUsedEip); err != nil {
				return err
			}
		}
		if checkv6 {
			if len(egp.Status.Eip.Ipv6) == 0 {
				return fmt.Errorf("failed get eipV6")
			}
			if err := CheckPodEgressIP(ctx, cfg, *pod, egp.Status.Eip.Ipv6, cfg.ServerAIPv6, expectUsedEip); err != nil {
				return err
			}
		}
	}
	return nil
}
160 changes: 160 additions & 0 deletions test/e2e/common/egp.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,13 @@ package common
import (
"context"
"fmt"
"strings"
"time"

. "github.com/onsi/ginkgo/v2"

corev1 "k8s.io/api/core/v1"

"github.com/go-faker/faker/v4"
econfig "github.com/spidernet-io/egressgateway/pkg/config"
egressv1 "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1"
Expand Down Expand Up @@ -117,3 +122,158 @@ func CreateEgressClusterPolicy(ctx context.Context, cli client.Client, cfg econf
}
}
}

// CreateEgressPolicyCustom creates an EgressPolicy in the "default" namespace
// with a randomized name; setUp is invoked to fill in the spec before the
// object is submitted to the API server. It returns the created policy.
func CreateEgressPolicyCustom(ctx context.Context, cli client.Client, setUp func(egp *egressv1.EgressPolicy)) (*egressv1.EgressPolicy, error) {
	policy := &egressv1.EgressPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "egp-" + strings.ToLower(faker.FirstName()) + "-" + faker.Word(),
			Namespace: "default",
		},
	}

	// let the caller customize the spec
	setUp(policy)

	if err := cli.Create(ctx, policy); err != nil {
		return nil, err
	}
	return policy, nil
}

// CreateEgressClusterPolicyCustom creates a cluster-scoped EgressClusterPolicy
// with a randomized name; setUp is invoked to fill in the spec before the
// object is submitted to the API server. It returns the created policy.
func CreateEgressClusterPolicyCustom(ctx context.Context, cli client.Client, setUp func(egcp *egressv1.EgressClusterPolicy)) (*egressv1.EgressClusterPolicy, error) {
	policy := &egressv1.EgressClusterPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "egcp-" + faker.Word()},
	}

	// let the caller customize the spec
	setUp(policy)

	if err := cli.Create(ctx, policy); err != nil {
		return nil, err
	}
	return policy, nil
}

// DeleteEgressPolicies deletes every EgressPolicy in egps, stopping at the
// first deletion error.
func DeleteEgressPolicies(ctx context.Context, cli client.Client, egps []*egressv1.EgressPolicy) error {
	for _, policy := range egps {
		if err := DeleteObj(ctx, cli, policy); err != nil {
			return err
		}
	}
	return nil
}

// WaitEgressPoliciesDeleted deletes the given egressPolicies and polls until
// every one of them is gone from the API server, or the timeout elapses
// (in which case an error is returned).
//
// Fix: the original's per-policy `continue` only advanced the inner loop, so
// the function returned nil even when some policies still existed; we now
// restart the whole poll until all Gets report the objects gone.
func WaitEgressPoliciesDeleted(ctx context.Context, cli client.Client, egps []*egressv1.EgressPolicy, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("timeout to wait egressPolicies deleted")
		default:
			// (re-)issue the delete requests; deleting an already-deleted
			// object is handled by DeleteObj
			err := DeleteEgressPolicies(ctx, cli, egps)
			if err != nil {
				GinkgoWriter.Printf("failed delete egressPolicies\nerror is:\n%v\ntry again\n", err)
				time.Sleep(time.Second / 2)
				continue
			}
			// verify every policy is actually gone
			allDeleted := true
			for _, egp := range egps {
				if err := cli.Get(ctx, types.NamespacedName{Name: egp.Name, Namespace: egp.Namespace}, egp); err == nil {
					GinkgoWriter.Printf("egressPolicy: %s still exists\ntry again\n", egp.Name)
					allDeleted = false
					break
				}
			}
			if !allDeleted {
				time.Sleep(time.Second / 2)
				continue
			}
			return nil
		}
	}
}

// WaitEgressPolicyStatusReady waits for the EgressPolicy status.Eip to be
// allocated after the EgressPolicy is created. When the policy uses the node
// IP directly (Spec.EgressIP.UseNodeIP), no EIP is allocated and readiness
// means status.Eip stays empty. It returns an error on timeout, or when
// neither IP family is enabled.
//
// Fix: the original had the two readiness conditions swapped — with
// UseNodeIP=false it returned ready while both EIPs were still EMPTY (i.e. it
// never actually waited, contradicting its own doc comment), and with
// UseNodeIP=true it waited for an EIP that is never allocated.
func WaitEgressPolicyStatusReady(ctx context.Context, cli client.Client, egp *egressv1.EgressPolicy, v4Enabled, v6Enabled bool, timeout time.Duration) error {
	if !v4Enabled && !v6Enabled {
		return fmt.Errorf("both v4 and v6 are not enabled")
	}
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("timeout to wait egressPolicy status ready")
		default:
			err := cli.Get(ctx, types.NamespacedName{Name: egp.Name, Namespace: egp.Namespace}, egp)
			if err != nil {
				GinkgoWriter.Printf("failed get egp: %s\nerror is:\n%v\ntry again\n", egp.Name, err)
				time.Sleep(time.Second / 2)
				continue
			}
			if egp.Spec.EgressIP.UseNodeIP {
				// node IP is used directly: ready once no EIP is recorded
				if len(egp.Status.Eip.Ipv4) == 0 && len(egp.Status.Eip.Ipv6) == 0 {
					return nil
				}
			} else {
				// an EIP must be allocated for every enabled IP family
				v4Ok := !v4Enabled || len(egp.Status.Eip.Ipv4) != 0
				v6Ok := !v6Enabled || len(egp.Status.Eip.Ipv6) != 0
				if v4Ok && v6Ok {
					return nil
				}
			}
			time.Sleep(time.Second / 2)
		}
	}
}

// CreateEgressPolicyWithEipAllocatorRR creates an egressPolicy bound to the
// given gateway, with Spec.EgressIP.AllocatorPolicy set to the round-robin
// allocator ("rr") and a pod selector matching the given labels.
func CreateEgressPolicyWithEipAllocatorRR(ctx context.Context, cli client.Client, egw *egressv1.EgressGateway, labels map[string]string) (*egressv1.EgressPolicy, error) {
	setUp := func(egp *egressv1.EgressPolicy) {
		egp.Spec.EgressGatewayName = egw.Name
		egp.Spec.EgressIP.AllocatorPolicy = egressv1.EipAllocatorRR
		egp.Spec.AppliedTo.PodSelector = &metav1.LabelSelector{MatchLabels: labels}
	}
	return CreateEgressPolicyCustom(ctx, cli, setUp)
}

// CreateEgressPoliciesForPods creates one round-robin egressPolicy per pod
// (selecting each pod by its labels), waits for every policy's status to
// become ready, and returns both the policy list and a pod->policy map.
func CreateEgressPoliciesForPods(ctx context.Context, cli client.Client, egw *egressv1.EgressGateway, pods []*corev1.Pod, v4Enabled, v6Enabled bool, timeout time.Duration) (
	[]*egressv1.EgressPolicy, map[*corev1.Pod]*egressv1.EgressPolicy, error) {
	policies := make([]*egressv1.EgressPolicy, 0, len(pods))
	podToPolicy := make(map[*corev1.Pod]*egressv1.EgressPolicy, len(pods))

	for _, pod := range pods {
		policy, err := CreateEgressPolicyWithEipAllocatorRR(ctx, cli, egw, pod.Labels)
		if err != nil {
			return nil, nil, err
		}
		// block until the policy status has been populated
		if err = WaitEgressPolicyStatusReady(ctx, cli, policy, v4Enabled, v6Enabled, timeout); err != nil {
			return nil, nil, err
		}
		// re-read the policy to pick up the freshly updated status
		if err = cli.Get(ctx, types.NamespacedName{Name: policy.Name, Namespace: policy.Namespace}, policy); err != nil {
			return nil, nil, err
		}
		policies = append(policies, policy)
		podToPolicy[pod] = policy
	}

	return policies, podToPolicy, nil
}
103 changes: 103 additions & 0 deletions test/e2e/common/egw.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ import (
"fmt"
"time"

. "github.com/onsi/ginkgo/v2"

"github.com/go-faker/faker/v4"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -123,3 +125,104 @@ func UpdateEgressGateway(ctx context.Context, cli client.Client, gateway *egress
}
}
}

// CheckEGWSyncedWithEGP checks whether the egressGateway status is synced with
// the status of its egressPolicies when the policies' allocatorPolicy is "rr".
// It verifies three things across egw.Status.NodeList:
//  1. no EIP is assigned twice (duplicates are only logged, not treated as a
//     sync failure — NOTE(review): presumably intentional, confirm);
//  2. every policy listed under an EIP has a status (node name, IPv4, IPv6)
//     matching what the gateway records for that EIP;
//  3. the number of distinct EIPs equals IPNum for each checked IP family.
// It returns (true, nil) when everything matches, (false, nil) on a mismatch,
// and a non-nil error only when fetching a policy from the API server fails.
func CheckEGWSyncedWithEGP(cli client.Client, egw *egressv1.EgressGateway, checkV4, checkV6 bool, IPNum int) (bool, error) {
// sets of distinct EIPs seen across all nodes, per IP family
eipV4s := make(map[string]struct{})
eipV6s := make(map[string]struct{})
for _, eipStatus := range egw.Status.NodeList {
if checkV4 {
for _, eip := range eipStatus.Eips {
if len(eip.IPv4) != 0 {
// a repeated IPv4 EIP means the gateway re-allocated an address
if _, ok := eipV4s[eip.IPv4]; ok {
GinkgoWriter.Printf("ip reallocate, the egw yaml:\n%s\n", GetObjYAML(egw))
}
eipV4s[eip.IPv4] = struct{}{}
}
}
}
if checkV6 {
for _, eip := range eipStatus.Eips {
if len(eip.IPv6) != 0 {
// a repeated IPv6 EIP means the gateway re-allocated an address
if _, ok := eipV6s[eip.IPv6]; ok {
GinkgoWriter.Printf("ip reallocate, the egw yaml:\n%s\n", GetObjYAML(egw))
}
eipV6s[eip.IPv6] = struct{}{}
}
}
}
// check egw status synced with egp status: each policy the gateway lists
// under an EIP must itself report the same node and the same EIPs
for _, eips := range eipStatus.Eips {
for _, policy := range eips.Policies {
egp := new(egressv1.EgressPolicy)
err := cli.Get(context.TODO(), types.NamespacedName{Name: policy.Name, Namespace: policy.Namespace}, egp)
if err != nil {
return false, err
}
if egp.Status.Node != eipStatus.Name {

GinkgoWriter.Printf("Node is not synced, the egp is: %s, nodeName is: %s, but egw nodeName is: %s\nthe egw yaml:\n%s\n",
egp.Name, egp.Status.Node, eipStatus.Name, GetObjYAML(egw))
return false, nil
}
if egp.Status.Eip.Ipv4 != eips.IPv4 {
GinkgoWriter.Printf("Eip.Ipv4 is not synced, the egp is: %s, eipV4 is: %s, but egw IPv4 is: %s\nthe egw yaml:\n%s\n",
egp.Name, egp.Status.Eip.Ipv4, eips.IPv4, GetObjYAML(egw))
return false, nil
}
if egp.Status.Eip.Ipv6 != eips.IPv6 {
GinkgoWriter.Printf("Eip.Ipv6 is not synced, the egp is: %s, eipV6 is: %s, but egw IPv6 is: %s\nthe egw yaml:\n%s\n",
egp.Name, egp.Status.Eip.Ipv6, eips.IPv6, GetObjYAML(egw))
return false, nil
}
}
}
}
// finally, the count of distinct EIPs must equal IPNum for each enabled family
if checkV4 && checkV6 {
if len(eipV4s) != IPNum || len(eipV6s) != IPNum {
GinkgoWriter.Printf("failed check ip number, expect num is %v but got eipV4s num: %v, eipV6s num: %v\n", IPNum, len(eipV4s), len(eipV6s))
return false, nil
}
return true, nil
}
if checkV4 {
if len(eipV4s) != IPNum {
GinkgoWriter.Printf("failed check ip number, expect num is %v but got eipV4s num: %v\n", IPNum, len(eipV4s))
return false, nil
}
return true, nil
}
// only v6 remains to be checked at this point
if len(eipV6s) != IPNum {
GinkgoWriter.Printf("failed check ip number, expect num is %v but got eipV6s num: %v\n", IPNum, len(eipV6s))
return false, nil
}
return true, nil
}

// WaitEGWSyncedWithEGP polls until the egressGateway status is synced with its
// egressPolicies (as judged by CheckEGWSyncedWithEGP) or the timeout elapses,
// in which case an error is returned.
//
// Fix: when the check returned (false, nil) the original retried immediately
// with no sleep, busy-looping against the API server until the timeout; we now
// back off in that path as well.
func WaitEGWSyncedWithEGP(cli client.Client, egw *egressv1.EgressGateway, checkV4, checkV6 bool, IPNum int, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
	defer cancel()
	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("timeout wait egressGateway synced with egressPolicy")
		default:
			// refresh the gateway before checking
			err := cli.Get(ctx, types.NamespacedName{Name: egw.Name}, egw)
			if err != nil {
				GinkgoWriter.Printf("failed get egressgateway: %s\nerror is:\n%v\ntry again\n", egw.Name, err)
				time.Sleep(time.Second / 2)
				continue
			}
			ok, err := CheckEGWSyncedWithEGP(cli, egw, checkV4, checkV6, IPNum)
			if err != nil {
				GinkgoWriter.Printf("failed check egressgateway synced with egressPolicy\nerror is:\n%v\ntry again\n", err)
				time.Sleep(time.Second / 2)
				continue
			}
			if ok {
				return nil
			}
			// not synced yet — back off before retrying
			time.Sleep(time.Second / 2)
		}
	}
}
Loading

0 comments on commit f4d0b47

Please sign in to comment.