diff --git a/test/doc/reliability.md b/test/doc/reliability.md
index 43890048f..703a2401f 100644
--- a/test/doc/reliability.md
+++ b/test/doc/reliability.md
@@ -9,3 +9,4 @@
| R00005 | When the node where `eip` takes effect is shut down, `eip` will take effect on another node matching `NodeSelector`, `egressGatewayStatus` and `EgressClusterStatus` will be updated as expected, the `EgressTunnel` corresponding to the shut-down node will be deleted, and the egress IP will be accessible as expected | p3 | false | | |
| R00006 | After shutting down all nodes matched by `NodeSelector` in `egressGateway`, the `Pod`'s egress IP will change from `eip` to a non-`eip` address, `egressGatewayStatus.NodeList` will be empty, the related `EgressIgnoreCIDR.NodeIP` entries will be deleted, and the `EgressTunnel` corresponding to each shut-down node will be deleted. After one of the nodes is powered back on, `egressgateway` will recover within a short time (record the recovery time), `eip` will take effect again as the `Pod`'s egress IP, the corresponding `nodeIP` will be added back to `EgressIgnoreCIDR.NodeIP`, and the `node` information in `egressGatewayStatus.NodeList` will be updated correctly. After all nodes are powered on, `eip` will only take effect on the first recovered `node`, and `EgressIgnoreCIDR.NodeIP` will be updated correctly | p3 | false | | |
| R00007 | Restart each component `Pod` in the cluster (including calico and kube-proxy) in turn. During the restarts, the egress IP for access outside the cluster remains the previously set `eip` and traffic must not be interrupted. After the cluster returns to normal, the state of each `cr` of the `egressgateway` component is correct | p1 | false | | |
+| R00008 | Create an `egressGateway` with a pool of 100 IPs. Create 120 `policies`. After multiple deletions and creations, expect the `egressGateway` and `Policy` statuses to be correct, and the `pod`'s egress IPs to match expectations | p1 | true | | |
\ No newline at end of file
diff --git a/test/doc/reliability_zh.md b/test/doc/reliability_zh.md
index 32c8a01c6..f78411d64 100644
--- a/test/doc/reliability_zh.md
+++ b/test/doc/reliability_zh.md
@@ -22,3 +22,4 @@
| R00005 | When the node where `eip` takes effect is shut down, `eip` will take effect on another node matching `NodeSelector`, `egressGatewayStatus` and `EgressClusterStatus` will be updated as expected, the `EgressTunnel` corresponding to the shut-down node will be deleted, and the egress IP will be accessible as expected | p3 | false | | |
| R00006 | After shutting down all nodes matched by `NodeSelector` in `egressGateway`, the `Pod`'s egress IP will change from `eip` to a non-`eip` address, `egressGatewayStatus.NodeList` will be empty, the related `EgressIgnoreCIDR.NodeIP` entries will be deleted, and the `EgressTunnel` corresponding to each shut-down node will be deleted. After one of the nodes is powered back on, `egressgateway` will recover within a short time (record the recovery time), `eip` will take effect again as the `Pod`'s egress IP, the corresponding `nodeIP` will be added back to `EgressIgnoreCIDR.NodeIP`, and the `node` information in `egressGatewayStatus.NodeList` will be updated correctly. After all nodes are powered on, `eip` will only take effect on the first recovered `node`, and `EgressIgnoreCIDR.NodeIP` will be updated correctly | p3 | false | | |
| R00007 | Restart each component `Pod` in the cluster (including calico and kube-proxy) in turn. During the restarts, the egress IP for access outside the cluster remains the previously set `eip` and traffic must not be interrupted. After the cluster returns to normal, the state of each `cr` of the `egressgateway` component is correct | p1 | false | | |
+| R00008 | Create an `egressGateway` with a pool of 100 IPs. Create 120 policies. After multiple deletions and creations, expect the `egressGateway` and `Policy` statuses to be correct, and the `pod`'s egress IPs to match expectations | p1 | true | | |
\ No newline at end of file
diff --git a/test/e2e/common/beforeeach.go b/test/e2e/common/beforeeach.go
new file mode 100644
index 000000000..0f406d173
--- /dev/null
+++ b/test/e2e/common/beforeeach.go
@@ -0,0 +1,38 @@
+// Copyright 2022 Authors of spidernet-io
+// SPDX-License-Identifier: Apache-2.0
+
+package common
+
+import (
+ "context"
+
+ corev1 "k8s.io/api/core/v1"
+
+ "github.com/go-faker/faker/v4"
+ egressv1 "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
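+// CreateEgressGatewayAndPodsBeforeEach is a shared BeforeEach helper: it generates
+// an IP pool of IPNum addresses, labels the given nodes, creates an EgressGateway
+// whose NodeSelector matches those labels, and creates IPNum pods for the test.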
+func CreateEgressGatewayAndPodsBeforeEach(ctx context.Context, cli client.Client, nodeNameList []string, podImg string, IPNum int64, increase uint8) (*egressv1.EgressGateway, []*corev1.Pod, error) {
+ // create EgressGateway
+ pool, err := GenIPPools(ctx, cli, IPNum, increase)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ labels := map[string]string{"ip-test": faker.Word()}
+ err = LabelNodes(ctx, cli, nodeNameList, labels)
+ if err != nil {
+ return nil, nil, err
+ }
+ nodeSelector := egressv1.NodeSelector{Selector: &metav1.LabelSelector{MatchLabels: labels}}
+
+ egw, err := CreateGatewayNew(ctx, cli, "egw-"+faker.Word(), pool, nodeSelector)
+ if err != nil {
+ return nil, nil, err
+ }
+ // create pods
+ pods := CreatePods(ctx, cli, podImg, int(IPNum))
+ return egw, pods, nil
+}
diff --git a/test/e2e/common/check_eip.go b/test/e2e/common/check_eip.go
index dbff0eb71..627fac43a 100644
--- a/test/e2e/common/check_eip.go
+++ b/test/e2e/common/check_eip.go
@@ -14,6 +14,8 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ egressv1 "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1"
)
func CheckDaemonSetEgressIP(
@@ -38,7 +40,7 @@ func CheckDaemonSetEgressIP(
for _, pod := range list.Items {
// check v4
if egressConfig.EnableIPv4 {
- err = checkPodEgressIP(ctx, cfg, pod, ipv4, cfg.ServerAIPv4, expectUsedEip)
+ err = CheckPodEgressIP(ctx, cfg, pod, ipv4, cfg.ServerAIPv4, expectUsedEip)
if err != nil {
return err
}
@@ -46,7 +48,7 @@ func CheckDaemonSetEgressIP(
// check v6
if egressConfig.EnableIPv6 {
- err = checkPodEgressIP(ctx, cfg, pod, ipv6, cfg.ServerAIPv6, expectUsedEip)
+ err = CheckPodEgressIP(ctx, cfg, pod, ipv6, cfg.ServerAIPv6, expectUsedEip)
if err != nil {
return err
}
@@ -86,7 +88,7 @@ func debugPodList(config *Config) string {
return string(raw)
}
-func checkPodEgressIP(ctx context.Context, cfg *Config, pod corev1.Pod, egressIP string, serverIP string, expectUsedEip bool) error {
+func CheckPodEgressIP(ctx context.Context, cfg *Config, pod corev1.Pod, egressIP string, serverIP string, expectUsedEip bool) error {
cmd := generateCmd(ctx, cfg, pod, egressIP, serverIP, expectUsedEip)
raw, err := cmd.CombinedOutput()
if err != nil {
@@ -114,3 +116,22 @@ func generateCmd(ctx context.Context, config *Config, pod corev1.Pod, eip, serve
args := fmt.Sprintf("kubectl --kubeconfig %s exec %s -n %s -- %s", config.KubeConfigPath, pod.Name, pod.Namespace, curlServer)
return exec.CommandContext(ctx, "sh", "-c", args)
}
+
+// CheckPodsEgressIP checks each pod's egress IP via the given pod-to-egressPolicy map
+func CheckPodsEgressIP(ctx context.Context, cfg *Config, p2p map[*corev1.Pod]*egressv1.EgressPolicy, checkv4, checkv6 bool, expectUsedEip bool) error {
+ for pod, egp := range p2p {
+ if checkv4 {
+ if len(egp.Status.Eip.Ipv4) == 0 {
+ return fmt.Errorf("failed to get eipV4 of egressPolicy %s", egp.Name)
+ }
+ // check every pod instead of returning after the first one
+ if err := CheckPodEgressIP(ctx, cfg, *pod, egp.Status.Eip.Ipv4, cfg.ServerAIPv4, expectUsedEip); err != nil {
+ return err
+ }
+ }
+ if checkv6 {
+ if len(egp.Status.Eip.Ipv6) == 0 {
+ return fmt.Errorf("failed to get eipV6 of egressPolicy %s", egp.Name)
+ }
+ if err := CheckPodEgressIP(ctx, cfg, *pod, egp.Status.Eip.Ipv6, cfg.ServerAIPv6, expectUsedEip); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/test/e2e/common/egp.go b/test/e2e/common/egp.go
index b92302231..0081d5d91 100644
--- a/test/e2e/common/egp.go
+++ b/test/e2e/common/egp.go
@@ -6,8 +6,13 @@ package common
import (
"context"
"fmt"
+ "strings"
"time"
+ . "github.com/onsi/ginkgo/v2"
+
+ corev1 "k8s.io/api/core/v1"
+
"github.com/go-faker/faker/v4"
econfig "github.com/spidernet-io/egressgateway/pkg/config"
egressv1 "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1"
@@ -117,3 +122,158 @@ func CreateEgressClusterPolicy(ctx context.Context, cli client.Client, cfg econf
}
}
}
+
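+// CreateEgressPolicyCustom creates an EgressPolicy with a random name in the
+// "default" namespace; the setUp callback customizes the spec before creation.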
+func CreateEgressPolicyCustom(ctx context.Context, cli client.Client, setUp func(egp *egressv1.EgressPolicy)) (*egressv1.EgressPolicy, error) {
+ name := "egp-" + strings.ToLower(faker.FirstName()) + "-" + faker.Word()
+ ns := "default"
+ res := &egressv1.EgressPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
+ }
+
+ setUp(res)
+
+ err := cli.Create(ctx, res)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
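+// CreateEgressClusterPolicyCustom creates an EgressClusterPolicy with a random
+// name; the setUp callback customizes the spec before creation.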
+func CreateEgressClusterPolicyCustom(ctx context.Context, cli client.Client, setUp func(egcp *egressv1.EgressClusterPolicy)) (*egressv1.EgressClusterPolicy, error) {
+ name := "egcp-" + faker.Word()
+ res := &egressv1.EgressClusterPolicy{
+ ObjectMeta: metav1.ObjectMeta{Name: name},
+ }
+
+ setUp(res)
+
+ err := cli.Create(ctx, res)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// DeleteEgressPolicies delete egressPolicies
+func DeleteEgressPolicies(ctx context.Context, cli client.Client, egps []*egressv1.EgressPolicy) error {
+ for _, egp := range egps {
+ err := DeleteObj(ctx, cli, egp)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
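+// WaitEgressPoliciesDeleted deletes the given egressPolicies and polls until all
+// of them are gone from the API server, or returns an error on timeout.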
+func WaitEgressPoliciesDeleted(ctx context.Context, cli client.Client, egps []*egressv1.EgressPolicy, timeout time.Duration) error {
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("timed out waiting for egressPolicies to be deleted")
+ default:
+ err := DeleteEgressPolicies(ctx, cli, egps)
+ if err != nil {
+ GinkgoWriter.Printf("failed to delete egressPolicies\nerror is:\n%v\ntry again\n", err)
+ time.Sleep(time.Second / 2)
+ continue
+ }
+ // verify that every policy is gone; otherwise sleep and retry the whole loop
+ allDeleted := true
+ for _, egp := range egps {
+ err := cli.Get(ctx, types.NamespacedName{Name: egp.Name, Namespace: egp.Namespace}, egp)
+ if err == nil {
+ GinkgoWriter.Printf("egressPolicy: %s still exists\ntry again\n", egp.Name)
+ allDeleted = false
+ break
+ }
+ }
+ if allDeleted {
+ return nil
+ }
+ time.Sleep(time.Second / 2)
+ }
+ }
+}
+
+// WaitEgressPolicyStatusReady waits for the EgressPolicy status.Eip to be allocated after the EgressPolicy is created
+func WaitEgressPolicyStatusReady(ctx context.Context, cli client.Client, egp *egressv1.EgressPolicy, v4Enabled, v6Enabled bool, timeout time.Duration) error {
+ if !v4Enabled && !v6Enabled {
+ return fmt.Errorf("both v4 and v6 are not enabled")
+ }
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ var v4Ok, v6Ok bool
+
+ for {
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("timeout to wait egressPolicy status ready")
+ default:
+ err := cli.Get(ctx, types.NamespacedName{Name: egp.Name, Namespace: egp.Namespace}, egp)
+ if err != nil {
+ GinkgoWriter.Printf("failed get egp: %s\nerror is:\n%v\ntry again\n", egp.Name, err)
+ time.Sleep(time.Second / 2)
+ continue
+ }
+ if egp.Spec.EgressIP.UseNodeIP {
+ // with UseNodeIP the node IP serves as the egress IP, so status.Eip stays empty
+ if len(egp.Status.Eip.Ipv4) == 0 && len(egp.Status.Eip.Ipv6) == 0 {
+ return nil
+ }
+ } else {
+ if v4Enabled && len(egp.Status.Eip.Ipv4) != 0 {
+ v4Ok = true
+ }
+ if v6Enabled && len(egp.Status.Eip.Ipv6) != 0 {
+ v6Ok = true
+ }
+ }
+ if v4Enabled && v6Enabled {
+ if v4Ok && v6Ok {
+ return nil
+ }
+ } else if v4Enabled && v4Ok {
+ return nil
+ } else if v6Enabled && v6Ok {
+ return nil
+ }
+ time.Sleep(time.Second / 2)
+ }
+ }
+}
+
+// CreateEgressPolicyWithEipAllocatorRR creates an egressPolicy and sets Spec.EgressIP.AllocatorPolicy to "rr"
+func CreateEgressPolicyWithEipAllocatorRR(ctx context.Context, cli client.Client, egw *egressv1.EgressGateway, labels map[string]string) (*egressv1.EgressPolicy, error) {
+ return CreateEgressPolicyCustom(ctx, cli,
+ func(egp *egressv1.EgressPolicy) {
+ egp.Spec.EgressGatewayName = egw.Name
+ egp.Spec.EgressIP.AllocatorPolicy = egressv1.EipAllocatorRR
+ egp.Spec.AppliedTo.PodSelector = &metav1.LabelSelector{MatchLabels: labels}
+ })
+}
+
+// CreateEgressPoliciesForPods creates one "rr" egressPolicy per pod (matching the pod's labels),
+// waits for each policy's status to be ready, and returns the policies plus a pod-to-policy map
+func CreateEgressPoliciesForPods(ctx context.Context, cli client.Client, egw *egressv1.EgressGateway, pods []*corev1.Pod, v4Enabled, v6Enabled bool, timeout time.Duration) (
+ []*egressv1.EgressPolicy, map[*corev1.Pod]*egressv1.EgressPolicy, error) {
+ egps := make([]*egressv1.EgressPolicy, 0)
+ pod2Policy := make(map[*corev1.Pod]*egressv1.EgressPolicy, 0)
+ for _, pod := range pods {
+ egp, err := CreateEgressPolicyWithEipAllocatorRR(ctx, cli, egw, pod.Labels)
+ if err != nil {
+ return nil, nil, err
+ }
+ // wait egressPolicy status updated
+ err = WaitEgressPolicyStatusReady(ctx, cli, egp, v4Enabled, v6Enabled, timeout)
+ if err != nil {
+ return nil, nil, err
+ }
+ // get egp after egressPolicy updated
+ err = cli.Get(ctx, types.NamespacedName{Name: egp.Name, Namespace: egp.Namespace}, egp)
+ if err != nil {
+ return nil, nil, err
+ }
+ egps = append(egps, egp)
+ pod2Policy[pod] = egp
+ }
+
+ return egps, pod2Policy, nil
+}
diff --git a/test/e2e/common/egw.go b/test/e2e/common/egw.go
index 5ec7c85ec..02bcfbaea 100644
--- a/test/e2e/common/egw.go
+++ b/test/e2e/common/egw.go
@@ -8,6 +8,8 @@ import (
"fmt"
"time"
+ . "github.com/onsi/ginkgo/v2"
+
"github.com/go-faker/faker/v4"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -123,3 +125,104 @@ func UpdateEgressGateway(ctx context.Context, cli client.Client, gateway *egress
}
}
}
+
+// CheckEGWSyncedWithEGP checks whether the egw status is synced with the egp status when the
+// egp's allocatorPolicy is "rr", and that exactly IPNum distinct eips are allocated
+func CheckEGWSyncedWithEGP(cli client.Client, egw *egressv1.EgressGateway, checkV4, checkV6 bool, IPNum int) (bool, error) {
+ eipV4s := make(map[string]struct{})
+ eipV6s := make(map[string]struct{})
+ for _, eipStatus := range egw.Status.NodeList {
+ if checkV4 {
+ for _, eip := range eipStatus.Eips {
+ if len(eip.IPv4) != 0 {
+ if _, ok := eipV4s[eip.IPv4]; ok {
+ GinkgoWriter.Printf("ip reallocate, the egw yaml:\n%s\n", GetObjYAML(egw))
+ }
+ eipV4s[eip.IPv4] = struct{}{}
+ }
+ }
+ }
+ if checkV6 {
+ for _, eip := range eipStatus.Eips {
+ if len(eip.IPv6) != 0 {
+ if _, ok := eipV6s[eip.IPv6]; ok {
+ GinkgoWriter.Printf("ip reallocate, the egw yaml:\n%s\n", GetObjYAML(egw))
+ }
+ eipV6s[eip.IPv6] = struct{}{}
+ }
+ }
+ }
+ // check egw status synced with egp status
+ for _, eips := range eipStatus.Eips {
+ for _, policy := range eips.Policies {
+ egp := new(egressv1.EgressPolicy)
+ err := cli.Get(context.TODO(), types.NamespacedName{Name: policy.Name, Namespace: policy.Namespace}, egp)
+ if err != nil {
+ return false, err
+ }
+ if egp.Status.Node != eipStatus.Name {
+ GinkgoWriter.Printf("Node is not synced, the egp is: %s, nodeName is: %s, but egw nodeName is: %s\nthe egw yaml:\n%s\n",
+ egp.Name, egp.Status.Node, eipStatus.Name, GetObjYAML(egw))
+ return false, nil
+ }
+ if egp.Status.Eip.Ipv4 != eips.IPv4 {
+ GinkgoWriter.Printf("Eip.Ipv4 is not synced, the egp is: %s, eipV4 is: %s, but egw IPv4 is: %s\nthe egw yaml:\n%s\n",
+ egp.Name, egp.Status.Eip.Ipv4, eips.IPv4, GetObjYAML(egw))
+ return false, nil
+ }
+ if egp.Status.Eip.Ipv6 != eips.IPv6 {
+ GinkgoWriter.Printf("Eip.Ipv6 is not synced, the egp is: %s, eipV6 is: %s, but egw IPv6 is: %s\nthe egw yaml:\n%s\n",
+ egp.Name, egp.Status.Eip.Ipv6, eips.IPv6, GetObjYAML(egw))
+ return false, nil
+ }
+ }
+ }
+ }
+ if checkV4 && checkV6 {
+ if len(eipV4s) != IPNum || len(eipV6s) != IPNum {
+ GinkgoWriter.Printf("failed check ip number, expect num is %v but got eipV4s num: %v, eipV6s num: %v\n", IPNum, len(eipV4s), len(eipV6s))
+ return false, nil
+ }
+ return true, nil
+ }
+ if checkV4 {
+ if len(eipV4s) != IPNum {
+ GinkgoWriter.Printf("failed check ip number, expect num is %v but got eipV4s num: %v\n", IPNum, len(eipV4s))
+ return false, nil
+ }
+ return true, nil
+ }
+ if len(eipV6s) != IPNum {
+ GinkgoWriter.Printf("failed check ip number, expect num is %v but got eipV6s num: %v\n", IPNum, len(eipV6s))
+ return false, nil
+ }
+ return true, nil
+}
+
+// WaitEGWSyncedWithEGP polls until CheckEGWSyncedWithEGP reports that the egressGateway
+// status is synced with its egressPolicies, or returns an error on timeout
+func WaitEGWSyncedWithEGP(cli client.Client, egw *egressv1.EgressGateway, checkV4, checkV6 bool, IPNum int, timeout time.Duration) error {
+ ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+ defer cancel()
+ for {
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("timeout wait egressGateway synced with egressPolicy")
+ default:
+ err := cli.Get(ctx, types.NamespacedName{Name: egw.Name}, egw)
+ if err != nil {
+ GinkgoWriter.Printf("failed get egressgateway: %s\nerror is:\n%v\ntry again\n", egw.Name, err)
+ time.Sleep(time.Second / 2)
+ continue
+ }
+ ok, err := CheckEGWSyncedWithEGP(cli, egw, checkV4, checkV6, IPNum)
+ if err != nil {
+ GinkgoWriter.Printf("failed to check egressgateway synced with egressPolicy\nerror is:\n%v\ntry again\n", err)
+ time.Sleep(time.Second / 2)
+ continue
+ }
+ if ok {
+ return nil
+ }
+ // not yet synced and no error: sleep before the next poll to avoid a busy loop
+ time.Sleep(time.Second / 2)
+ }
+ }
+}
diff --git a/test/e2e/common/pod.go b/test/e2e/common/pod.go
new file mode 100644
index 000000000..54b15f8fb
--- /dev/null
+++ b/test/e2e/common/pod.go
@@ -0,0 +1,83 @@
+// Copyright 2022 Authors of spidernet-io
+// SPDX-License-Identifier: Apache-2.0
+
+package common
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/go-faker/faker/v4"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
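+// CreatePod creates a pod with a random name in the "default" namespace running a
+// sleeping container, and waits up to 20 seconds for it to reach the Running phase.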
+func CreatePod(ctx context.Context, cli client.Client, image string) (*corev1.Pod, error) {
+ ctx, cancel := context.WithTimeout(ctx, time.Second*20)
+ defer cancel()
+
+ var terminationGracePeriodSeconds int64 = 0
+
+ name := faker.Word() + "-" + strings.ToLower(faker.FirstName()) + "-" + strings.ToLower(faker.FirstName())
+ label := map[string]string{"app": name}
+ res := &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: "default",
+ Labels: label,
+ },
+ Spec: corev1.PodSpec{
+ TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
+ Containers: []corev1.Container{
+ {
+ Name: name,
+ Image: image,
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ Command: []string{"/bin/sh", "-c", "sleep infinity"},
+ },
+ },
+ }}
+
+ err := cli.Create(ctx, res)
+ if err != nil {
+ return nil, err
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ _ = DeleteObj(context.Background(), cli, res)
+ return nil, fmt.Errorf("create Pod time out")
+ default:
+ err := cli.Get(ctx, types.NamespacedName{Namespace: res.Namespace, Name: res.Name}, res)
+ if err != nil {
+ return nil, err
+ }
+
+ if res.Status.Phase == corev1.PodRunning {
+ return res, nil
+ }
+
+ time.Sleep(time.Second / 2)
+ }
+ }
+}
+
+// CreatePods creates pods by the given number "n"; failed creations are retried
+// until the context is canceled
+func CreatePods(ctx context.Context, cli client.Client, img string, n int) []*corev1.Pod {
+ var res []*corev1.Pod
+ for i := 0; i < n; {
+ select {
+ case <-ctx.Done():
+ // give up on cancellation instead of retrying forever
+ return res
+ default:
+ }
+ pod, err := CreatePod(ctx, cli, img)
+ if err != nil {
+ continue
+ }
+ res = append(res, pod)
+ i++
+ }
+ return res
+}
diff --git a/test/e2e/reliability/reliability_ip_test.go b/test/e2e/reliability/reliability_ip_test.go
new file mode 100644
index 000000000..9557f5dd5
--- /dev/null
+++ b/test/e2e/reliability/reliability_ip_test.go
@@ -0,0 +1,125 @@
+// Copyright 2022 Authors of spidernet-io
+// SPDX-License-Identifier: Apache-2.0
+
+package reliability_test
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/go-faker/faker/v4"
+
+ corev1 "k8s.io/api/core/v1"
+
+ egressv1 "github.com/spidernet-io/egressgateway/pkg/k8s/apis/v1beta1"
+ "github.com/spidernet-io/egressgateway/test/e2e/common"
+)
+
+var _ = Describe("IP Allocation", Label("Reliability_IP"), func() {
+ var pods []*corev1.Pod
+ var egw *egressv1.EgressGateway
+ var p2p map[*corev1.Pod]*egressv1.EgressPolicy
+ var egps, newEgps []*egressv1.EgressPolicy
+ var IPNum, extraNum int64
+ var err error
+
+ var ctx context.Context
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ IPNum = 100
+ extraNum = 20
+
+ // create EgressGateway and pods
+ egw, pods, err = common.CreateEgressGatewayAndPodsBeforeEach(ctx, cli, nodeNameList, config.Image, IPNum, 1)
+ Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed create egw or pods: %v\n", err))
+ GinkgoWriter.Printf("succeeded create egw: %s\n", egw.Name)
+
+ DeferCleanup(func() {
+ // delete pods
+ for _, pod := range pods {
+ Expect(common.DeleteObj(ctx, cli, pod)).NotTo(HaveOccurred())
+ }
+ // delete policy if exists
+ Expect(common.WaitEgressPoliciesDeleted(ctx, cli, egps, time.Second*10)).NotTo(HaveOccurred())
+ Expect(common.WaitEgressPoliciesDeleted(ctx, cli, newEgps, time.Second*10)).NotTo(HaveOccurred())
+ // delete EgressGateway
+ Expect(common.DeleteObj(ctx, cli, egw)).NotTo(HaveOccurred())
+ })
+ })
+
+ It("test IP allocation", Label("R00008"), Serial, func() {
+ // create egps
+ By("create egressPolicies by gaven pods")
+ egps, _, err = common.CreateEgressPoliciesForPods(ctx, cli, egw, pods, egressConfig.EnableIPv4, egressConfig.EnableIPv6, time.Second*5)
+ Expect(err).NotTo(HaveOccurred())
+
+ By("create extra egressPolicies")
+ fakeLabels := map[string]string{
+ "app": faker.Word(),
+ }
+ for i := 0; i < int(extraNum); i++ {
+ egp, err := common.CreateEgressPolicyWithEipAllocatorRR(ctx, cli, egw, fakeLabels)
+ Expect(err).NotTo(HaveOccurred())
+ egps = append(egps, egp)
+ }
+
+ // delete egps
+ By("delete all egressPolicies")
+ // TODO @bzsuni: we do not wait for all the policies to be deleted here
+ Expect(common.DeleteEgressPolicies(ctx, cli, egps)).NotTo(HaveOccurred())
+
+ creationStart := time.Now()
+ // recreate egps
+ By("create egressPolicies by gaven pods")
+ newEgps, p2p, err = common.CreateEgressPoliciesForPods(ctx, cli, egw, pods, egressConfig.EnableIPv4, egressConfig.EnableIPv6, time.Second*10)
+ Expect(err).NotTo(HaveOccurred())
+
+ By("create another extra egressPolicies")
+ var extraEgps []*egressv1.EgressPolicy
+ for i := 0; i < int(extraNum); i++ {
+ egp, err := common.CreateEgressPolicyWithEipAllocatorRR(ctx, cli, egw, fakeLabels)
+ Expect(err).NotTo(HaveOccurred())
+ extraEgps = append(extraEgps, egp)
+ newEgps = append(newEgps, egp)
+ }
+
+ // check egressgateway status synced with egresspolicy
+ By("check egressgateway status synced with egresspolicies")
+ err := common.WaitEGWSyncedWithEGP(cli, egw, egressConfig.EnableIPv4, egressConfig.EnableIPv6, int(IPNum), time.Second*5)
+ Expect(err).NotTo(HaveOccurred())
+ creationTime := time.Since(creationStart)
+
+ // check eip
+ By("check eip of pods")
+ Expect(common.CheckPodsEgressIP(ctx, config, p2p, egressConfig.EnableIPv4, egressConfig.EnableIPv6, true)).NotTo(HaveOccurred(), "failed check eip")
+
+ // check extra egresspolicies which should not allocate ip
+ for _, egp := range extraEgps {
+ Expect(egp.Status.Eip.Ipv4).To(BeEmpty(), fmt.Sprintf("failed check extra egp:\n%s\n", common.GetObjYAML(egp)))
+ Expect(egp.Status.Eip.Ipv6).To(BeEmpty(), fmt.Sprintf("failed check extra egp:\n%s\n", common.GetObjYAML(egp)))
+ }
+
+ deletionStart := time.Now()
+ // delete all policies
+ By("delete all egressPolicies")
+ Expect(common.WaitEgressPoliciesDeleted(ctx, cli, newEgps, time.Second*10)).NotTo(HaveOccurred())
+
+ // check eip after policies deleted
+ By("check egressgateway status should be empty")
+ err = common.WaitEGWSyncedWithEGP(cli, egw, egressConfig.EnableIPv4, egressConfig.EnableIPv6, 0, time.Second*5)
+ deletionTime := time.Since(deletionStart)
+
+ By("check eip of pods")
+ Expect(common.CheckPodsEgressIP(ctx, config, p2p, egressConfig.EnableIPv4, egressConfig.EnableIPv6, false)).NotTo(HaveOccurred(), "failed check eip")
+ Expect(err).NotTo(HaveOccurred())
+
+ // report the creation time and deletion time
+ GinkgoWriter.Printf("IP allocation takes: %s\n", creationTime)
+ GinkgoWriter.Printf("IP release takes: %s\n", deletionTime)
+ })
+})