From 414c76de20649b07479f1fb47a35395af74b007f Mon Sep 17 00:00:00 2001 From: Hao Liu <44379968+TheRealHaoLiu@users.noreply.github.com> Date: Mon, 5 Feb 2024 15:35:47 -0500 Subject: [PATCH 01/14] Fix K8S log reconnect timestamp tracking (#939) Also reviewed by myself and @shanemcd --- pkg/workceptor/kubernetes.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/workceptor/kubernetes.go b/pkg/workceptor/kubernetes.go index 1e412fb94..225ec789a 100644 --- a/pkg/workceptor/kubernetes.go +++ b/pkg/workceptor/kubernetes.go @@ -120,10 +120,9 @@ func podRunningAndReady() func(event watch.Event) (bool, error) { return inner } -func (kw *kubeUnit) kubeLoggingConnectionHandler(timestamps bool) (io.ReadCloser, error) { +func (kw *kubeUnit) kubeLoggingConnectionHandler(timestamps bool, sinceTime time.Time) (io.ReadCloser, error) { var logStream io.ReadCloser var err error - var sinceTime time.Time podNamespace := kw.pod.Namespace podName := kw.pod.Name podOptions := &corev1.PodLogOptions{ @@ -172,7 +171,7 @@ func (kw *kubeUnit) kubeLoggingNoReconnect(streamWait *sync.WaitGroup, stdout *S defer streamWait.Done() podNamespace := kw.pod.Namespace podName := kw.pod.Name - logStream, err := kw.kubeLoggingConnectionHandler(false) + logStream, err := kw.kubeLoggingConnectionHandler(false, time.Time{}) if err != nil { return } @@ -228,7 +227,7 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout break } - logStream, err := kw.kubeLoggingConnectionHandler(true) + logStream, err := kw.kubeLoggingConnectionHandler(true, sinceTime) if err != nil { break } From 7eaf7f217b79d7222e7adca5ac6f2ce2ba8849cc Mon Sep 17 00:00:00 2001 From: Hao Liu Date: Tue, 6 Feb 2024 17:22:12 -0500 Subject: [PATCH 02/14] Fix erroneous override of work unit status Co-Authored-By: Seth Foster --- pkg/workceptor/kubernetes.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pkg/workceptor/kubernetes.go b/pkg/workceptor/kubernetes.go index 225ec789a..5bdbe035b 100644 --- a/pkg/workceptor/kubernetes.go +++ b/pkg/workceptor/kubernetes.go @@ -201,7 +201,8 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout for { if *stdinErr != nil { - break + // fail to send stdin to pod, no need to continue + return } // get pod, with retry @@ -224,12 +225,14 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout kw.GetWorkceptor().nc.GetLogger().Error(errMsg) kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) - break + // fail to get pod, no need to continue + return } logStream, err := kw.kubeLoggingConnectionHandler(true, sinceTime) if err != nil { - break + // fail to get log stream, no need to continue + return } // read from logstream @@ -739,7 +742,8 @@ func (kw *kubeUnit) runWorkUsingLogger() { return } - if kw.GetContext().Err() != context.Canceled { + // only transition from WorkStateRunning to WorkStateSucceeded if WorkStateFailed is set we do not override + if kw.GetContext().Err() != context.Canceled && kw.Status().State == WorkStateRunning { kw.UpdateBasicStatus(WorkStateSucceeded, "Finished", stdout.Size()) } } From cae686228067d25402eecaecebb2ec75fde0679d Mon Sep 17 00:00:00 2001 From: Hao Liu Date: Tue, 6 Feb 2024 14:32:09 -0500 Subject: [PATCH 03/14] Do not set stdout error on EOF retry stdoutErr is use to determine `errDetail` that's used in `kw.UpdateBasicStatus(WorkStateFailed, errDetail, stdout.Size())` in case where we retried 5 time and did not read any new log messages it is not an error it's 
the expected happy path so we should not set stdoutErr --- pkg/workceptor/kubernetes.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/pkg/workceptor/kubernetes.go b/pkg/workceptor/kubernetes.go index 5bdbe035b..44bcee497 100644 --- a/pkg/workceptor/kubernetes.go +++ b/pkg/workceptor/kubernetes.go @@ -244,7 +244,8 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout kw.GetWorkceptor().nc.GetLogger().Info( "Context was canceled while reading logs for pod %s/%s. Assuming pod has finished", podNamespace, - podName) + podName, + ) return } @@ -263,9 +264,16 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout break } - *stdoutErr = err + kw.GetWorkceptor().nc.GetLogger().Error("Error reading from pod %s/%s: %s", podNamespace, podName, err) + // At this point we exausted all retries, every retry we either failed to read OR we read but did not get newer msg + // If we got a EOF on the last retry we assume that we read everything and we can stop the loop + // we ASSUME this is the happy path. + if err != io.EOF { + *stdoutErr = err + } + return } @@ -490,6 +498,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { if podName == "" { // create new pod if ked.PodName is empty + // TODO: add retry logic to make this more resilient to transient errors if err := kw.createPod(nil); err != nil { if err != ErrPodCompleted { errMsg := fmt.Sprintf("Error creating pod: %s", err) From 5135fb814f5c93e440d619a000968825e979b8d5 Mon Sep 17 00:00:00 2001 From: Thom Carlin Date: Thu, 8 Feb 2024 11:11:37 -0500 Subject: [PATCH 04/14] For RECEPTOR_KUBE_SUPPORT_RECONNECT, Update auto/default to true (#940) --- pkg/workceptor/kubernetes.go | 8 +- pkg/workceptor/kubernetes_test.go | 4 +- tests/functional/cli/cli_test.go | 24 ++- tests/functional/mesh/firewall_test.go | 6 +- tests/functional/mesh/mesh_test.go | 29 ++-- tests/functional/mesh/tls_test.go | 30 ++-- tests/functional/mesh/work_test.go | 211 +++++++++++++++++-------- tests/functional/mesh/work_utils.go | 6 +- 8 files changed, 210 insertions(+), 108 deletions(-) diff --git a/pkg/workceptor/kubernetes.go b/pkg/workceptor/kubernetes.go index 44bcee497..aae1435d1 100644 --- a/pkg/workceptor/kubernetes.go +++ b/pkg/workceptor/kubernetes.go @@ -760,8 +760,8 @@ func (kw *kubeUnit) runWorkUsingLogger() { func shouldUseReconnect() bool { // Support for streaming from pod with timestamps using reconnect method is in all current versions // Can override the detection by setting the RECEPTOR_KUBE_SUPPORT_RECONNECT - // accepted values: "enabled", "disabled", "auto" with "disabled" being the default - // all invalid value will assume to be "disabled" + // accepted values: "enabled", "disabled", "auto". 
The default is "enabled" + // all invalid values will assume to be "disabled" env, ok := os.LookupEnv("RECEPTOR_KUBE_SUPPORT_RECONNECT") if ok { @@ -771,13 +771,13 @@ func shouldUseReconnect() bool { case "disabled": return false case "auto": - // continue + return true default: return false } } - return false + return true } func parseTime(s string) *time.Time { diff --git a/pkg/workceptor/kubernetes_test.go b/pkg/workceptor/kubernetes_test.go index b23ff4b92..23500ede9 100644 --- a/pkg/workceptor/kubernetes_test.go +++ b/pkg/workceptor/kubernetes_test.go @@ -18,7 +18,7 @@ func TestShouldUseReconnect(t *testing.T) { { name: "Positive (undefined) test", envValue: "", - want: false, + want: true, }, { name: "Enabled test", @@ -33,7 +33,7 @@ func TestShouldUseReconnect(t *testing.T) { { name: "Auto test", envValue: "auto", - want: false, + want: true, }, { name: "Default test", diff --git a/tests/functional/cli/cli_test.go b/tests/functional/cli/cli_test.go index c1481c9f5..5763c5795 100644 --- a/tests/functional/cli/cli_test.go +++ b/tests/functional/cli/cli_test.go @@ -59,8 +59,10 @@ func TestListeners(t *testing.T) { defer cmd.Process.Wait() defer cmd.Process.Kill() - ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) - success, err := utils.CheckUntilTimeoutWithErr(ctx, 10*time.Millisecond, func() (bool, error) { + ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel1() + + success, err := utils.CheckUntilTimeoutWithErr(ctx1, 10*time.Millisecond, func() (bool, error) { return ConfirmListening(cmd.Process.Pid, listenProto) }) if err != nil { @@ -116,8 +118,10 @@ func TestSSLListeners(t *testing.T) { return err == nil } - ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) - success := utils.CheckUntilTimeout(ctx, 10*time.Millisecond, checkFunc) + ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel1() + + success := utils.CheckUntilTimeout(ctx1, 10*time.Millisecond, checkFunc) if !success { t.Fatalf("Timed out while waiting for tls backend to start:\n%s", receptorStdOut.String()) } @@ -190,8 +194,10 @@ func TestCostMap(t *testing.T) { defer cmd.Process.Wait() defer cmd.Process.Kill() - ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) - success, err := utils.CheckUntilTimeoutWithErr(ctx, 10*time.Millisecond, func() (bool, error) { + ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel1() + + success, err := utils.CheckUntilTimeoutWithErr(ctx1, 10*time.Millisecond, func() (bool, error) { return ConfirmListening(cmd.Process.Pid, listenProto) }) if err != nil { @@ -238,8 +244,10 @@ func TestCosts(t *testing.T) { defer cmd.Process.Wait() defer cmd.Process.Kill() - ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) - success, err := utils.CheckUntilTimeoutWithErr(ctx, 10*time.Millisecond, func() (bool, error) { + ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel1() + + success, err := utils.CheckUntilTimeoutWithErr(ctx1, 10*time.Millisecond, func() (bool, error) { return ConfirmListening(cmd.Process.Pid, listenProto) }) if err != nil { diff --git a/tests/functional/mesh/firewall_test.go b/tests/functional/mesh/firewall_test.go index f4ad3d392..92c03d5e4 100644 --- a/tests/functional/mesh/firewall_test.go +++ b/tests/functional/mesh/firewall_test.go @@ -55,8 +55,10 @@ func TestFirewall(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 20*time.Second) - 
err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } diff --git a/tests/functional/mesh/mesh_test.go b/tests/functional/mesh/mesh_test.go index d89177152..02c6a9eaa 100644 --- a/tests/functional/mesh/mesh_test.go +++ b/tests/functional/mesh/mesh_test.go @@ -46,8 +46,10 @@ func TestMeshStartup(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } @@ -109,8 +111,10 @@ func TestTraceroute(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 60*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } @@ -212,8 +216,9 @@ func TestMeshShutdown(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 60*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel1() + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) @@ -288,8 +293,10 @@ func TestCosts(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 60*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } @@ -351,9 +358,11 @@ func TestDuplicateNodes(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 1*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { return strings.Contains(m.LogWriter.String(), "connected using a node ID we are already connected to") }) { t.Fatal("duplicate nodes were not expected to exist together") diff --git a/tests/functional/mesh/tls_test.go b/tests/functional/mesh/tls_test.go index 96c12c85f..11ddc4450 100644 --- a/tests/functional/mesh/tls_test.go +++ b/tests/functional/mesh/tls_test.go @@ -103,8 +103,10 @@ func TestTCPSSLConnections(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 20*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } @@ -187,9 +189,11 @@ func TestTCPSSLClientAuthFailNoKey(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { linuxTLSError := strings.Contains(m.LogWriter.String(), "certificate signed by unknown authority") macTLSError := strings.Contains(m.LogWriter.String(), "certificate is not trusted") @@ -265,9 +269,11 @@ func TestTCPSSLClientAuthFailBadKey(t *testing.T) 
{ defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { linuxTLSError := strings.Contains(m.LogWriter.String(), "certificate signed by unknown authority") macTLSError := strings.Contains(m.LogWriter.String(), "certificate is not trusted") @@ -330,9 +336,11 @@ func TestTCPSSLServerAuthFailNoKey(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { return strings.Contains(m.LogWriter.String(), "first record does not look like a TLS handshake") }) { t.Fatal("Expected connection to fail but it succeeded") @@ -402,9 +410,11 @@ func TestTCPSSLServerAuthFailBadKey(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { linuxTLSError := strings.Contains(m.LogWriter.String(), "certificate signed by unknown authority") macTLSError := strings.Contains(m.LogWriter.String(), "certificate is not trusted") diff --git a/tests/functional/mesh/work_test.go b/tests/functional/mesh/work_test.go index e157f9e8d..cb86d9054 100644 --- a/tests/functional/mesh/work_test.go +++ b/tests/functional/mesh/work_test.go @@ -33,8 +33,10 @@ func TestWorkSubmitWithTLSClient(t *testing.T) { if err != nil { t.Fatal(err, m.DataDir) } - ctx, _ := context.WithTimeout(context.Background(), 60*time.Second) - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkSucceeded(ctx1, unitID) if err != nil { t.Fatal(err, m.DataDir) } @@ -66,8 +68,10 @@ func TestWorkSubmitWithIncorrectTLSClient(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkFailed(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkFailed(ctx1, unitID) if err != nil { t.Fatal(err) @@ -79,15 +83,19 @@ func TestWorkSubmitWithIncorrectTLSClient(t *testing.T) { t.Fatal(err) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkReleased(ctx, unitID) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkReleased(ctx2, unitID) if err != nil { t.Fatal(err) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertFilesReleased(ctx, nodes["node1"].GetDataDir(), "node1", unitID) + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = assertFilesReleased(ctx3, 
nodes["node1"].GetDataDir(), "node1", unitID) if err != nil { t.Fatal(err) @@ -119,8 +127,10 @@ func TestStartRemoteWorkWithTTL(t *testing.T) { if err != nil { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) - err = controllers["node1"].AssertWorkTimedOut(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkTimedOut(ctx1, unitID) if err != nil { t.Fatal(err) } @@ -128,13 +138,17 @@ func TestStartRemoteWorkWithTTL(t *testing.T) { if err != nil { t.Fatal(err) } - ctx, _ = context.WithTimeout(context.Background(), 5*time.Second) - err = controllers["node1"].AssertWorkReleased(ctx, unitID) + ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkReleased(ctx2, unitID) if err != nil { t.Fatal(err) } - ctx, _ = context.WithTimeout(context.Background(), 5*time.Second) - err = assertFilesReleased(ctx, nodes["node1"].GetDataDir(), "node1", unitID) + + ctx3, cancel3 := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel3() + err = assertFilesReleased(ctx3, nodes["node1"].GetDataDir(), "node1", unitID) if err != nil { t.Fatal(err) } @@ -162,8 +176,10 @@ func TestCancelThenReleaseRemoteWork(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkRunning(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkRunning(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -171,8 +187,11 @@ func TestCancelThenReleaseRemoteWork(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkCancelled(ctx, unitID) + + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkCancelled(ctx2, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -189,8 +208,10 @@ func TestCancelThenReleaseRemoteWork(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + err = m.WaitForReady(ctx3) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -206,18 +227,26 @@ func TestCancelThenReleaseRemoteWork(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkReleased(ctx, unitID) + ctx4, cancel4 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel4() + + err = controllers["node1"].AssertWorkReleased(ctx4, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertFilesReleased(ctx, nodes["node1"].GetDataDir(), "node1", unitID) + + ctx5, cancel5 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel5() + + err = assertFilesReleased(ctx5, nodes["node1"].GetDataDir(), "node1", unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertFilesReleased(ctx, nodes["node3"].GetDataDir(), 
"node3", remoteUnitID) + + ctx6, cancel6 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel6() + + err = assertFilesReleased(ctx6, nodes["node3"].GetDataDir(), "node3", remoteUnitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -242,8 +271,10 @@ func TestWorkSubmitWhileRemoteNodeIsDown(t *testing.T) { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkPending(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkPending(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -256,15 +287,19 @@ func TestWorkSubmitWhileRemoteNodeIsDown(t *testing.T) { } // Wait for node3 to join the mesh again - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = m.WaitForReady(ctx2) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = controllers["node1"].AssertWorkSucceeded(ctx3, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -293,13 +328,17 @@ func TestWorkStreamingResumesWhenRelayNodeRestarts(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkRunning(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkRunning(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertStdoutFizeSize(ctx, nodes["node1"].GetDataDir(), "node1", unitID, 1) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = assertStdoutFizeSize(ctx2, nodes["node1"].GetDataDir(), "node1", unitID, 1) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -310,19 +349,25 @@ func TestWorkStreamingResumesWhenRelayNodeRestarts(t *testing.T) { nodes["node2"].Shutdown() nodes["node2"].Start() // Wait for node2 to join the mesh again - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = m.WaitForReady(ctx3) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx4, cancel4 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel4() + + err = controllers["node1"].AssertWorkSucceeded(ctx4, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertStdoutFizeSize(ctx, nodes["node1"].GetDataDir(), "node1", unitID, 10) + ctx5, cancel5 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel5() + + err = assertStdoutFizeSize(ctx5, nodes["node1"].GetDataDir(), "node1", unitID, 10) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -349,8 +394,9 @@ func TestResultsOnRestartedNode(t *testing.T) { if err != nil { 
t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkRunning(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + err = controllers["node1"].AssertWorkRunning(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -361,14 +407,18 @@ func TestResultsOnRestartedNode(t *testing.T) { t.Fatal(err, m.GetDataDir()) } // Wait for node3 to join the mesh again - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = m.WaitForReady(ctx2) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = controllers["node1"].AssertWorkSucceeded(ctx3, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -399,8 +449,10 @@ func TestWorkSubmitAndReleaseToNonexistentNode(t *testing.T) { } // wait for 10 seconds, and check if the work is in pending state - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkPending(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkPending(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -410,8 +462,11 @@ func TestWorkSubmitAndReleaseToNonexistentNode(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = m.WaitForReady(ctx2) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -429,8 +484,11 @@ func TestWorkSubmitAndReleaseToNonexistentNode(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkReleased(ctx, unitID) + + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = controllers["node1"].AssertWorkReleased(ctx3, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -454,8 +512,9 @@ func TestRuntimeParams(t *testing.T) { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -476,7 +535,7 @@ func TestRuntimeParams(t *testing.T) { t.Fatal(err, m.GetDataDir()) } - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + err = controllers["node1"].AssertWorkSucceeded(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -506,8 +565,10 @@ func TestKubeRuntimeParams(t *testing.T) { m.Start(t.Name()) - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err := m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err := m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -556,7 +617,7 @@ func TestKubeRuntimeParams(t *testing.T) { t.Fatal(err, m.GetDataDir()) } 
- err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + err = controllers["node1"].AssertWorkSucceeded(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -585,8 +646,10 @@ func TestRuntimeParamsNotAllowed(t *testing.T) { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -627,8 +690,10 @@ func TestKubeContainerFailure(t *testing.T) { m.Start(t.Name()) - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err := m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err := m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -644,8 +709,10 @@ func TestKubeContainerFailure(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkFailed(ctx, unitID) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkFailed(ctx2, unitID) if err != nil { t.Fatal("Expected work to fail but it succeeded") } @@ -720,8 +787,10 @@ func TestSignedWorkVerification(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -738,8 +807,10 @@ func TestSignedWorkVerification(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkSucceeded(ctx2, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } diff --git a/tests/functional/mesh/work_utils.go b/tests/functional/mesh/work_utils.go index 9acd5ec0d..ae1c9fe0a 100644 --- a/tests/functional/mesh/work_utils.go +++ b/tests/functional/mesh/work_utils.go @@ -21,8 +21,10 @@ func workSetup(workPluginName workPlugin, t *testing.T) (map[string]*ReceptorCon t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.DataDir) } From c24526b5d3c76f871e3d052ac8f681258f7e8f56 Mon Sep 17 00:00:00 2001 From: Thom Carlin Date: Thu, 8 Feb 2024 11:33:41 -0500 Subject: [PATCH 05/14] Add Getting Started Guide (#925) --- .../creating_a_basic_network.rst | 104 ++++++++++++++++++ docs/source/getting_started_guide/index.rst | 28 +++++ .../installing_receptor.rst | 21 ++++ .../getting_started_guide/introduction.rst | 16 +++ docs/source/getting_started_guide/mesh.png | Bin 0 -> 23784 bytes .../trying_sample_commands.rst | 53 +++++++++ docs/source/index.rst | 1 + docs/source/user_guide/basic_usage.rst | 2 + docs/source/user_guide/connecting_nodes.rst | 1 + docs/source/user_guide/index.rst | 33 ++++-- .../user_guide/interacting_with_nodes.rst | 2 + 11 files changed, 249 insertions(+), 12 deletions(-) create 
mode 100644 docs/source/getting_started_guide/creating_a_basic_network.rst create mode 100644 docs/source/getting_started_guide/index.rst create mode 100644 docs/source/getting_started_guide/installing_receptor.rst create mode 100644 docs/source/getting_started_guide/introduction.rst create mode 100644 docs/source/getting_started_guide/mesh.png create mode 100644 docs/source/getting_started_guide/trying_sample_commands.rst diff --git a/docs/source/getting_started_guide/creating_a_basic_network.rst b/docs/source/getting_started_guide/creating_a_basic_network.rst new file mode 100644 index 000000000..98358ac7a --- /dev/null +++ b/docs/source/getting_started_guide/creating_a_basic_network.rst @@ -0,0 +1,104 @@ + +.. _creating_a_basic_network: + +############################### +Creating a basic 3-node network +############################### + +In this section, we will create a three-node network. +The three nodes are: foo, bar, and mal. + +`foo -> bar <- mal` + +foo and mal are directly connected to bar with TCP connections. + +foo can reach mal by sending network packets through bar. + +*********************** +Receptor configurations +*********************** + +1. Create three configuration files, one for each node. + + **foo.yml** + +.. code-block:: yaml + + --- + - node: + id: foo + + - control-service: + service: control + filename: /tmp/foo.sock + + - tcp-peer: + address: localhost:2222 + redial: true + + - log-level: debug + ... + +**bar.yml** + +.. code-block:: yaml + + --- + - node: + id: bar + + - control-service: + service: control + filename: /tmp/bar.sock + + - tcp-listener: + port: 2222 + + - log-level: debug + ... + + **mal.yml** + +.. code-block:: yaml + + --- + - node: + id: mal + + - control-service: + service: control + filename: /tmp/mal.sock + + - tcp-peer: + address: localhost:2222 + redial: true + + - log-level: debug + + - work-command: + workType: echo + command: bash + params: "-c \"while read -r line; do echo $line; sleep 1; done\"" + allowruntimeparams: true + ... + +2. Run the services in separate terminals. + +.. code-block:: bash + + ./receptor --config foo.yml + +.. code-block:: bash + + ./receptor --config bar.yml + +.. code-block:: bash + + ./receptor --config mal.yml + +.. seealso:: + + :ref:`configuring_receptor_with_a_config_file` + Configuring Receptor with a configuration file + :ref:`connecting_nodes` + Detail on connecting receptor nodes diff --git a/docs/source/getting_started_guide/index.rst b/docs/source/getting_started_guide/index.rst new file mode 100644 index 000000000..30017aa72 --- /dev/null +++ b/docs/source/getting_started_guide/index.rst @@ -0,0 +1,28 @@ +############################# +Getting started with Receptor +############################# + +Receptor is an overlay network intended to ease the distribution of work across +a large and dispersed collection of workers. Receptor nodes establish peer-to- +peer connections with each other via existing networks. Once connected, the re- +ceptor mesh provides datagram (UDP-like) and stream (TCP-like) capabilities to +applications, as well as robust unit-of-work handling with resiliency against +transient network failures. + +.. image:: mesh.png + +.. toctree:: + :maxdepth: 1 + :caption: Contents: + + introduction + installing_receptor + creating_a_basic_network + trying_sample_commands + +.. 
seealso:: + + :ref:`interacting_with_nodes` + Further examples of working with nodes + :ref:`connecting_nodes` + Detail on connecting receptor nodes diff --git a/docs/source/getting_started_guide/installing_receptor.rst b/docs/source/getting_started_guide/installing_receptor.rst new file mode 100644 index 000000000..86b18bc39 --- /dev/null +++ b/docs/source/getting_started_guide/installing_receptor.rst @@ -0,0 +1,21 @@ + +.. _installing_receptor: + +################### +Installing Receptor +################### + +1. `Download receptor `_ +2. Install receptor (per installation guide below) +3. Install receptorctl + +.. code-block:: bash + + pip install receptorctl + +.. seealso:: + + :ref:`installing` + Detailed installation instructions + :ref:`using_receptor_containers` + Using receptor in containers diff --git a/docs/source/getting_started_guide/introduction.rst b/docs/source/getting_started_guide/introduction.rst new file mode 100644 index 000000000..3a4d5dd5f --- /dev/null +++ b/docs/source/getting_started_guide/introduction.rst @@ -0,0 +1,16 @@ +######################## +Introduction to receptor +######################## + +Receptor is an overlay network. +It eases the work distribution across a large and dispersed collection +of workers + +Receptor nodes establish peer-to-peer connections with each other through +existing networks + +Once connected, the receptor mesh provides: + +* Datagram (UDP-like) and stream (TCP-like) capabilities to applications +* Robust unit-of-work handling +* Resiliency against transient network failures diff --git a/docs/source/getting_started_guide/mesh.png b/docs/source/getting_started_guide/mesh.png new file mode 100644 index 0000000000000000000000000000000000000000..9de9f0e3ecf70af277a63e395d34cf2d51b8f20c GIT binary patch literal 23784 zcmeFZWmMH)*ELM1NP|ds3J6HIlyryENOyM}x=WBwrInCwQ0Y=SrMp48-hKSXeO>qa z>G|?}dd4_1@C?r0`?vSnYpprgTqi^^*{Nnk&gh&f_oR!MHN{>crCxXK728KNiSLn~?SI8h zHzEiw3jRin`0_Z`A?|$yZE#!&d?AVQrPTH@Q#+HQ)|FZnQH*@h%d_7_#Js;cUUv_o z#i3xDo#kr^#5G0mq`lFNpJj7~i9)FkKEf^P<>yC~D%hZ@-e@!I;!)BUOuxmDO*i`6+ps+eK*Zepj7HXh$vby58J9U)BBzBX7&!2zUVUfz5>= zBo;c;O^*A4$j!mpUM3BFA0OkQSGFKMz4wkl^*bH!$=7OZmButw6mb9bQWk}i!>hEx zfA-&RQ^3Ur;9)8tU}M4l#|sjO8^5CB@V!(NLE^t&kzpGFDB=J46758lIcDHB*7#rZ zQFOg@{;!WBNaQXdeDuI0sbc!CxnQy5SMvD&*Qb>o;8X?CVzu%Aooh|jurm;`ks$9D zJeI4xXrweLW1E|jrF%D}k0V7J!Qxfys7vqvUZ*G#ZmNKLxsLVc+CeP?U(-o#lNgSJ zk|sL1b!kyyvUQ%qorHhJdj{!_A$flAjDotVaZ(;1NR3rVJLZJY>MZ%0>m?#f?5?tF4nky}jDZ*%*~$dl5$J zrkzm7?psa3rf*}{c4ZcX8YBTVgUGawow5qMjlndvK?U_W@QyKx%<+#JEQlc16E2H* zzQdN&LHqW*_hF+uh2e8{%|lp)iQ&(2ppU{}1}Dc-75wwS=6PKFEFIi2umlUUa8WXp zTd*8Yh61}NhA`prXJQkqa8$|}2=`z$v!4`Q!HU-)V3C5N>e&CD+h4y-;1;uqQYqUZ zl(*k|osKDqq?yDCO@68wl{H)IP1xw8&*2iezx<$MTRSmm*Sx<`{=(u1Mcu9#R_d#F zbZN-sK0gN2Ukog@SN-zZO}o2TTqJSor7YBM^)hM$4>5TqDfn=EdQhz-ayS0_aB;C! 
[binary image data for docs/source/getting_started_guide/mesh.png (23784 bytes) omitted]
znz>-WB~W0^Pz`|5=GJP+>QXxvB-+AFdu{GojI71F$4RKvnslsPoail8Tgwu*ElYzQ zovCEm$oE@q8ON`O0FKzFG*HmCsu;gDVLm)lj|bqomZAIijvw;F>X|*!yB$~_Vyi?f zxy|^9r--RqZZg0=F-v@Ri6eMkAU@hELWIH;2Q{+w2d;D8NF&SL%5)8WIosy-qA8MaLT zOFMflKL#Azip`TvqME-d=2-(Zdf4-QSkXOm*o3oSK22O4abfV4X^d0dyE8nEP56->Z5VA|*Jd2>bR0Wx$Q{QEf z0U^|iJB)Ap%MY9xdUBeAie2>#%RvQY-Il}+w|vkPvvy20$8KgVT?6xAS$rs8W2 zcr+op%Z(Nuoqz_HU~^7ei`dN>G_m}mQ;0fo#tNJ_?oG3gnoVC zJ@Rl$ah&ERM|Dvi+U60GOLVAdTnr1s=&z^4b|`E3h#=HD(<6Du^d4I;&hRR2`wmCq z`WMXoKd?pMNvYfe+z>9L+$;%BGrOhCfBdAWG1bma)g`*ambSlmBv|Mlu+l;=7syg2 z_rB!dt^{RZbTwC&`v+3HMlbT=Fo*y<6`y=0LyTg`)7DNKF|679dn;P9o+Ks&jncm; zXPyXOjfrCMjpic(l|Q4x`Djozl~v>wE_90FhpwfsfAOV&(TbP4ac-iG+bMCakB+{g z`~?=(GAiuQV=0p&0#Q71SvKPP?PJbEnP-s_HP%WZ zk~gHu>IE#`VmK~9d1Am1nO@&GJ^moamx11}6ZoHl9SG(80$RfF zk~3@G%(PSf#Lrt`6gE@^ms0nPHdVtcw8f>fw8#keJz_h9wQiGb)w(ECC)J1bYksiy z+Ve$GS39356t)g9=INw&*z3QJ#lNn~0z39ko){IB$O_3});~W3nsX ze@tMc^F0B-z*^Dv;;YAmaFiN&@)siDf`Jc95EN|Ag^1O4`bZAu0|&MzVip@{GG1(| z4D-FEDR?PuLL=sruUVa`jPJBFURUdh&@waX9b3BB&1FB^Z0PQkVL%qJ&V!pi11E&RTnav-d5#D4pH;sW0tsVL!E45BDF z*yDsIJ;%8>lZ5-Mg2LY~J!8B$`?LN9W_f7;T-I6^=RMAy?itkxb{>yJCzxCsS*j|s zE5&P>q)bjkPx6cy&wHh1eo)mB7j;cI9_};VaQ4l##>7yrxv%rZ7X!8Oy(t;)dNk5Gh&D2W@S+{k^U7g5s(*5AGW?9Mf5sdb04+2=M7 z4&-fBdXFAFn-2ggT*gKcr7*gz=OGr ziPQ~n{L?ce3I|S(|3n9#%qr0xzI z>KXQJ1zkSloO+OzDADa6#2l09#>avQ;2M7~StXmtyAgi}%WD(<15hA?3*^&qqb_-< zd$G^y93DkOnhHPu}u&eY- zjTwVIr+fq_Zn}$aPrY;#I4^emxhxsJ#NpzX9!Sv%z?_JuUc5e`Ql8~S9l-rCpbtJj z)F<5C*Mu9oe0Gx4ojGP&3Vl7F9HCv%myzAmG0}lNb2?(T_>&h}c^)6&b~mO>VJ!C3 zyCCI3m!1Uns2||3n!Ziom;v>lrz1(z2hCs9PBY|m3Yqa~VwpBY`;+|;tnsCk4&pp|0`oyJiy%G z@wOv`8mLf~D$DXCT#$e42@Gk>OU&jwd_mbEOS# zISRo@`JY3T^UstRd{_y;<(kbYacBkGn08#=aBwZF=LikL3Xw$ro)jBSEA9z<#!;Zq z@uJLM(~xdE!rEyn6M#`d$>|=u*8SNfR{2}vTY$9oCe19`x>j-OO}W$&$Uw7i*vMS) ztEq@Va7hfj=Cj0H38$)MWG`wr;uiJJXXg|iAlW90>lhUmS6lVh2kje4m&_S9mDm%) zfDfv5_P%#s*wcBQy?4W@;l+}ucl4*5P zhWFg6_Li!J?FD$<4bm&=wOjEpL**YIQIYw`;g!^?(sy}&@F*q*gm>>A?!P54`DTwH zQ4`#MHDH4|>HcK2vHfWyG|i*1mad1ESB8Ducnn0__>}m)F50TxJ0T;s;ZT^MmrVBA z`q&GCDvAxq?=fGlQL46UOJIENnP2O4RW5e=Fx>aFr9J;#ij3BXR9D3DAP_Y7>=fzt zs0p4!-5W@JyLj5jg{H=Db){GG^v**FVY35Aq`9p>iN9*4MiuUQ#qXxZyT7=zUNMh$ zy-Y!-txF@`_4k_5uW=Q+ai*aW3bKxP#{1Ysy{uOK*(i{dXk3+ew&|z)A>&p9UVx$q^&n`pG8F~M=R!n9I}Q_5qEp^2b4`Tx(4V$ z#@D8^hJL9RnjDZXh0!7XpqaBAbd|06${R~5s;&4K(~oI`{%iu;F?`exv114&^2$2q_S@w?3jZ#1ufbungn!Y}%N kKAu$vAOAnOdgAC9cGJvLBmXc2@UxHUXc}m|L1V)H4IN}GeE Date: Thu, 8 Feb 2024 11:37:31 -0500 Subject: [PATCH 06/14] Update kind version to current --- .github/workflows/coverage_reporting.yml | 2 +- .github/workflows/pull_request.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/coverage_reporting.yml b/.github/workflows/coverage_reporting.yml index 1a8bfe152..72ef69695 100644 --- a/.github/workflows/coverage_reporting.yml +++ b/.github/workflows/coverage_reporting.yml @@ -38,7 +38,7 @@ jobs: sudo cp ./receptor /usr/local/bin/receptor - name: Download kind binary - run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 && chmod +x ./kind + run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.21.0/kind-linux-amd64 && chmod +x ./kind - name: Create k8s cluster run: ./kind create cluster diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index a0785d3e1..559533266 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -70,7 +70,7 @@ jobs: sudo cp ./receptor /usr/local/bin/receptor - name: Download kind binary - run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 && chmod +x ./kind 
+ run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.21.0/kind-linux-amd64 && chmod +x ./kind - name: Create k8s cluster run: ./kind create cluster From e1791ff31d767c730987755305ed77d2af7f57a0 Mon Sep 17 00:00:00 2001 From: Thom Carlin Date: Thu, 8 Feb 2024 12:02:30 -0500 Subject: [PATCH 07/14] Use floating latest kind version --- .github/workflows/coverage_reporting.yml | 2 +- .github/workflows/pull_request.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/coverage_reporting.yml b/.github/workflows/coverage_reporting.yml index 72ef69695..243b7bf6a 100644 --- a/.github/workflows/coverage_reporting.yml +++ b/.github/workflows/coverage_reporting.yml @@ -38,7 +38,7 @@ jobs: sudo cp ./receptor /usr/local/bin/receptor - name: Download kind binary - run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.21.0/kind-linux-amd64 && chmod +x ./kind + run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 && chmod +x ./kind - name: Create k8s cluster run: ./kind create cluster diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 559533266..80dd16d10 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -70,7 +70,7 @@ jobs: sudo cp ./receptor /usr/local/bin/receptor - name: Download kind binary - run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.21.0/kind-linux-amd64 && chmod +x ./kind + run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 && chmod +x ./kind - name: Create k8s cluster run: ./kind create cluster From 4264e3c0911d0cda5cf0532e5cdb031bcbc7ced2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Feb 2024 00:22:45 +0000 Subject: [PATCH 08/14] Bump golang.org/x/net from 0.20.0 to 0.21.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.20.0 to 0.21.0. - [Commits](https://github.com/golang/net/compare/v0.20.0...v0.21.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index f003bf12c..692d8c711 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 github.com/vishvananda/netlink v1.1.0 go.uber.org/mock v0.4.0 - golang.org/x/net v0.20.0 + golang.org/x/net v0.21.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.1 k8s.io/apimachinery v0.29.1 @@ -55,12 +55,12 @@ require ( github.com/quic-go/qtls-go1-20 v0.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/vishvananda/netns v0.0.4 // indirect - golang.org/x/crypto v0.18.0 // indirect + golang.org/x/crypto v0.19.0 // indirect golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.16.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.16.1 // indirect diff --git a/go.sum b/go.sum index 4dba32c5c..0c93334f3 100644 --- a/go.sum +++ b/go.sum @@ -131,8 +131,8 @@ go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -146,8 +146,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -162,11 +162,11 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From 75261da76bc7b8b18a63228a8d8c6df48c4ba8c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 00:22:48 +0000 Subject: [PATCH 09/14] Bump golangci/golangci-lint-action from 3 to 4 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3 to 4. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v3...v4) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/pull_request.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 80dd16d10..73201e5a4 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -15,7 +15,7 @@ jobs: fetch-depth: 0 - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: v1.52 lint-receptorctl: From 6aae11784cef14ab46d754aff0f362a28da6b556 Mon Sep 17 00:00:00 2001 From: Aaron Hetherington Date: Fri, 9 Feb 2024 12:12:32 +0000 Subject: [PATCH 10/14] Add a mock layer to kubernetes API calls --- pkg/workceptor/kubernetes.go | 194 ++++++++--- pkg/workceptor/kubernetes_test.go | 127 ++++++- pkg/workceptor/mock_workceptor/kubernetes.go | 330 +++++++++++++++++++ 3 files changed, 604 insertions(+), 47 deletions(-) create mode 100644 pkg/workceptor/mock_workceptor/kubernetes.go diff --git a/pkg/workceptor/kubernetes.go b/pkg/workceptor/kubernetes.go index aae1435d1..35b5847ad 100644 --- a/pkg/workceptor/kubernetes.go +++ b/pkg/workceptor/kubernetes.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "net" + "net/url" "os" "strconv" "strings" @@ -54,7 +55,7 @@ type kubeUnit struct { } // kubeExtraData is the content of the ExtraData JSON field for a Kubernetes worker. 
-type kubeExtraData struct { +type KubeExtraData struct { Image string Command string Params string @@ -64,6 +65,109 @@ type kubeExtraData struct { PodName string } +type KubeAPIer interface { + NewNotFound(qualifiedResource schema.GroupResource, name string) *apierrors.StatusError + OneTermEqualSelector(k string, v string) fields.Selector + NewForConfig(c *rest.Config) (*kubernetes.Clientset, error) + GetLogs(clientset *kubernetes.Clientset, namespace string, name string, opts *corev1.PodLogOptions) *rest.Request + Get(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error) + Create(clientset *kubernetes.Clientset, namespace string, ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) + List(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error) + Watch(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Delete(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.DeleteOptions) error + SubResource(clientset *kubernetes.Clientset, podName string, podNamespace string) *rest.Request + InClusterConfig() (*rest.Config, error) + NewDefaultClientConfigLoadingRules() *clientcmd.ClientConfigLoadingRules + BuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*rest.Config, error) + NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) + NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) + StreamWithContext(exec remotecommand.Executor, ctx context.Context, options remotecommand.StreamOptions) error + UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition watch2.PreconditionFunc, conditions ...watch2.ConditionFunc) (*watch.Event, error) + NewFakeNeverRateLimiter() flowcontrol.RateLimiter + NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter +} + +type KubeAPIWrapper struct { +} + +func (ku KubeAPIWrapper) NewNotFound(qualifiedResource schema.GroupResource, name string) *apierrors.StatusError { + return apierrors.NewNotFound(qualifiedResource, name) +} + +func (ku KubeAPIWrapper) OneTermEqualSelector(k string, v string) fields.Selector { + return fields.OneTermEqualSelector(k, v) +} + +func (ku KubeAPIWrapper) NewForConfig(c *rest.Config) (*kubernetes.Clientset, error) { + return kubernetes.NewForConfig(c) +} + +func (ku KubeAPIWrapper) GetLogs(clientset *kubernetes.Clientset, namespace string, name string, opts *corev1.PodLogOptions) *rest.Request { + return clientset.CoreV1().Pods(namespace).GetLogs(name, opts) +} + +func (ku KubeAPIWrapper) Get(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error) { + return clientset.CoreV1().Pods(namespace).Get(ctx, name, opts) +} + +func (ku KubeAPIWrapper) Create(clientset *kubernetes.Clientset, namespace string, ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) { + return clientset.CoreV1().Pods(namespace).Create(ctx, pod, opts) +} + +func (ku KubeAPIWrapper) List(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error) { + return clientset.CoreV1().Pods(namespace).List(ctx, opts) +} + +func (ku KubeAPIWrapper) Watch(clientset *kubernetes.Clientset, namespace string, ctx 
context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return clientset.CoreV1().Pods(namespace).Watch(ctx, opts) +} + +func (ku KubeAPIWrapper) Delete(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.DeleteOptions) error { + return clientset.CoreV1().Pods(namespace).Delete(ctx, name, opts) +} + +func (ku KubeAPIWrapper) SubResource(clientset *kubernetes.Clientset, podName string, podNamespace string) *rest.Request { + return clientset.CoreV1().RESTClient().Post().Resource("pods").Name(podName).Namespace(podNamespace).SubResource("attach") +} + +func (ku KubeAPIWrapper) InClusterConfig() (*rest.Config, error) { + return rest.InClusterConfig() +} + +func (ku KubeAPIWrapper) NewDefaultClientConfigLoadingRules() *clientcmd.ClientConfigLoadingRules { + return clientcmd.NewDefaultClientConfigLoadingRules() +} + +func (ku KubeAPIWrapper) BuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*rest.Config, error) { + return clientcmd.BuildConfigFromFlags(masterUrl, kubeconfigPath) +} + +func (ku KubeAPIWrapper) NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) { + return clientcmd.NewClientConfigFromBytes(configBytes) +} + +func (ku KubeAPIWrapper) NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) { + return remotecommand.NewSPDYExecutor(config, method, url) +} + +func (ku KubeAPIWrapper) StreamWithContext(exec remotecommand.Executor, ctx context.Context, options remotecommand.StreamOptions) error { + return exec.StreamWithContext(ctx, options) +} + +func (ku KubeAPIWrapper) UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition watch2.PreconditionFunc, conditions ...watch2.ConditionFunc) (*watch.Event, error) { + return watch2.UntilWithSync(ctx, lw, objType, precondition, conditions...) +} + +func (ku KubeAPIWrapper) NewFakeNeverRateLimiter() flowcontrol.RateLimiter { + return flowcontrol.NewFakeNeverRateLimiter() +} + +func (ku KubeAPIWrapper) NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter { + return flowcontrol.NewFakeAlwaysRateLimiter() +} + +var KubeAPIWrapperInstance KubeAPIer + // ErrPodCompleted is returned when pod has already completed before we could attach. 
var ErrPodCompleted = fmt.Errorf("pod ran to completion") @@ -78,7 +182,7 @@ func podRunningAndReady() func(event watch.Event) (bool, error) { imagePullBackOffRetries := 3 inner := func(event watch.Event) (bool, error) { if event.Type == watch.Deleted { - return false, apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") + return false, KubeAPIWrapperInstance.NewNotFound(schema.GroupResource{Resource: "pods"}, "") } if t, ok := event.Object.(*corev1.Pod); ok { switch t.Status.Phase { @@ -134,9 +238,7 @@ func (kw *kubeUnit) kubeLoggingConnectionHandler(timestamps bool, sinceTime time podOptions.SinceTime = &metav1.Time{Time: sinceTime} } - logReq := kw.clientset.CoreV1().Pods(podNamespace).GetLogs( - podName, podOptions, - ) + logReq := KubeAPIWrapperInstance.GetLogs(kw.clientset, podNamespace, podName, podOptions) // get logstream, with retry for retries := 5; retries > 0; retries-- { logStream, err = logReq.Stream(kw.GetContext()) @@ -207,7 +309,7 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout // get pod, with retry for retries := 5; retries > 0; retries-- { - kw.pod, err = kw.clientset.CoreV1().Pods(podNamespace).Get(kw.GetContext(), podName, metav1.GetOptions{}) + kw.pod, err = KubeAPIWrapperInstance.Get(kw.clientset, podNamespace, kw.GetContext(), podName, metav1.GetOptions{}) if err == nil { break } @@ -278,7 +380,7 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout } split := strings.SplitN(line, " ", 2) - timeStamp := parseTime(split[0]) + timeStamp := ParseTime(split[0]) if !timeStamp.After(sinceTime) && !successfulWrite { continue } @@ -301,7 +403,7 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout } func (kw *kubeUnit) createPod(env map[string]string) error { - ked := kw.UnredactedStatus().ExtraData.(*kubeExtraData) + ked := kw.UnredactedStatus().ExtraData.(*KubeExtraData) command, err := shlex.Split(ked.Command) if err != nil { return err @@ -383,7 +485,7 @@ func (kw *kubeUnit) createPod(env map[string]string) error { } // get pod and store to kw.pod - kw.pod, err = kw.clientset.CoreV1().Pods(ked.KubeNamespace).Create(kw.GetContext(), pod, metav1.CreateOptions{}) + kw.pod, err = KubeAPIWrapperInstance.Create(kw.clientset, ked.KubeNamespace, kw.GetContext(), pod, metav1.CreateOptions{}) if err != nil { return err } @@ -398,21 +500,21 @@ func (kw *kubeUnit) createPod(env map[string]string) error { status.State = WorkStatePending status.Detail = "Pod created" status.StdoutSize = 0 - status.ExtraData.(*kubeExtraData).PodName = kw.pod.Name + status.ExtraData.(*KubeExtraData).PodName = kw.pod.Name }) // Wait for the pod to be running - fieldSelector := fields.OneTermEqualSelector("metadata.name", kw.pod.Name).String() + fieldSelector := KubeAPIWrapperInstance.OneTermEqualSelector("metadata.name", kw.pod.Name).String() lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return kw.clientset.CoreV1().Pods(ked.KubeNamespace).List(kw.GetContext(), options) + return KubeAPIWrapperInstance.List(kw.clientset, ked.KubeNamespace, kw.GetContext(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return kw.clientset.CoreV1().Pods(ked.KubeNamespace).Watch(kw.GetContext(), options) + return KubeAPIWrapperInstance.Watch(kw.clientset, ked.KubeNamespace, kw.GetContext(), options) }, } @@ -422,7 +524,7 @@ func (kw *kubeUnit) createPod(env 
map[string]string) error { } time.Sleep(2 * time.Second) - ev, err := watch2.UntilWithSync(ctxPodReady, lw, &corev1.Pod{}, nil, podRunningAndReady()) + ev, err := KubeAPIWrapperInstance.UntilWithSync(ctxPodReady, lw, &corev1.Pod{}, nil, podRunningAndReady()) if ev == nil || ev.Object == nil { return fmt.Errorf("did not return an event while watching pod for work unit %s", kw.ID()) } @@ -491,7 +593,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { skipStdin := true status := kw.Status() - ked := status.ExtraData.(*kubeExtraData) + ked := status.ExtraData.(*KubeExtraData) podName := ked.PodName podNamespace := ked.KubeNamespace @@ -538,7 +640,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { default: } - kw.pod, err = kw.clientset.CoreV1().Pods(podNamespace).Get(kw.GetContext(), podName, metav1.GetOptions{}) + kw.pod, err = KubeAPIWrapperInstance.Get(kw.clientset, podNamespace, kw.GetContext(), podName, metav1.GetOptions{}) if err == nil { break } @@ -563,11 +665,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { // Attach stdin stream to the pod var exec remotecommand.Executor if !skipStdin { - req := kw.clientset.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(podName). - Namespace(podNamespace). - SubResource("attach") + req := KubeAPIWrapperInstance.SubResource(kw.clientset, podName, podNamespace) req.VersionedParams( &corev1.PodExecOptions{ @@ -579,9 +677,8 @@ func (kw *kubeUnit) runWorkUsingLogger() { }, scheme.ParameterCodec, ) - var err error - exec, err = remotecommand.NewSPDYExecutor(kw.config, "POST", req.URL()) + exec, err = KubeAPIWrapperInstance.NewSPDYExecutor(kw.config, "POST", req.URL()) if err != nil { errMsg := fmt.Sprintf("Error creating SPDY executor: %s", err) kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) @@ -675,7 +772,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { var err error for retries := 5; retries > 0; retries-- { - err = exec.StreamWithContext(kw.GetContext(), remotecommand.StreamOptions{ + err = KubeAPIWrapperInstance.StreamWithContext(exec, kw.GetContext(), remotecommand.StreamOptions{ Stdin: stdin, Tty: false, }) @@ -721,7 +818,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { }() } - stdoutWithReconnect := shouldUseReconnect() + stdoutWithReconnect := ShouldUseReconnect() if stdoutWithReconnect && stdoutErr == nil { kw.GetWorkceptor().nc.GetLogger().Debug("streaming stdout with reconnect support") go kw.kubeLoggingWithReconnect(&streamWait, stdout, &stdinErr, &stdoutErr) @@ -757,7 +854,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { } } -func shouldUseReconnect() bool { +func ShouldUseReconnect() bool { // Support for streaming from pod with timestamps using reconnect method is in all current versions // Can override the detection by setting the RECEPTOR_KUBE_SUPPORT_RECONNECT // accepted values: "enabled", "disabled", "auto". 
The default is "enabled" @@ -780,7 +877,7 @@ func shouldUseReconnect() bool { return true } -func parseTime(s string) *time.Time { +func ParseTime(s string) *time.Time { t, err := time.Parse(time.RFC3339, s) if err == nil { return &t @@ -973,10 +1070,10 @@ func (kw *kubeUnit) runWorkUsingTCP() { func (kw *kubeUnit) connectUsingKubeconfig() error { var err error - ked := kw.UnredactedStatus().ExtraData.(*kubeExtraData) + ked := kw.UnredactedStatus().ExtraData.(*KubeExtraData) if ked.KubeConfig == "" { - clr := clientcmd.NewDefaultClientConfigLoadingRules() - kw.config, err = clientcmd.BuildConfigFromFlags("", clr.GetDefaultFilename()) + clr := KubeAPIWrapperInstance.NewDefaultClientConfigLoadingRules() + kw.config, err = KubeAPIWrapperInstance.BuildConfigFromFlags("", clr.GetDefaultFilename()) if ked.KubeNamespace == "" { c, err := clr.Load() if err != nil { @@ -985,14 +1082,14 @@ func (kw *kubeUnit) connectUsingKubeconfig() error { curContext, ok := c.Contexts[c.CurrentContext] if ok && curContext != nil { kw.UpdateFullStatus(func(sfd *StatusFileData) { - sfd.ExtraData.(*kubeExtraData).KubeNamespace = curContext.Namespace + sfd.ExtraData.(*KubeExtraData).KubeNamespace = curContext.Namespace }) } else { return fmt.Errorf("could not determine namespace") } } } else { - cfg, err := clientcmd.NewClientConfigFromBytes([]byte(ked.KubeConfig)) + cfg, err := KubeAPIWrapperInstance.NewClientConfigFromBytes([]byte(ked.KubeConfig)) if err != nil { return err } @@ -1002,7 +1099,7 @@ func (kw *kubeUnit) connectUsingKubeconfig() error { return err } kw.UpdateFullStatus(func(sfd *StatusFileData) { - sfd.ExtraData.(*kubeExtraData).KubeNamespace = namespace + sfd.ExtraData.(*KubeExtraData).KubeNamespace = namespace }) } kw.config, err = cfg.ClientConfig() @@ -1019,7 +1116,7 @@ func (kw *kubeUnit) connectUsingKubeconfig() error { func (kw *kubeUnit) connectUsingIncluster() error { var err error - kw.config, err = rest.InClusterConfig() + kw.config, err = KubeAPIWrapperInstance.InClusterConfig() if err != nil { return err } @@ -1082,16 +1179,16 @@ func (kw *kubeUnit) connectToKube() error { if ok { switch envRateLimiter { case "never": - kw.config.RateLimiter = flowcontrol.NewFakeNeverRateLimiter() + kw.config.RateLimiter = KubeAPIWrapperInstance.NewFakeNeverRateLimiter() case "always": - kw.config.RateLimiter = flowcontrol.NewFakeAlwaysRateLimiter() + kw.config.RateLimiter = KubeAPIWrapperInstance.NewFakeAlwaysRateLimiter() default: } kw.GetWorkceptor().nc.GetLogger().Debug("RateLimiter: %s", envRateLimiter) } kw.GetWorkceptor().nc.GetLogger().Debug("QPS: %f, Burst: %d", kw.config.QPS, kw.config.Burst) - kw.clientset, err = kubernetes.NewForConfig(kw.config) + kw.clientset, err = KubeAPIWrapperInstance.NewForConfig(kw.config) if err != nil { return err } @@ -1114,7 +1211,7 @@ func readFileToString(filename string) (string, error) { // SetFromParams sets the in-memory state from parameters. func (kw *kubeUnit) SetFromParams(params map[string]string) error { - ked := kw.GetStatusCopy().ExtraData.(*kubeExtraData) + ked := kw.GetStatusCopy().ExtraData.(*KubeExtraData) type value struct { name string permission bool @@ -1203,7 +1300,7 @@ func (kw *kubeUnit) SetFromParams(params map[string]string) error { // Status returns a copy of the status currently loaded in memory. 
func (kw *kubeUnit) Status() *StatusFileData { status := kw.UnredactedStatus() - ed, ok := status.ExtraData.(*kubeExtraData) + ed, ok := status.ExtraData.(*KubeExtraData) if ok { ed.KubeConfig = "" ed.KubePod = "" @@ -1217,7 +1314,7 @@ func (kw *kubeUnit) UnredactedStatus() *StatusFileData { kw.GetStatusLock().RLock() defer kw.GetStatusLock().RUnlock() status := kw.GetStatusWithoutExtraData() - ked, ok := kw.GetStatusCopy().ExtraData.(*kubeExtraData) + ked, ok := kw.GetStatusCopy().ExtraData.(*KubeExtraData) if ok { kedCopy := *ked status.ExtraData = &kedCopy @@ -1246,7 +1343,7 @@ func (kw *kubeUnit) startOrRestart() error { // Restart resumes monitoring a job after a Receptor restart. func (kw *kubeUnit) Restart() error { status := kw.Status() - ked := status.ExtraData.(*kubeExtraData) + ked := status.ExtraData.(*KubeExtraData) if IsComplete(status.State) { return nil } @@ -1260,7 +1357,7 @@ func (kw *kubeUnit) Restart() error { if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("Pod %s could not be deleted: %s", ked.PodName, err.Error()) } else { - err := kw.clientset.CoreV1().Pods(ked.KubeNamespace).Delete(context.Background(), ked.PodName, metav1.DeleteOptions{}) + err := KubeAPIWrapperInstance.Delete(kw.clientset, ked.KubeNamespace, context.Background(), ked.PodName, metav1.DeleteOptions{}) if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("Pod %s could not be deleted: %s", ked.PodName, err.Error()) } @@ -1285,7 +1382,7 @@ func (kw *kubeUnit) Cancel() error { kw.CancelContext() kw.UpdateBasicStatus(WorkStateCanceled, "Canceled", -1) if kw.pod != nil { - err := kw.clientset.CoreV1().Pods(kw.pod.Namespace).Delete(context.Background(), kw.pod.Name, metav1.DeleteOptions{}) + err := KubeAPIWrapperInstance.Delete(kw.clientset, kw.pod.Namespace, context.Background(), kw.pod.Name, metav1.DeleteOptions{}) if err != nil { kw.GetWorkceptor().nc.GetLogger().Error("Error deleting pod %s: %s", kw.pod.Name, err) } @@ -1332,10 +1429,14 @@ type KubeWorkerCfg struct { // NewWorker is a factory to produce worker instances. 
func (cfg KubeWorkerCfg) NewWorker(bwu BaseWorkUnitForWorkUnit, w *Workceptor, unitID string, workType string) WorkUnit { + return cfg.NewkubeWorker(bwu, w, unitID, workType, nil) +} + +func (cfg KubeWorkerCfg) NewkubeWorker(bwu BaseWorkUnitForWorkUnit, w *Workceptor, unitID string, workType string, kawi KubeAPIer) WorkUnit { if bwu == nil { bwu = &BaseWorkUnit{ status: StatusFileData{ - ExtraData: &kubeExtraData{ + ExtraData: &KubeExtraData{ Image: cfg.Image, Command: cfg.Command, KubeNamespace: cfg.Namespace, @@ -1346,6 +1447,11 @@ func (cfg KubeWorkerCfg) NewWorker(bwu BaseWorkUnitForWorkUnit, w *Workceptor, u } } + KubeAPIWrapperInstance = KubeAPIWrapper{} + if kawi != nil { + KubeAPIWrapperInstance = kawi + } + ku := &kubeUnit{ BaseWorkUnitForWorkUnit: bwu, authMethod: strings.ToLower(cfg.AuthMethod), diff --git a/pkg/workceptor/kubernetes_test.go b/pkg/workceptor/kubernetes_test.go index 23500ede9..f7eb4dea2 100644 --- a/pkg/workceptor/kubernetes_test.go +++ b/pkg/workceptor/kubernetes_test.go @@ -1,10 +1,26 @@ -package workceptor +package workceptor_test import ( + "context" "os" "reflect" + "sync" "testing" "time" + + "github.com/ansible/receptor/pkg/logger" + "github.com/ansible/receptor/pkg/workceptor" + "github.com/ansible/receptor/pkg/workceptor/mock_workceptor" + "github.com/golang/mock/gomock" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" ) func TestShouldUseReconnect(t *testing.T) { @@ -50,7 +66,7 @@ func TestShouldUseReconnect(t *testing.T) { os.Unsetenv(envVariable) } - if got := shouldUseReconnect(); got != tt.want { + if got := workceptor.ShouldUseReconnect(); got != tt.want { t.Errorf("shouldUseReconnect() = %v, want %v", got, tt.want) } }) @@ -88,9 +104,114 @@ func TestParseTime(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := parseTime(tt.args.s); !reflect.DeepEqual(got, tt.want) { + if got := workceptor.ParseTime(tt.args.s); !reflect.DeepEqual(got, tt.want) { t.Errorf("parseTime() = %v, want %v", got, tt.want) } }) } } + +func createKubernetesTestSetup(t *testing.T) (workceptor.WorkUnit, *mock_workceptor.MockBaseWorkUnitForWorkUnit, *mock_workceptor.MockNetceptorForWorkceptor, *workceptor.Workceptor, *mock_workceptor.MockKubeAPIer, context.Context) { + ctrl := gomock.NewController(t) + ctx := context.Background() + + mockBaseWorkUnit := mock_workceptor.NewMockBaseWorkUnitForWorkUnit(ctrl) + mockNetceptor := mock_workceptor.NewMockNetceptorForWorkceptor(ctrl) + mockNetceptor.EXPECT().NodeID().Return("NodeID") + mockKubeAPI := mock_workceptor.NewMockKubeAPIer(ctrl) + + w, err := workceptor.New(ctx, mockNetceptor, "/tmp") + if err != nil { + t.Errorf("Error while creating Workceptor: %v", err) + } + + mockBaseWorkUnit.EXPECT().Init(w, "", "", workceptor.FileSystem{}, nil) + kubeConfig := workceptor.KubeWorkerCfg{AuthMethod: "incluster"} + ku := kubeConfig.NewkubeWorker(mockBaseWorkUnit, w, "", "", mockKubeAPI) + + return ku, mockBaseWorkUnit, mockNetceptor, w, mockKubeAPI, ctx +} + +type hasTerm struct { + field, value string +} + +func (h *hasTerm) DeepCopySelector() fields.Selector { return h } +func (h *hasTerm) Empty() bool { return true } +func (h *hasTerm) Matches(ls fields.Fields) bool { return true } +func (h *hasTerm) Requirements() 
fields.Requirements { + return []fields.Requirement{{ + Field: h.field, + Operator: selection.Equals, + Value: h.value, + }} +} +func (h *hasTerm) RequiresExactMatch(field string) (value string, found bool) { return "", true } +func (h *hasTerm) String() string { return "Test" } +func (h *hasTerm) Transform(fn fields.TransformFunc) (fields.Selector, error) { return h, nil } + +type ex struct { +} + +func (e *ex) Stream(options remotecommand.StreamOptions) error { + return nil +} + +func (e *ex) StreamWithContext(ctx context.Context, options remotecommand.StreamOptions) error { + return nil +} + +func TestKubeStart(t *testing.T) { + ku, mockbwu, mockNet, w, mockKubeAPI, ctx := createKubernetesTestSetup(t) + + startTestCases := []struct { + name string + }{ + {name: "test1"}, + } + + for _, testCase := range startTestCases { + t.Run(testCase.name, func(t *testing.T) { + mockbwu.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + config := rest.Config{} + mockKubeAPI.EXPECT().InClusterConfig().Return(&config, nil) + mockbwu.EXPECT().GetWorkceptor().Return(w).AnyTimes() + logger := logger.NewReceptorLogger("") + mockNet.EXPECT().GetLogger().Return(logger).AnyTimes() + clientset := kubernetes.Clientset{} + mockKubeAPI.EXPECT().NewForConfig(gomock.Any()).Return(&clientset, nil) + mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() + lock := &sync.RWMutex{} + mockbwu.EXPECT().GetStatusLock().Return(lock).AnyTimes() + kubeExtraData := workceptor.KubeExtraData{} + status := workceptor.StatusFileData{ExtraData: &kubeExtraData} + mockbwu.EXPECT().GetStatusWithoutExtraData().Return(&status).AnyTimes() + mockbwu.EXPECT().GetStatusCopy().Return(status).AnyTimes() + mockbwu.EXPECT().GetContext().Return(ctx).AnyTimes() + pod := corev1.Pod{metav1.TypeMeta{}, metav1.ObjectMeta{Name: "Test Name"}, corev1.PodSpec{}, corev1.PodStatus{}} + + mockKubeAPI.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&pod, nil).AnyTimes() + mockbwu.EXPECT().UpdateFullStatus(gomock.Any()).AnyTimes() + + field := hasTerm{} + mockKubeAPI.EXPECT().OneTermEqualSelector(gomock.Any(), gomock.Any()).Return(&field).AnyTimes() + ev := watch.Event{Object: &pod} + mockKubeAPI.EXPECT().UntilWithSync(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&ev, nil).AnyTimes() + apierr := apierrors.StatusError{} + mockKubeAPI.EXPECT().NewNotFound(gomock.Any(), gomock.Any()).Return(&apierr).AnyTimes() + mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() + + c := rest.RESTClient{} + req := rest.NewRequest(&c) + mockKubeAPI.EXPECT().SubResource(gomock.Any(), gomock.Any(), gomock.Any()).Return(req).AnyTimes() + exec := ex{} + mockKubeAPI.EXPECT().NewSPDYExecutor(gomock.Any(), gomock.Any(), gomock.Any()).Return(&exec, nil).AnyTimes() + mockbwu.EXPECT().UnitDir().Return("TestDir").AnyTimes() + + err := ku.Start() + if err != nil { + t.Error(err) + } + }) + } +} diff --git a/pkg/workceptor/mock_workceptor/kubernetes.go b/pkg/workceptor/mock_workceptor/kubernetes.go new file mode 100644 index 000000000..82c93d6af --- /dev/null +++ b/pkg/workceptor/mock_workceptor/kubernetes.go @@ -0,0 +1,330 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/workceptor/kubernetes.go + +// Package mock_workceptor is a generated GoMock package. 
+package mock_workceptor + +import ( + context "context" + gomock "github.com/golang/mock/gomock" + v1 "k8s.io/api/core/v1" + errors "k8s.io/apimachinery/pkg/api/errors" + v10 "k8s.io/apimachinery/pkg/apis/meta/v1" + fields "k8s.io/apimachinery/pkg/fields" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + watch "k8s.io/apimachinery/pkg/watch" + kubernetes "k8s.io/client-go/kubernetes" + rest "k8s.io/client-go/rest" + cache "k8s.io/client-go/tools/cache" + clientcmd "k8s.io/client-go/tools/clientcmd" + remotecommand "k8s.io/client-go/tools/remotecommand" + watch0 "k8s.io/client-go/tools/watch" + flowcontrol "k8s.io/client-go/util/flowcontrol" + url "net/url" + reflect "reflect" +) + +// MockKubeAPIer is a mock of KubeAPIer interface +type MockKubeAPIer struct { + ctrl *gomock.Controller + recorder *MockKubeAPIerMockRecorder +} + +// MockKubeAPIerMockRecorder is the mock recorder for MockKubeAPIer +type MockKubeAPIerMockRecorder struct { + mock *MockKubeAPIer +} + +// NewMockKubeAPIer creates a new mock instance +func NewMockKubeAPIer(ctrl *gomock.Controller) *MockKubeAPIer { + mock := &MockKubeAPIer{ctrl: ctrl} + mock.recorder = &MockKubeAPIerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockKubeAPIer) EXPECT() *MockKubeAPIerMockRecorder { + return m.recorder +} + +// NewNotFound mocks base method +func (m *MockKubeAPIer) NewNotFound(qualifiedResource schema.GroupResource, name string) *errors.StatusError { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewNotFound", qualifiedResource, name) + ret0, _ := ret[0].(*errors.StatusError) + return ret0 +} + +// NewNotFound indicates an expected call of NewNotFound +func (mr *MockKubeAPIerMockRecorder) NewNotFound(qualifiedResource, name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewNotFound", reflect.TypeOf((*MockKubeAPIer)(nil).NewNotFound), qualifiedResource, name) +} + +// OneTermEqualSelector mocks base method +func (m *MockKubeAPIer) OneTermEqualSelector(k, v string) fields.Selector { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OneTermEqualSelector", k, v) + ret0, _ := ret[0].(fields.Selector) + return ret0 +} + +// OneTermEqualSelector indicates an expected call of OneTermEqualSelector +func (mr *MockKubeAPIerMockRecorder) OneTermEqualSelector(k, v interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OneTermEqualSelector", reflect.TypeOf((*MockKubeAPIer)(nil).OneTermEqualSelector), k, v) +} + +// NewForConfig mocks base method +func (m *MockKubeAPIer) NewForConfig(c *rest.Config) (*kubernetes.Clientset, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewForConfig", c) + ret0, _ := ret[0].(*kubernetes.Clientset) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewForConfig indicates an expected call of NewForConfig +func (mr *MockKubeAPIerMockRecorder) NewForConfig(c interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewForConfig", reflect.TypeOf((*MockKubeAPIer)(nil).NewForConfig), c) +} + +// GetLogs mocks base method +func (m *MockKubeAPIer) GetLogs(clientset *kubernetes.Clientset, namespace, name string, opts *v1.PodLogOptions) *rest.Request { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLogs", clientset, namespace, name, opts) + ret0, _ := ret[0].(*rest.Request) + return ret0 +} + +// GetLogs indicates an expected 
call of GetLogs +func (mr *MockKubeAPIerMockRecorder) GetLogs(clientset, namespace, name, opts interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogs", reflect.TypeOf((*MockKubeAPIer)(nil).GetLogs), clientset, namespace, name, opts) +} + +// Get mocks base method +func (m *MockKubeAPIer) Get(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts v10.GetOptions) (*v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", clientset, namespace, ctx, name, opts) + ret0, _ := ret[0].(*v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get +func (mr *MockKubeAPIerMockRecorder) Get(clientset, namespace, ctx, name, opts interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubeAPIer)(nil).Get), clientset, namespace, ctx, name, opts) +} + +// Create mocks base method +func (m *MockKubeAPIer) Create(clientset *kubernetes.Clientset, namespace string, ctx context.Context, pod *v1.Pod, opts v10.CreateOptions) (*v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", clientset, namespace, ctx, pod, opts) + ret0, _ := ret[0].(*v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Create indicates an expected call of Create +func (mr *MockKubeAPIerMockRecorder) Create(clientset, namespace, ctx, pod, opts interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKubeAPIer)(nil).Create), clientset, namespace, ctx, pod, opts) +} + +// List mocks base method +func (m *MockKubeAPIer) List(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts v10.ListOptions) (*v1.PodList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", clientset, namespace, ctx, opts) + ret0, _ := ret[0].(*v1.PodList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// List indicates an expected call of List +func (mr *MockKubeAPIerMockRecorder) List(clientset, namespace, ctx, opts interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKubeAPIer)(nil).List), clientset, namespace, ctx, opts) +} + +// Watch mocks base method +func (m *MockKubeAPIer) Watch(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts v10.ListOptions) (watch.Interface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Watch", clientset, namespace, ctx, opts) + ret0, _ := ret[0].(watch.Interface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Watch indicates an expected call of Watch +func (mr *MockKubeAPIerMockRecorder) Watch(clientset, namespace, ctx, opts interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockKubeAPIer)(nil).Watch), clientset, namespace, ctx, opts) +} + +// Delete mocks base method +func (m *MockKubeAPIer) Delete(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts v10.DeleteOptions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", clientset, namespace, ctx, name, opts) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete +func (mr *MockKubeAPIerMockRecorder) Delete(clientset, namespace, ctx, name, opts interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeAPIer)(nil).Delete), clientset, namespace, ctx, name, opts) +} + +// SubResource mocks base method +func (m *MockKubeAPIer) SubResource(clientset *kubernetes.Clientset, podName, podNamespace string) *rest.Request { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubResource", clientset, podName, podNamespace) + ret0, _ := ret[0].(*rest.Request) + return ret0 +} + +// SubResource indicates an expected call of SubResource +func (mr *MockKubeAPIerMockRecorder) SubResource(clientset, podName, podNamespace interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockKubeAPIer)(nil).SubResource), clientset, podName, podNamespace) +} + +// InClusterConfig mocks base method +func (m *MockKubeAPIer) InClusterConfig() (*rest.Config, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InClusterConfig") + ret0, _ := ret[0].(*rest.Config) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InClusterConfig indicates an expected call of InClusterConfig +func (mr *MockKubeAPIerMockRecorder) InClusterConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InClusterConfig", reflect.TypeOf((*MockKubeAPIer)(nil).InClusterConfig)) +} + +// NewDefaultClientConfigLoadingRules mocks base method +func (m *MockKubeAPIer) NewDefaultClientConfigLoadingRules() *clientcmd.ClientConfigLoadingRules { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewDefaultClientConfigLoadingRules") + ret0, _ := ret[0].(*clientcmd.ClientConfigLoadingRules) + return ret0 +} + +// NewDefaultClientConfigLoadingRules indicates an expected call of NewDefaultClientConfigLoadingRules +func (mr *MockKubeAPIerMockRecorder) NewDefaultClientConfigLoadingRules() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewDefaultClientConfigLoadingRules", reflect.TypeOf((*MockKubeAPIer)(nil).NewDefaultClientConfigLoadingRules)) +} + +// BuildConfigFromFlags mocks base method +func (m *MockKubeAPIer) BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*rest.Config, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BuildConfigFromFlags", masterUrl, kubeconfigPath) + ret0, _ := ret[0].(*rest.Config) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BuildConfigFromFlags indicates an expected call of BuildConfigFromFlags +func (mr *MockKubeAPIerMockRecorder) BuildConfigFromFlags(masterUrl, kubeconfigPath interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildConfigFromFlags", reflect.TypeOf((*MockKubeAPIer)(nil).BuildConfigFromFlags), masterUrl, kubeconfigPath) +} + +// NewClientConfigFromBytes mocks base method +func (m *MockKubeAPIer) NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewClientConfigFromBytes", configBytes) + ret0, _ := ret[0].(clientcmd.ClientConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewClientConfigFromBytes indicates an expected call of NewClientConfigFromBytes +func (mr *MockKubeAPIerMockRecorder) NewClientConfigFromBytes(configBytes interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewClientConfigFromBytes", reflect.TypeOf((*MockKubeAPIer)(nil).NewClientConfigFromBytes), configBytes) +} + +// NewSPDYExecutor mocks base method +func (m 
*MockKubeAPIer) NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewSPDYExecutor", config, method, url) + ret0, _ := ret[0].(remotecommand.Executor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewSPDYExecutor indicates an expected call of NewSPDYExecutor +func (mr *MockKubeAPIerMockRecorder) NewSPDYExecutor(config, method, url interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewSPDYExecutor", reflect.TypeOf((*MockKubeAPIer)(nil).NewSPDYExecutor), config, method, url) +} + +// StreamWithContext mocks base method +func (m *MockKubeAPIer) StreamWithContext(exec remotecommand.Executor, ctx context.Context, options remotecommand.StreamOptions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StreamWithContext", exec, ctx, options) + ret0, _ := ret[0].(error) + return ret0 +} + +// StreamWithContext indicates an expected call of StreamWithContext +func (mr *MockKubeAPIerMockRecorder) StreamWithContext(exec, ctx, options interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWithContext", reflect.TypeOf((*MockKubeAPIer)(nil).StreamWithContext), exec, ctx, options) +} + +// UntilWithSync mocks base method +func (m *MockKubeAPIer) UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition watch0.PreconditionFunc, conditions ...watch0.ConditionFunc) (*watch.Event, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, lw, objType, precondition} + for _, a := range conditions { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UntilWithSync", varargs...) + ret0, _ := ret[0].(*watch.Event) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UntilWithSync indicates an expected call of UntilWithSync +func (mr *MockKubeAPIerMockRecorder) UntilWithSync(ctx, lw, objType, precondition interface{}, conditions ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, lw, objType, precondition}, conditions...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntilWithSync", reflect.TypeOf((*MockKubeAPIer)(nil).UntilWithSync), varargs...) 
+} + +// NewFakeNeverRateLimiter mocks base method +func (m *MockKubeAPIer) NewFakeNeverRateLimiter() flowcontrol.RateLimiter { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewFakeNeverRateLimiter") + ret0, _ := ret[0].(flowcontrol.RateLimiter) + return ret0 +} + +// NewFakeNeverRateLimiter indicates an expected call of NewFakeNeverRateLimiter +func (mr *MockKubeAPIerMockRecorder) NewFakeNeverRateLimiter() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFakeNeverRateLimiter", reflect.TypeOf((*MockKubeAPIer)(nil).NewFakeNeverRateLimiter)) +} + +// NewFakeAlwaysRateLimiter mocks base method +func (m *MockKubeAPIer) NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewFakeAlwaysRateLimiter") + ret0, _ := ret[0].(flowcontrol.RateLimiter) + return ret0 +} + +// NewFakeAlwaysRateLimiter indicates an expected call of NewFakeAlwaysRateLimiter +func (mr *MockKubeAPIerMockRecorder) NewFakeAlwaysRateLimiter() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFakeAlwaysRateLimiter", reflect.TypeOf((*MockKubeAPIer)(nil).NewFakeAlwaysRateLimiter)) +} From 8c8739a47710da89d9288220074f200cafec3aab Mon Sep 17 00:00:00 2001 From: Aaron Hetherington Date: Fri, 9 Feb 2024 15:39:59 +0000 Subject: [PATCH 11/14] Fix linting errors and add comments --- pkg/workceptor/kubernetes.go | 67 +++++----- pkg/workceptor/kubernetes_test.go | 95 ++++++++------- pkg/workceptor/mock_workceptor/kubernetes.go | 122 +++++++++---------- 3 files changed, 145 insertions(+), 139 deletions(-) diff --git a/pkg/workceptor/kubernetes.go b/pkg/workceptor/kubernetes.go index 35b5847ad..7429e4052 100644 --- a/pkg/workceptor/kubernetes.go +++ b/pkg/workceptor/kubernetes.go @@ -66,29 +66,28 @@ type KubeExtraData struct { } type KubeAPIer interface { - NewNotFound(qualifiedResource schema.GroupResource, name string) *apierrors.StatusError - OneTermEqualSelector(k string, v string) fields.Selector - NewForConfig(c *rest.Config) (*kubernetes.Clientset, error) - GetLogs(clientset *kubernetes.Clientset, namespace string, name string, opts *corev1.PodLogOptions) *rest.Request - Get(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error) - Create(clientset *kubernetes.Clientset, namespace string, ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) - List(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error) - Watch(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Delete(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.DeleteOptions) error - SubResource(clientset *kubernetes.Clientset, podName string, podNamespace string) *rest.Request + NewNotFound(schema.GroupResource, string) *apierrors.StatusError + OneTermEqualSelector(string, string) fields.Selector + NewForConfig(*rest.Config) (*kubernetes.Clientset, error) + GetLogs(*kubernetes.Clientset, string, string, *corev1.PodLogOptions) *rest.Request + Get(context.Context, *kubernetes.Clientset, string, string, metav1.GetOptions) (*corev1.Pod, error) + Create(context.Context, *kubernetes.Clientset, string, *corev1.Pod, metav1.CreateOptions) (*corev1.Pod, error) + List(context.Context, *kubernetes.Clientset, string, metav1.ListOptions) (*corev1.PodList, error) 
+ Watch(context.Context, *kubernetes.Clientset, string, metav1.ListOptions) (watch.Interface, error) + Delete(context.Context, *kubernetes.Clientset, string, string, metav1.DeleteOptions) error + SubResource(*kubernetes.Clientset, string, string) *rest.Request InClusterConfig() (*rest.Config, error) NewDefaultClientConfigLoadingRules() *clientcmd.ClientConfigLoadingRules - BuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*rest.Config, error) - NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) - NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) - StreamWithContext(exec remotecommand.Executor, ctx context.Context, options remotecommand.StreamOptions) error - UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition watch2.PreconditionFunc, conditions ...watch2.ConditionFunc) (*watch.Event, error) + BuildConfigFromFlags(string, string) (*rest.Config, error) + NewClientConfigFromBytes([]byte) (clientcmd.ClientConfig, error) + NewSPDYExecutor(*rest.Config, string, *url.URL) (remotecommand.Executor, error) + StreamWithContext(context.Context, remotecommand.Executor, remotecommand.StreamOptions) error + UntilWithSync(context.Context, cache.ListerWatcher, runtime.Object, watch2.PreconditionFunc, ...watch2.ConditionFunc) (*watch.Event, error) NewFakeNeverRateLimiter() flowcontrol.RateLimiter NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter } -type KubeAPIWrapper struct { -} +type KubeAPIWrapper struct{} func (ku KubeAPIWrapper) NewNotFound(qualifiedResource schema.GroupResource, name string) *apierrors.StatusError { return apierrors.NewNotFound(qualifiedResource, name) @@ -106,23 +105,23 @@ func (ku KubeAPIWrapper) GetLogs(clientset *kubernetes.Clientset, namespace stri return clientset.CoreV1().Pods(namespace).GetLogs(name, opts) } -func (ku KubeAPIWrapper) Get(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error) { +func (ku KubeAPIWrapper) Get(ctx context.Context, clientset *kubernetes.Clientset, namespace string, name string, opts metav1.GetOptions) (*corev1.Pod, error) { return clientset.CoreV1().Pods(namespace).Get(ctx, name, opts) } -func (ku KubeAPIWrapper) Create(clientset *kubernetes.Clientset, namespace string, ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) { +func (ku KubeAPIWrapper) Create(ctx context.Context, clientset *kubernetes.Clientset, namespace string, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) { return clientset.CoreV1().Pods(namespace).Create(ctx, pod, opts) } -func (ku KubeAPIWrapper) List(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error) { +func (ku KubeAPIWrapper) List(ctx context.Context, clientset *kubernetes.Clientset, namespace string, opts metav1.ListOptions) (*corev1.PodList, error) { return clientset.CoreV1().Pods(namespace).List(ctx, opts) } -func (ku KubeAPIWrapper) Watch(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { +func (ku KubeAPIWrapper) Watch(ctx context.Context, clientset *kubernetes.Clientset, namespace string, opts metav1.ListOptions) (watch.Interface, error) { return clientset.CoreV1().Pods(namespace).Watch(ctx, opts) } -func (ku KubeAPIWrapper) Delete(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts 
metav1.DeleteOptions) error { +func (ku KubeAPIWrapper) Delete(ctx context.Context, clientset *kubernetes.Clientset, namespace string, name string, opts metav1.DeleteOptions) error { return clientset.CoreV1().Pods(namespace).Delete(ctx, name, opts) } @@ -138,8 +137,8 @@ func (ku KubeAPIWrapper) NewDefaultClientConfigLoadingRules() *clientcmd.ClientC return clientcmd.NewDefaultClientConfigLoadingRules() } -func (ku KubeAPIWrapper) BuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*rest.Config, error) { - return clientcmd.BuildConfigFromFlags(masterUrl, kubeconfigPath) +func (ku KubeAPIWrapper) BuildConfigFromFlags(masterURL string, kubeconfigPath string) (*rest.Config, error) { + return clientcmd.BuildConfigFromFlags(masterURL, kubeconfigPath) } func (ku KubeAPIWrapper) NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) { @@ -150,7 +149,7 @@ func (ku KubeAPIWrapper) NewSPDYExecutor(config *rest.Config, method string, url return remotecommand.NewSPDYExecutor(config, method, url) } -func (ku KubeAPIWrapper) StreamWithContext(exec remotecommand.Executor, ctx context.Context, options remotecommand.StreamOptions) error { +func (ku KubeAPIWrapper) StreamWithContext(ctx context.Context, exec remotecommand.Executor, options remotecommand.StreamOptions) error { return exec.StreamWithContext(ctx, options) } @@ -166,6 +165,8 @@ func (ku KubeAPIWrapper) NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter { return flowcontrol.NewFakeAlwaysRateLimiter() } +// KubeAPIWrapperInstance is a package level var that wraps all required kubernetes API calls. +// It is instantiated in the NewkubeWorker function and available throughout the package. var KubeAPIWrapperInstance KubeAPIer // ErrPodCompleted is returned when pod has already completed before we could attach. 
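A minimal sketch of how the injection seam above is used, written against the names introduced in these patches; the example package and helper name are hypothetical, and it assumes the caller has already prepared the BaseWorkUnitForWorkUnit mock (including its Init expectation) and the Workceptor, as kubernetes_test.go does:

package workceptor_example // hypothetical example package, not part of the patches

import (
	"testing"

	"github.com/ansible/receptor/pkg/workceptor"
	"github.com/ansible/receptor/pkg/workceptor/mock_workceptor"
	"github.com/golang/mock/gomock"
)

// newKubeUnitWithMock builds a kube work unit whose Kubernetes API calls all go
// through a gomock-generated KubeAPIer instead of the real client-go wrapper.
func newKubeUnitWithMock(t *testing.T, bwu workceptor.BaseWorkUnitForWorkUnit, w *workceptor.Workceptor) (workceptor.WorkUnit, *mock_workceptor.MockKubeAPIer) {
	ctrl := gomock.NewController(t)
	mockKubeAPI := mock_workceptor.NewMockKubeAPIer(ctrl)

	cfg := workceptor.KubeWorkerCfg{AuthMethod: "incluster"}
	// A non-nil KubeAPIer replaces the default KubeAPIWrapper{} assigned to
	// KubeAPIWrapperInstance, so the unit's Kubernetes calls are served by the mock.
	ku := cfg.NewkubeWorker(bwu, w, "", "", mockKubeAPI)

	return ku, mockKubeAPI
}

Production code is unchanged by this pattern: KubeWorkerCfg.NewWorker still calls NewkubeWorker with a nil KubeAPIer, which selects the real KubeAPIWrapper{}.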
@@ -309,7 +310,7 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout // get pod, with retry for retries := 5; retries > 0; retries-- { - kw.pod, err = KubeAPIWrapperInstance.Get(kw.clientset, podNamespace, kw.GetContext(), podName, metav1.GetOptions{}) + kw.pod, err = KubeAPIWrapperInstance.Get(kw.GetContext(), kw.clientset, podNamespace, podName, metav1.GetOptions{}) if err == nil { break } @@ -485,7 +486,7 @@ func (kw *kubeUnit) createPod(env map[string]string) error { } // get pod and store to kw.pod - kw.pod, err = KubeAPIWrapperInstance.Create(kw.clientset, ked.KubeNamespace, kw.GetContext(), pod, metav1.CreateOptions{}) + kw.pod, err = KubeAPIWrapperInstance.Create(kw.GetContext(), kw.clientset, ked.KubeNamespace, pod, metav1.CreateOptions{}) if err != nil { return err } @@ -509,12 +510,12 @@ func (kw *kubeUnit) createPod(env map[string]string) error { ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return KubeAPIWrapperInstance.List(kw.clientset, ked.KubeNamespace, kw.GetContext(), options) + return KubeAPIWrapperInstance.List(kw.GetContext(), kw.clientset, ked.KubeNamespace, options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return KubeAPIWrapperInstance.Watch(kw.clientset, ked.KubeNamespace, kw.GetContext(), options) + return KubeAPIWrapperInstance.Watch(kw.GetContext(), kw.clientset, ked.KubeNamespace, options) }, } @@ -640,7 +641,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { default: } - kw.pod, err = KubeAPIWrapperInstance.Get(kw.clientset, podNamespace, kw.GetContext(), podName, metav1.GetOptions{}) + kw.pod, err = KubeAPIWrapperInstance.Get(kw.GetContext(), kw.clientset, podNamespace, podName, metav1.GetOptions{}) if err == nil { break } @@ -772,7 +773,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { var err error for retries := 5; retries > 0; retries-- { - err = KubeAPIWrapperInstance.StreamWithContext(exec, kw.GetContext(), remotecommand.StreamOptions{ + err = KubeAPIWrapperInstance.StreamWithContext(kw.GetContext(), exec, remotecommand.StreamOptions{ Stdin: stdin, Tty: false, }) @@ -1357,7 +1358,7 @@ func (kw *kubeUnit) Restart() error { if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("Pod %s could not be deleted: %s", ked.PodName, err.Error()) } else { - err := KubeAPIWrapperInstance.Delete(kw.clientset, ked.KubeNamespace, context.Background(), ked.PodName, metav1.DeleteOptions{}) + err := KubeAPIWrapperInstance.Delete(kw.GetContext(), kw.clientset, ked.KubeNamespace, ked.PodName, metav1.DeleteOptions{}) if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("Pod %s could not be deleted: %s", ked.PodName, err.Error()) } @@ -1382,7 +1383,7 @@ func (kw *kubeUnit) Cancel() error { kw.CancelContext() kw.UpdateBasicStatus(WorkStateCanceled, "Canceled", -1) if kw.pod != nil { - err := KubeAPIWrapperInstance.Delete(kw.clientset, kw.pod.Namespace, context.Background(), kw.pod.Name, metav1.DeleteOptions{}) + err := KubeAPIWrapperInstance.Delete(kw.GetContext(), kw.clientset, kw.pod.Namespace, kw.pod.Name, metav1.DeleteOptions{}) if err != nil { kw.GetWorkceptor().nc.GetLogger().Error("Error deleting pod %s: %s", kw.pod.Name, err) } diff --git a/pkg/workceptor/kubernetes_test.go b/pkg/workceptor/kubernetes_test.go index f7eb4dea2..ee29659d1 100644 --- a/pkg/workceptor/kubernetes_test.go +++ b/pkg/workceptor/kubernetes_test.go @@ -138,7 +138,7 @@ type hasTerm struct { func (h *hasTerm) DeepCopySelector() 
fields.Selector { return h } func (h *hasTerm) Empty() bool { return true } -func (h *hasTerm) Matches(ls fields.Fields) bool { return true } +func (h *hasTerm) Matches(_ fields.Fields) bool { return true } func (h *hasTerm) Requirements() fields.Requirements { return []fields.Requirement{{ Field: h.field, @@ -146,18 +146,17 @@ func (h *hasTerm) Requirements() fields.Requirements { Value: h.value, }} } -func (h *hasTerm) RequiresExactMatch(field string) (value string, found bool) { return "", true } -func (h *hasTerm) String() string { return "Test" } -func (h *hasTerm) Transform(fn fields.TransformFunc) (fields.Selector, error) { return h, nil } +func (h *hasTerm) RequiresExactMatch(_ string) (value string, found bool) { return "", true } +func (h *hasTerm) String() string { return "Test" } +func (h *hasTerm) Transform(_ fields.TransformFunc) (fields.Selector, error) { return h, nil } -type ex struct { -} +type ex struct{} -func (e *ex) Stream(options remotecommand.StreamOptions) error { +func (e *ex) Stream(_ remotecommand.StreamOptions) error { return nil } -func (e *ex) StreamWithContext(ctx context.Context, options remotecommand.StreamOptions) error { +func (e *ex) StreamWithContext(_ context.Context, _ remotecommand.StreamOptions) error { return nil } @@ -165,48 +164,54 @@ func TestKubeStart(t *testing.T) { ku, mockbwu, mockNet, w, mockKubeAPI, ctx := createKubernetesTestSetup(t) startTestCases := []struct { - name string + name string + expectedCalls func() }{ - {name: "test1"}, + { + name: "test1", + expectedCalls: func() { + mockbwu.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + config := rest.Config{} + mockKubeAPI.EXPECT().InClusterConfig().Return(&config, nil) + mockbwu.EXPECT().GetWorkceptor().Return(w).AnyTimes() + logger := logger.NewReceptorLogger("") + mockNet.EXPECT().GetLogger().Return(logger).AnyTimes() + clientset := kubernetes.Clientset{} + mockKubeAPI.EXPECT().NewForConfig(gomock.Any()).Return(&clientset, nil) + mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() + lock := &sync.RWMutex{} + mockbwu.EXPECT().GetStatusLock().Return(lock).AnyTimes() + kubeExtraData := workceptor.KubeExtraData{} + status := workceptor.StatusFileData{ExtraData: &kubeExtraData} + mockbwu.EXPECT().GetStatusWithoutExtraData().Return(&status).AnyTimes() + mockbwu.EXPECT().GetStatusCopy().Return(status).AnyTimes() + mockbwu.EXPECT().GetContext().Return(ctx).AnyTimes() + pod := corev1.Pod{TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{Name: "Test Name"}, Spec: corev1.PodSpec{}, Status: corev1.PodStatus{}} + + mockKubeAPI.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&pod, nil).AnyTimes() + mockbwu.EXPECT().UpdateFullStatus(gomock.Any()).AnyTimes() + + field := hasTerm{} + mockKubeAPI.EXPECT().OneTermEqualSelector(gomock.Any(), gomock.Any()).Return(&field).AnyTimes() + ev := watch.Event{Object: &pod} + mockKubeAPI.EXPECT().UntilWithSync(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&ev, nil).AnyTimes() + apierr := apierrors.StatusError{} + mockKubeAPI.EXPECT().NewNotFound(gomock.Any(), gomock.Any()).Return(&apierr).AnyTimes() + mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() + + c := rest.RESTClient{} + req := rest.NewRequest(&c) + mockKubeAPI.EXPECT().SubResource(gomock.Any(), gomock.Any(), gomock.Any()).Return(req).AnyTimes() + exec := ex{} + mockKubeAPI.EXPECT().NewSPDYExecutor(gomock.Any(), gomock.Any(), gomock.Any()).Return(&exec, nil).AnyTimes() + 
mockbwu.EXPECT().UnitDir().Return("TestDir").AnyTimes() + }, + }, } for _, testCase := range startTestCases { t.Run(testCase.name, func(t *testing.T) { - mockbwu.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - config := rest.Config{} - mockKubeAPI.EXPECT().InClusterConfig().Return(&config, nil) - mockbwu.EXPECT().GetWorkceptor().Return(w).AnyTimes() - logger := logger.NewReceptorLogger("") - mockNet.EXPECT().GetLogger().Return(logger).AnyTimes() - clientset := kubernetes.Clientset{} - mockKubeAPI.EXPECT().NewForConfig(gomock.Any()).Return(&clientset, nil) - mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() - lock := &sync.RWMutex{} - mockbwu.EXPECT().GetStatusLock().Return(lock).AnyTimes() - kubeExtraData := workceptor.KubeExtraData{} - status := workceptor.StatusFileData{ExtraData: &kubeExtraData} - mockbwu.EXPECT().GetStatusWithoutExtraData().Return(&status).AnyTimes() - mockbwu.EXPECT().GetStatusCopy().Return(status).AnyTimes() - mockbwu.EXPECT().GetContext().Return(ctx).AnyTimes() - pod := corev1.Pod{metav1.TypeMeta{}, metav1.ObjectMeta{Name: "Test Name"}, corev1.PodSpec{}, corev1.PodStatus{}} - - mockKubeAPI.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&pod, nil).AnyTimes() - mockbwu.EXPECT().UpdateFullStatus(gomock.Any()).AnyTimes() - - field := hasTerm{} - mockKubeAPI.EXPECT().OneTermEqualSelector(gomock.Any(), gomock.Any()).Return(&field).AnyTimes() - ev := watch.Event{Object: &pod} - mockKubeAPI.EXPECT().UntilWithSync(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&ev, nil).AnyTimes() - apierr := apierrors.StatusError{} - mockKubeAPI.EXPECT().NewNotFound(gomock.Any(), gomock.Any()).Return(&apierr).AnyTimes() - mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() - - c := rest.RESTClient{} - req := rest.NewRequest(&c) - mockKubeAPI.EXPECT().SubResource(gomock.Any(), gomock.Any(), gomock.Any()).Return(req).AnyTimes() - exec := ex{} - mockKubeAPI.EXPECT().NewSPDYExecutor(gomock.Any(), gomock.Any(), gomock.Any()).Return(&exec, nil).AnyTimes() - mockbwu.EXPECT().UnitDir().Return("TestDir").AnyTimes() + testCase.expectedCalls() err := ku.Start() if err != nil { diff --git a/pkg/workceptor/mock_workceptor/kubernetes.go b/pkg/workceptor/mock_workceptor/kubernetes.go index 82c93d6af..f72746955 100644 --- a/pkg/workceptor/mock_workceptor/kubernetes.go +++ b/pkg/workceptor/mock_workceptor/kubernetes.go @@ -49,148 +49,148 @@ func (m *MockKubeAPIer) EXPECT() *MockKubeAPIerMockRecorder { } // NewNotFound mocks base method -func (m *MockKubeAPIer) NewNotFound(qualifiedResource schema.GroupResource, name string) *errors.StatusError { +func (m *MockKubeAPIer) NewNotFound(arg0 schema.GroupResource, arg1 string) *errors.StatusError { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewNotFound", qualifiedResource, name) + ret := m.ctrl.Call(m, "NewNotFound", arg0, arg1) ret0, _ := ret[0].(*errors.StatusError) return ret0 } // NewNotFound indicates an expected call of NewNotFound -func (mr *MockKubeAPIerMockRecorder) NewNotFound(qualifiedResource, name interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) NewNotFound(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewNotFound", reflect.TypeOf((*MockKubeAPIer)(nil).NewNotFound), qualifiedResource, name) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewNotFound", reflect.TypeOf((*MockKubeAPIer)(nil).NewNotFound), arg0, arg1) } // OneTermEqualSelector mocks 
base method -func (m *MockKubeAPIer) OneTermEqualSelector(k, v string) fields.Selector { +func (m *MockKubeAPIer) OneTermEqualSelector(arg0, arg1 string) fields.Selector { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OneTermEqualSelector", k, v) + ret := m.ctrl.Call(m, "OneTermEqualSelector", arg0, arg1) ret0, _ := ret[0].(fields.Selector) return ret0 } // OneTermEqualSelector indicates an expected call of OneTermEqualSelector -func (mr *MockKubeAPIerMockRecorder) OneTermEqualSelector(k, v interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) OneTermEqualSelector(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OneTermEqualSelector", reflect.TypeOf((*MockKubeAPIer)(nil).OneTermEqualSelector), k, v) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OneTermEqualSelector", reflect.TypeOf((*MockKubeAPIer)(nil).OneTermEqualSelector), arg0, arg1) } // NewForConfig mocks base method -func (m *MockKubeAPIer) NewForConfig(c *rest.Config) (*kubernetes.Clientset, error) { +func (m *MockKubeAPIer) NewForConfig(arg0 *rest.Config) (*kubernetes.Clientset, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewForConfig", c) + ret := m.ctrl.Call(m, "NewForConfig", arg0) ret0, _ := ret[0].(*kubernetes.Clientset) ret1, _ := ret[1].(error) return ret0, ret1 } // NewForConfig indicates an expected call of NewForConfig -func (mr *MockKubeAPIerMockRecorder) NewForConfig(c interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) NewForConfig(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewForConfig", reflect.TypeOf((*MockKubeAPIer)(nil).NewForConfig), c) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewForConfig", reflect.TypeOf((*MockKubeAPIer)(nil).NewForConfig), arg0) } // GetLogs mocks base method -func (m *MockKubeAPIer) GetLogs(clientset *kubernetes.Clientset, namespace, name string, opts *v1.PodLogOptions) *rest.Request { +func (m *MockKubeAPIer) GetLogs(arg0 *kubernetes.Clientset, arg1, arg2 string, arg3 *v1.PodLogOptions) *rest.Request { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLogs", clientset, namespace, name, opts) + ret := m.ctrl.Call(m, "GetLogs", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*rest.Request) return ret0 } // GetLogs indicates an expected call of GetLogs -func (mr *MockKubeAPIerMockRecorder) GetLogs(clientset, namespace, name, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) GetLogs(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogs", reflect.TypeOf((*MockKubeAPIer)(nil).GetLogs), clientset, namespace, name, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogs", reflect.TypeOf((*MockKubeAPIer)(nil).GetLogs), arg0, arg1, arg2, arg3) } // Get mocks base method -func (m *MockKubeAPIer) Get(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts v10.GetOptions) (*v1.Pod, error) { +func (m *MockKubeAPIer) Get(arg0 context.Context, arg1 *kubernetes.Clientset, arg2, arg3 string, arg4 v10.GetOptions) (*v1.Pod, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", clientset, namespace, ctx, name, opts) + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(*v1.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // Get indicates an expected call of Get -func (mr *MockKubeAPIerMockRecorder) Get(clientset, namespace, ctx, name, opts 
interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubeAPIer)(nil).Get), clientset, namespace, ctx, name, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubeAPIer)(nil).Get), arg0, arg1, arg2, arg3, arg4) } // Create mocks base method -func (m *MockKubeAPIer) Create(clientset *kubernetes.Clientset, namespace string, ctx context.Context, pod *v1.Pod, opts v10.CreateOptions) (*v1.Pod, error) { +func (m *MockKubeAPIer) Create(arg0 context.Context, arg1 *kubernetes.Clientset, arg2 string, arg3 *v1.Pod, arg4 v10.CreateOptions) (*v1.Pod, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Create", clientset, namespace, ctx, pod, opts) + ret := m.ctrl.Call(m, "Create", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(*v1.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // Create indicates an expected call of Create -func (mr *MockKubeAPIerMockRecorder) Create(clientset, namespace, ctx, pod, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) Create(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKubeAPIer)(nil).Create), clientset, namespace, ctx, pod, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKubeAPIer)(nil).Create), arg0, arg1, arg2, arg3, arg4) } // List mocks base method -func (m *MockKubeAPIer) List(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts v10.ListOptions) (*v1.PodList, error) { +func (m *MockKubeAPIer) List(arg0 context.Context, arg1 *kubernetes.Clientset, arg2 string, arg3 v10.ListOptions) (*v1.PodList, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "List", clientset, namespace, ctx, opts) + ret := m.ctrl.Call(m, "List", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*v1.PodList) ret1, _ := ret[1].(error) return ret0, ret1 } // List indicates an expected call of List -func (mr *MockKubeAPIerMockRecorder) List(clientset, namespace, ctx, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) List(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKubeAPIer)(nil).List), clientset, namespace, ctx, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKubeAPIer)(nil).List), arg0, arg1, arg2, arg3) } // Watch mocks base method -func (m *MockKubeAPIer) Watch(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts v10.ListOptions) (watch.Interface, error) { +func (m *MockKubeAPIer) Watch(arg0 context.Context, arg1 *kubernetes.Clientset, arg2 string, arg3 v10.ListOptions) (watch.Interface, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Watch", clientset, namespace, ctx, opts) + ret := m.ctrl.Call(m, "Watch", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(watch.Interface) ret1, _ := ret[1].(error) return ret0, ret1 } // Watch indicates an expected call of Watch -func (mr *MockKubeAPIerMockRecorder) Watch(clientset, namespace, ctx, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) Watch(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", 
reflect.TypeOf((*MockKubeAPIer)(nil).Watch), clientset, namespace, ctx, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockKubeAPIer)(nil).Watch), arg0, arg1, arg2, arg3) } // Delete mocks base method -func (m *MockKubeAPIer) Delete(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts v10.DeleteOptions) error { +func (m *MockKubeAPIer) Delete(arg0 context.Context, arg1 *kubernetes.Clientset, arg2, arg3 string, arg4 v10.DeleteOptions) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", clientset, namespace, ctx, name, opts) + ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(error) return ret0 } // Delete indicates an expected call of Delete -func (mr *MockKubeAPIerMockRecorder) Delete(clientset, namespace, ctx, name, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) Delete(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeAPIer)(nil).Delete), clientset, namespace, ctx, name, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeAPIer)(nil).Delete), arg0, arg1, arg2, arg3, arg4) } // SubResource mocks base method -func (m *MockKubeAPIer) SubResource(clientset *kubernetes.Clientset, podName, podNamespace string) *rest.Request { +func (m *MockKubeAPIer) SubResource(arg0 *kubernetes.Clientset, arg1, arg2 string) *rest.Request { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubResource", clientset, podName, podNamespace) + ret := m.ctrl.Call(m, "SubResource", arg0, arg1, arg2) ret0, _ := ret[0].(*rest.Request) return ret0 } // SubResource indicates an expected call of SubResource -func (mr *MockKubeAPIerMockRecorder) SubResource(clientset, podName, podNamespace interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) SubResource(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockKubeAPIer)(nil).SubResource), clientset, podName, podNamespace) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockKubeAPIer)(nil).SubResource), arg0, arg1, arg2) } // InClusterConfig mocks base method @@ -223,69 +223,69 @@ func (mr *MockKubeAPIerMockRecorder) NewDefaultClientConfigLoadingRules() *gomoc } // BuildConfigFromFlags mocks base method -func (m *MockKubeAPIer) BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*rest.Config, error) { +func (m *MockKubeAPIer) BuildConfigFromFlags(arg0, arg1 string) (*rest.Config, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BuildConfigFromFlags", masterUrl, kubeconfigPath) + ret := m.ctrl.Call(m, "BuildConfigFromFlags", arg0, arg1) ret0, _ := ret[0].(*rest.Config) ret1, _ := ret[1].(error) return ret0, ret1 } // BuildConfigFromFlags indicates an expected call of BuildConfigFromFlags -func (mr *MockKubeAPIerMockRecorder) BuildConfigFromFlags(masterUrl, kubeconfigPath interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) BuildConfigFromFlags(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildConfigFromFlags", reflect.TypeOf((*MockKubeAPIer)(nil).BuildConfigFromFlags), masterUrl, kubeconfigPath) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildConfigFromFlags", reflect.TypeOf((*MockKubeAPIer)(nil).BuildConfigFromFlags), 
arg0, arg1) } // NewClientConfigFromBytes mocks base method -func (m *MockKubeAPIer) NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) { +func (m *MockKubeAPIer) NewClientConfigFromBytes(arg0 []byte) (clientcmd.ClientConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewClientConfigFromBytes", configBytes) + ret := m.ctrl.Call(m, "NewClientConfigFromBytes", arg0) ret0, _ := ret[0].(clientcmd.ClientConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // NewClientConfigFromBytes indicates an expected call of NewClientConfigFromBytes -func (mr *MockKubeAPIerMockRecorder) NewClientConfigFromBytes(configBytes interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) NewClientConfigFromBytes(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewClientConfigFromBytes", reflect.TypeOf((*MockKubeAPIer)(nil).NewClientConfigFromBytes), configBytes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewClientConfigFromBytes", reflect.TypeOf((*MockKubeAPIer)(nil).NewClientConfigFromBytes), arg0) } // NewSPDYExecutor mocks base method -func (m *MockKubeAPIer) NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) { +func (m *MockKubeAPIer) NewSPDYExecutor(arg0 *rest.Config, arg1 string, arg2 *url.URL) (remotecommand.Executor, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewSPDYExecutor", config, method, url) + ret := m.ctrl.Call(m, "NewSPDYExecutor", arg0, arg1, arg2) ret0, _ := ret[0].(remotecommand.Executor) ret1, _ := ret[1].(error) return ret0, ret1 } // NewSPDYExecutor indicates an expected call of NewSPDYExecutor -func (mr *MockKubeAPIerMockRecorder) NewSPDYExecutor(config, method, url interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) NewSPDYExecutor(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewSPDYExecutor", reflect.TypeOf((*MockKubeAPIer)(nil).NewSPDYExecutor), config, method, url) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewSPDYExecutor", reflect.TypeOf((*MockKubeAPIer)(nil).NewSPDYExecutor), arg0, arg1, arg2) } // StreamWithContext mocks base method -func (m *MockKubeAPIer) StreamWithContext(exec remotecommand.Executor, ctx context.Context, options remotecommand.StreamOptions) error { +func (m *MockKubeAPIer) StreamWithContext(arg0 context.Context, arg1 remotecommand.Executor, arg2 remotecommand.StreamOptions) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StreamWithContext", exec, ctx, options) + ret := m.ctrl.Call(m, "StreamWithContext", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // StreamWithContext indicates an expected call of StreamWithContext -func (mr *MockKubeAPIerMockRecorder) StreamWithContext(exec, ctx, options interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) StreamWithContext(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWithContext", reflect.TypeOf((*MockKubeAPIer)(nil).StreamWithContext), exec, ctx, options) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWithContext", reflect.TypeOf((*MockKubeAPIer)(nil).StreamWithContext), arg0, arg1, arg2) } // UntilWithSync mocks base method -func (m *MockKubeAPIer) UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition watch0.PreconditionFunc, conditions ...watch0.ConditionFunc) 
(*watch.Event, error) { +func (m *MockKubeAPIer) UntilWithSync(arg0 context.Context, arg1 cache.ListerWatcher, arg2 runtime.Object, arg3 watch0.PreconditionFunc, arg4 ...watch0.ConditionFunc) (*watch.Event, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, lw, objType, precondition} - for _, a := range conditions { + varargs := []interface{}{arg0, arg1, arg2, arg3} + for _, a := range arg4 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "UntilWithSync", varargs...) @@ -295,9 +295,9 @@ func (m *MockKubeAPIer) UntilWithSync(ctx context.Context, lw cache.ListerWatche } // UntilWithSync indicates an expected call of UntilWithSync -func (mr *MockKubeAPIerMockRecorder) UntilWithSync(ctx, lw, objType, precondition interface{}, conditions ...interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) UntilWithSync(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, lw, objType, precondition}, conditions...) + varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntilWithSync", reflect.TypeOf((*MockKubeAPIer)(nil).UntilWithSync), varargs...) } From 148bd301c299ea3627320e7aba500ace16887e17 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 00:40:22 +0000 Subject: [PATCH 12/14] Bump k8s.io/apimachinery from 0.29.1 to 0.29.2 Bumps [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) from 0.29.1 to 0.29.2. - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.29.1...v0.29.2) --- updated-dependencies: - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 692d8c711..89196d6b8 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( golang.org/x/net v0.21.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.1 - k8s.io/apimachinery v0.29.1 + k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.1 ) diff --git a/go.sum b/go.sum index 0c93334f3..812d75ff8 100644 --- a/go.sum +++ b/go.sum @@ -204,8 +204,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= From 066763674fb9248cd683d5702427096edee6f8eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 11:16:30 +0000 Subject: [PATCH 13/14] Bump k8s.io/api from 0.29.1 to 0.29.2 Bumps [k8s.io/api](https://github.com/kubernetes/api) from 0.29.1 to 0.29.2. 
- [Commits](https://github.com/kubernetes/api/compare/v0.29.1...v0.29.2) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 89196d6b8..3e1b3249c 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( go.uber.org/mock v0.4.0 golang.org/x/net v0.21.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.29.1 + k8s.io/api v0.29.2 k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.1 ) diff --git a/go.sum b/go.sum index 812d75ff8..734f8d383 100644 --- a/go.sum +++ b/go.sum @@ -202,8 +202,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= From 692b96832a6139ea9904519c1cbcc8a4c7b63dfc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Feb 2024 11:39:33 +0000 Subject: [PATCH 14/14] Bump k8s.io/client-go from 0.29.1 to 0.29.2 Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.29.1 to 0.29.2. - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.29.1...v0.29.2) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3e1b3249c..3e2f47811 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.2 k8s.io/apimachinery v0.29.2 - k8s.io/client-go v0.29.1 + k8s.io/client-go v0.29.2 ) require ( diff --git a/go.sum b/go.sum index 734f8d383..6590a693c 100644 --- a/go.sum +++ b/go.sum @@ -206,8 +206,8 @@ k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=