From 3a0e804877b9e1076bcc2cebf289f8e9cff13e41 Mon Sep 17 00:00:00 2001
From: Sairaman K
Date: Tue, 30 Nov 2021 09:53:12 +0530
Subject: [PATCH] Updating API version of admissionregistration.k8s.io (#1401)

* Added webhook timeout variable
* Made the timeout variable private
* Fixed chart version
* Bumped admission & admissionregistration versions to v1
* Bumped operator version to v1beta2-1.3.0-3.1.1
* Fixed typo in README.md
* Fixed typos in README.md

Co-authored-by: Sairaman Kumar
---
 Makefile                                     |  2 +-
 README.md                                    |  3 +
 charts/spark-operator-chart/Chart.yaml       |  4 +-
 charts/spark-operator-chart/README.md        |  2 +-
 charts/spark-operator-chart/README.md.gotmpl |  2 +-
 examples/spark-operator-with-metrics.yaml    |  8 +-
 examples/spark-operator-with-webhook.yaml    | 16 ++--
 .../spark-operator.yaml                      |  8 +-
 .../spark-operator-patch.yaml                |  2 +-
 .../spark-operator-webhook.yaml              |  8 +-
 pkg/webhook/scheme.go                        |  4 +-
 pkg/webhook/webhook.go                       | 84 +++++++++----------
 pkg/webhook/webhook_test.go                  | 10 +--
 test/e2e/README.md                           |  2 +-
 14 files changed, 78 insertions(+), 77 deletions(-)

diff --git a/Makefile b/Makefile
index 7e7d7e176..cca336495 100644
--- a/Makefile
+++ b/Makefile
@@ -64,7 +64,7 @@ test: clean
 
 it-test: clean all
 	@echo "running unit tests"
-	go test -v ./test/e2e/ --kubeconfig "$HOME/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:v1beta2-1.2.3-3.1.1
+	go test -v ./test/e2e/ --kubeconfig "$$HOME/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1
 
 vet:
 	@echo "running go vet"
diff --git a/README.md b/README.md
index 00e31699a..a4c8cbb74 100644
--- a/README.md
+++ b/README.md
@@ -22,6 +22,8 @@ Customization of Spark pods, e.g., mounting arbitrary volumes and setting pod af
 * Version >= 1.13 of Kubernetes to use the [`subresource` support for CustomResourceDefinitions](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#subresources), which became beta in 1.13 and is enabled by default in 1.13 and higher.
 
+* Version >= 1.16 of Kubernetes to use the `MutatingWebhook` and `ValidatingWebhook` APIs from `apiVersion: admissionregistration.k8s.io/v1`.
+
 ## Installation
 
 The easiest way to install the Kubernetes Operator for Apache Spark is to use the Helm [chart](charts/spark-operator-chart/).
@@ -47,6 +49,7 @@ The following table lists the most recent few versions of the operator.
| Operator Version | API Version | Kubernetes Version | Base Spark Version | Operator Image Tag | | ------------- | ------------- | ------------- | ------------- | ------------- | | `latest` (master HEAD) | `v1beta2` | 1.13+ | `3.0.0` | `latest` | +| `v1beta2-1.3.0-3.1.1` | `v1beta2` | 1.16+ | `3.1.1` | `v1beta2-1.3.0-3.1.1` | | `v1beta2-1.2.3-3.1.1` | `v1beta2` | 1.13+ | `3.1.1` | `v1beta2-1.2.3-3.1.1` | | `v1beta2-1.2.0-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` | `v1beta2-1.2.0-3.0.0` | | `v1beta2-1.1.2-2.4.5` | `v1beta2` | 1.13+ | `2.4.5` | `v1beta2-1.1.2-2.4.5` | diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index c11d09f9a..4bae7c859 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.1.13 -appVersion: v1beta2-1.2.3-3.1.1 +version: 1.1.14 +appVersion: v1beta2-1.3.0-3.1.1 keywords: - spark home: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 92b0b59d9..f2b39be84 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -9,7 +9,7 @@ This chart bootstraps a [Kubernetes Operator for Apache Spark](https://github.co ## Prerequisites - Helm >= 3 -- Kubernetes >= 1.13 +- Kubernetes >= 1.16 ## Previous Helm Chart diff --git a/charts/spark-operator-chart/README.md.gotmpl b/charts/spark-operator-chart/README.md.gotmpl index 78e24484d..ca1db84f8 100644 --- a/charts/spark-operator-chart/README.md.gotmpl +++ b/charts/spark-operator-chart/README.md.gotmpl @@ -9,7 +9,7 @@ This chart bootstraps a [Kubernetes Operator for Apache Spark]({{template "chart ## Prerequisites - Helm >= 3 -- Kubernetes >= 1.13 +- Kubernetes >= 1.16 ## Previous Helm Chart diff --git a/examples/spark-operator-with-metrics.yaml b/examples/spark-operator-with-metrics.yaml index 1a0e4cc0d..3513b506a 100644 --- a/examples/spark-operator-with-metrics.yaml +++ b/examples/spark-operator-with-metrics.yaml @@ -21,13 +21,13 @@ metadata: namespace: spark-operator labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: replicas: 1 selector: matchLabels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 strategy: type: Recreate template: @@ -38,12 +38,12 @@ spec: prometheus.io/path: "/metrics" labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: serviceAccountName: sparkoperator containers: - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v1beta2-1.2.3-3.1.1 + image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 imagePullPolicy: Always ports: - containerPort: 10254 diff --git a/examples/spark-operator-with-webhook.yaml b/examples/spark-operator-with-webhook.yaml index fa90b1277..25fa81d5e 100644 --- a/examples/spark-operator-with-webhook.yaml +++ b/examples/spark-operator-with-webhook.yaml @@ -21,20 +21,20 @@ metadata: namespace: spark-operator labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: replicas: 1 selector: matchLabels: app.kubernetes.io/name: sparkoperator - 
app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 strategy: type: Recreate template: metadata: labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: serviceAccountName: sparkoperator volumes: @@ -43,7 +43,7 @@ spec: secretName: spark-webhook-certs containers: - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v1beta2-1.2.3-3.1.1 + image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 imagePullPolicy: Always volumeMounts: - name: webhook-certs @@ -62,20 +62,20 @@ metadata: namespace: spark-operator labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: backoffLimit: 3 template: metadata: labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: serviceAccountName: sparkoperator restartPolicy: Never containers: - name: main - image: gcr.io/spark-operator/spark-operator:v1beta2-1.2.3-3.1.1 + image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 imagePullPolicy: IfNotPresent command: ["/usr/bin/gencerts.sh", "-p"] --- @@ -91,4 +91,4 @@ spec: name: webhook selector: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 diff --git a/manifest/spark-operator-install/spark-operator.yaml b/manifest/spark-operator-install/spark-operator.yaml index 41f53b912..b4b31d1ad 100644 --- a/manifest/spark-operator-install/spark-operator.yaml +++ b/manifest/spark-operator-install/spark-operator.yaml @@ -21,25 +21,25 @@ metadata: namespace: spark-operator labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: replicas: 1 selector: matchLabels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 strategy: type: Recreate template: metadata: labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: serviceAccountName: sparkoperator containers: - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v1beta2-1.2.3-3.1.1 + image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 imagePullPolicy: Always args: - -logtostderr diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml index ba3ce4fe9..e752063c8 100644 --- a/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml +++ b/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml @@ -19,7 +19,7 @@ metadata: name: sparkoperator labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 namespace: spark-operator spec: template: diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml index 1618f06cc..eaad8660d 100644 --- a/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml +++ b/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml @@ -21,20 +21,20 @@ metadata: namespace: spark-operator 
labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: backoffLimit: 3 template: metadata: labels: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 spec: serviceAccountName: sparkoperator restartPolicy: Never containers: - name: main - image: gcr.io/spark-operator/spark-operator:v1beta2-1.2.3-3.1.1 + image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 imagePullPolicy: IfNotPresent command: ["/usr/bin/gencerts.sh", "-p"] --- @@ -50,4 +50,4 @@ spec: name: webhook selector: app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.2.3-3.1.1 + app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 diff --git a/pkg/webhook/scheme.go b/pkg/webhook/scheme.go index 2b182dc3e..e9a02c5f0 100644 --- a/pkg/webhook/scheme.go +++ b/pkg/webhook/scheme.go @@ -17,7 +17,7 @@ limitations under the License. package webhook import ( - admissionv1beta1 "k8s.io/api/admission/v1beta1" + admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -34,5 +34,5 @@ func init() { func addToScheme(scheme *runtime.Scheme) { corev1.AddToScheme(scheme) - admissionv1beta1.AddToScheme(scheme) + admissionv1.AddToScheme(scheme) } diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go index 425809b20..8d58fdd3b 100644 --- a/pkg/webhook/webhook.go +++ b/pkg/webhook/webhook.go @@ -28,10 +28,8 @@ import ( "github.com/golang/glog" - admissionv1beta1 "k8s.io/api/admission/v1beta1" - "k8s.io/api/admissionregistration/v1beta1" - arv1beta1 "k8s.io/api/admissionregistration/v1beta1" - apiv1 "k8s.io/api/core/v1" + admissionv1 "k8s.io/api/admission/v1" + arv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -78,8 +76,8 @@ type WebHook struct { lister crdlisters.SparkApplicationLister server *http.Server certProvider *certProvider - serviceRef *v1beta1.ServiceReference - failurePolicy v1beta1.FailurePolicyType + serviceRef *arv1.ServiceReference + failurePolicy arv1.FailurePolicyType selector *metav1.LabelSelector sparkJobNamespace string deregisterOnExit bool @@ -139,7 +137,7 @@ func New( } path := "/webhook" - serviceRef := &v1beta1.ServiceReference{ + serviceRef := &arv1.ServiceReference{ Namespace: userConfig.webhookServiceNamespace, Name: userConfig.webhookServiceName, Path: &path, @@ -152,14 +150,14 @@ func New( serviceRef: serviceRef, sparkJobNamespace: jobNamespace, deregisterOnExit: deregisterOnExit, - failurePolicy: arv1beta1.Ignore, + failurePolicy: arv1.Ignore, coreV1InformerFactory: coreV1InformerFactory, enableResourceQuotaEnforcement: enableResourceQuotaEnforcement, timeoutSeconds: func(b int32) *int32 { return &b }(int32(*webhookTimeout)), } if userConfig.webhookFailOnError { - hook.failurePolicy = arv1beta1.Fail + hook.failurePolicy = arv1.Fail } if userConfig.webhookNamespaceSelector == "" { @@ -197,7 +195,7 @@ func parseNamespaceSelector(selectorArg string) (*metav1.LabelSelector, error) { for _, selectorStr := range selectorStrs { kv := strings.SplitN(selectorStr, "=", 2) if len(kv) != 2 || kv[0] == "" || kv[1] == "" { - return nil, fmt.Errorf("Webhook namespace selector must be in the form key1=value1,key2=value2") + return nil, fmt.Errorf("webhook namespace selector must be in the form key1=value1,key2=value2") } 
selector.MatchLabels[kv[0]] = kv[1] } @@ -231,7 +229,7 @@ func (wh *WebHook) Start(stopCh <-chan struct{}) error { func (wh *WebHook) Stop() error { // Do not deregister if strict error handling is enabled; pod deletions are common, and we // don't want to create windows where pods can be created without being subject to the webhook. - if wh.failurePolicy != arv1beta1.Fail { + if wh.failurePolicy != arv1.Fail { if err := wh.selfDeregistration(userConfig.webhookConfigName); err != nil { return err } @@ -268,14 +266,14 @@ func (wh *WebHook) serve(w http.ResponseWriter, r *http.Request) { return } - review := &admissionv1beta1.AdmissionReview{} + review := &admissionv1.AdmissionReview{} deserializer := codecs.UniversalDeserializer() if _, _, err := deserializer.Decode(body, nil, review); err != nil { internalError(w, err) return } var whErr error - var reviewResponse *admissionv1beta1.AdmissionResponse + var reviewResponse *admissionv1.AdmissionResponse switch review.Request.Resource { case podResource: reviewResponse, whErr = mutatePods(review, wh.lister, wh.sparkJobNamespace) @@ -300,7 +298,7 @@ func (wh *WebHook) serve(w http.ResponseWriter, r *http.Request) { return } - response := admissionv1beta1.AdmissionReview{} + response := admissionv1.AdmissionReview{} if reviewResponse != nil { response.Response = reviewResponse if review.Request != nil { @@ -328,8 +326,8 @@ func internalError(w http.ResponseWriter, err error) { } func denyRequest(w http.ResponseWriter, reason string, code int) { - response := &admissionv1beta1.AdmissionReview{ - Response: &admissionv1beta1.AdmissionResponse{ + response := &admissionv1.AdmissionReview{ + Response: &admissionv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Code: int32(code), @@ -351,18 +349,18 @@ func denyRequest(w http.ResponseWriter, reason string, code int) { } func (wh *WebHook) selfRegistration(webhookConfigName string) error { - mwcClient := wh.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations() - vwcClient := wh.clientset.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations() + mwcClient := wh.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations() + vwcClient := wh.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations() caCert, err := readCertFile(wh.certProvider.caCertFile) if err != nil { return err } - mutatingRules := []v1beta1.RuleWithOperations{ + mutatingRules := []arv1.RuleWithOperations{ { - Operations: []v1beta1.OperationType{v1beta1.Create}, - Rule: v1beta1.Rule{ + Operations: []arv1.OperationType{arv1.Create}, + Rule: arv1.Rule{ APIGroups: []string{""}, APIVersions: []string{"v1"}, Resources: []string{"pods"}, @@ -370,10 +368,10 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { }, } - validatingRules := []v1beta1.RuleWithOperations{ + validatingRules := []arv1.RuleWithOperations{ { - Operations: []v1beta1.OperationType{v1beta1.Create, v1beta1.Update}, - Rule: v1beta1.Rule{ + Operations: []arv1.OperationType{arv1.Create, arv1.Update}, + Rule: arv1.Rule{ APIGroups: []string{crdapi.GroupName}, APIVersions: []string{crdv1beta2.Version}, Resources: []string{sparkApplicationResource.Resource, scheduledSparkApplicationResource.Resource}, @@ -381,10 +379,10 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { }, } - mutatingWebhook := v1beta1.MutatingWebhook{ + mutatingWebhook := arv1.MutatingWebhook{ Name: webhookName, Rules: mutatingRules, - ClientConfig: v1beta1.WebhookClientConfig{ + ClientConfig: 
arv1.WebhookClientConfig{ Service: wh.serviceRef, CABundle: caCert, }, @@ -393,10 +391,10 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { TimeoutSeconds: wh.timeoutSeconds, } - validatingWebhook := v1beta1.ValidatingWebhook{ + validatingWebhook := arv1.ValidatingWebhook{ Name: quotaWebhookName, Rules: validatingRules, - ClientConfig: v1beta1.WebhookClientConfig{ + ClientConfig: arv1.WebhookClientConfig{ Service: wh.serviceRef, CABundle: caCert, }, @@ -405,8 +403,8 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { TimeoutSeconds: wh.timeoutSeconds, } - mutatingWebhooks := []v1beta1.MutatingWebhook{mutatingWebhook} - validatingWebhooks := []v1beta1.ValidatingWebhook{validatingWebhook} + mutatingWebhooks := []arv1.MutatingWebhook{mutatingWebhook} + validatingWebhooks := []arv1.ValidatingWebhook{validatingWebhook} mutatingExisting, mutatingGetErr := mwcClient.Get(context.TODO(), webhookConfigName, metav1.GetOptions{}) if mutatingGetErr != nil { @@ -415,7 +413,7 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { } // Create case. glog.Info("Creating a MutatingWebhookConfiguration for the Spark pod admission webhook") - webhookConfig := &v1beta1.MutatingWebhookConfiguration{ + webhookConfig := &arv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: webhookConfigName, }, @@ -443,7 +441,7 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { } // Create case. glog.Info("Creating a ValidatingWebhookConfiguration for the SparkApplication resource quota enforcement webhook") - webhookConfig := &v1beta1.ValidatingWebhookConfiguration{ + webhookConfig := &arv1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: webhookConfigName, }, @@ -468,8 +466,8 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { } func (wh *WebHook) selfDeregistration(webhookConfigName string) error { - mutatingConfigs := wh.clientset.AdmissionregistrationV1beta1().MutatingWebhookConfigurations() - validatingConfigs := wh.clientset.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations() + mutatingConfigs := wh.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations() + validatingConfigs := wh.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations() if wh.enableResourceQuotaEnforcement { err := validatingConfigs.Delete(context.TODO(), webhookConfigName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) if err != nil { @@ -479,7 +477,7 @@ func (wh *WebHook) selfDeregistration(webhookConfigName string) error { return mutatingConfigs.Delete(context.TODO(), webhookConfigName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) } -func admitSparkApplications(review *admissionv1beta1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1beta1.AdmissionResponse, error) { +func admitSparkApplications(review *admissionv1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1.AdmissionResponse, error) { if review.Request.Resource != sparkApplicationResource { return nil, fmt.Errorf("expected resource to be %s, got %s", sparkApplicationResource, review.Request.Resource) } @@ -494,7 +492,7 @@ func admitSparkApplications(review *admissionv1beta1.AdmissionReview, enforcer r if err != nil { return nil, fmt.Errorf("resource quota enforcement failed for SparkApplication: %v", err) } - response := &admissionv1beta1.AdmissionResponse{Allowed: reason == ""} + response := &admissionv1.AdmissionResponse{Allowed: reason == ""} if 
reason != "" { response.Result = &metav1.Status{ Message: reason, @@ -504,7 +502,7 @@ func admitSparkApplications(review *admissionv1beta1.AdmissionReview, enforcer r return response, nil } -func admitScheduledSparkApplications(review *admissionv1beta1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1beta1.AdmissionResponse, error) { +func admitScheduledSparkApplications(review *admissionv1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1.AdmissionResponse, error) { if review.Request.Resource != scheduledSparkApplicationResource { return nil, fmt.Errorf("expected resource to be %s, got %s", scheduledSparkApplicationResource, review.Request.Resource) } @@ -515,7 +513,7 @@ func admitScheduledSparkApplications(review *admissionv1beta1.AdmissionReview, e return nil, fmt.Errorf("failed to unmarshal a ScheduledSparkApplication from the raw data in the admission request: %v", err) } - response := &admissionv1beta1.AdmissionResponse{Allowed: true} + response := &admissionv1.AdmissionResponse{Allowed: true} reason, err := enforcer.AdmitScheduledSparkApplication(*app) if err != nil { return nil, fmt.Errorf("resource quota enforcement failed for ScheduledSparkApplication: %v", err) @@ -530,16 +528,16 @@ func admitScheduledSparkApplications(review *admissionv1beta1.AdmissionReview, e } func mutatePods( - review *admissionv1beta1.AdmissionReview, + review *admissionv1.AdmissionReview, lister crdlisters.SparkApplicationLister, - sparkJobNs string) (*admissionv1beta1.AdmissionResponse, error) { + sparkJobNs string) (*admissionv1.AdmissionResponse, error) { raw := review.Request.Object.Raw pod := &corev1.Pod{} if err := json.Unmarshal(raw, pod); err != nil { return nil, fmt.Errorf("failed to unmarshal a Pod from the raw data in the admission request: %v", err) } - response := &admissionv1beta1.AdmissionResponse{Allowed: true} + response := &admissionv1.AdmissionResponse{Allowed: true} if !isSparkPod(pod) || !inSparkJobNamespace(review.Request.Namespace, sparkJobNs) { glog.V(2).Infof("Pod %s in namespace %s is not subject to mutation", pod.GetObjectMeta().GetName(), review.Request.Namespace) @@ -565,7 +563,7 @@ func mutatePods( } glog.V(3).Infof("Pod %s mutation/patch result %s", pod.GetObjectMeta().GetName(), patchBytes) response.Patch = patchBytes - patchType := admissionv1beta1.PatchTypeJSONPatch + patchType := admissionv1.PatchTypeJSONPatch response.PatchType = &patchType } @@ -573,7 +571,7 @@ func mutatePods( } func inSparkJobNamespace(podNs string, sparkJobNamespace string) bool { - if sparkJobNamespace == apiv1.NamespaceAll { + if sparkJobNamespace == corev1.NamespaceAll { return true } return podNs == sparkJobNamespace diff --git a/pkg/webhook/webhook_test.go b/pkg/webhook/webhook_test.go index 618dd7954..06e2ebefb 100644 --- a/pkg/webhook/webhook_test.go +++ b/pkg/webhook/webhook_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" - "k8s.io/api/admission/v1beta1" + admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -62,8 +62,8 @@ func TestMutatePod(t *testing.T) { if err != nil { t.Error(err) } - review := &v1beta1.AdmissionReview{ - Request: &v1beta1.AdmissionRequest{ + review := &admissionv1.AdmissionReview{ + Request: &admissionv1.AdmissionRequest{ Resource: metav1.GroupVersionResource{ Group: corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, @@ -99,7 +99,7 @@ func TestMutatePod(t *testing.T) 
{ review.Request.Object.Raw = podBytes response, _ = mutatePods(review, lister, "default") assert.True(t, response.Allowed) - assert.Equal(t, v1beta1.PatchTypeJSONPatch, *response.PatchType) + assert.Equal(t, admissionv1.PatchTypeJSONPatch, *response.PatchType) assert.True(t, len(response.Patch) > 0) // 3. Test processing Spark pod with patches. @@ -172,7 +172,7 @@ func TestMutatePod(t *testing.T) { review.Request.Object.Raw = podBytes response, _ = mutatePods(review, lister, "default") assert.True(t, response.Allowed) - assert.Equal(t, v1beta1.PatchTypeJSONPatch, *response.PatchType) + assert.Equal(t, admissionv1.PatchTypeJSONPatch, *response.PatchType) assert.True(t, len(response.Patch) > 0) var patchOps []*patchOperation json.Unmarshal(response.Patch, &patchOps) diff --git a/test/e2e/README.md b/test/e2e/README.md index 9c4f6e76f..b8dc9d133 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -12,7 +12,7 @@ Prerequisites: e2e tests are written as Go test. All go test techniques apply (e.g. picking what to run, timeout length). Let's say I want to run all tests in "test/e2e/": ```bash -$ go test -v ./test/e2e/ --kubeconfig "$HOME/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:v1beta2-1.2.3-3.1.1 +$ go test -v ./test/e2e/ --kubeconfig "$HOME/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 ``` ### Available Tests
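
One behavioral note on the `admissionregistration.k8s.io/v1` bump: unlike `v1beta1`, the v1 API makes `sideEffects` and `admissionReviewVersions` required on every webhook, so a registration that leaves them unset is rejected by the API server. As a reference point, here is a minimal sketch of a v1-conformant `MutatingWebhookConfiguration` built with the same client types used in the hunks above; it is not the operator's code, and the webhook, service, and namespace names in it are illustrative:

```go
package example

import (
	arv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newMutatingConfig sketches a v1-conformant MutatingWebhookConfiguration.
// SideEffects and AdmissionReviewVersions are required in v1; v1beta1
// defaulted both, so a straight type swap that leaves them nil is rejected
// by the API server at create time.
func newMutatingConfig(name string, caCert []byte, timeoutSeconds *int32) *arv1.MutatingWebhookConfiguration {
	path := "/webhook" // matches the path convention used above
	failurePolicy := arv1.Ignore
	sideEffects := arv1.SideEffectClassNoneOnDryRun
	return &arv1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Webhooks: []arv1.MutatingWebhook{{
			Name: "webhook.sparkoperator.k8s.io", // illustrative webhook name
			ClientConfig: arv1.WebhookClientConfig{
				Service: &arv1.ServiceReference{
					Namespace: "spark-operator", // illustrative
					Name:      "spark-webhook",  // illustrative
					Path:      &path,
				},
				CABundle: caCert,
			},
			Rules: []arv1.RuleWithOperations{{
				Operations: []arv1.OperationType{arv1.Create},
				Rule: arv1.Rule{
					APIGroups:   []string{""},
					APIVersions: []string{"v1"},
					Resources:   []string{"pods"},
				},
			}},
			FailurePolicy:           &failurePolicy,
			SideEffects:             &sideEffects,
			AdmissionReviewVersions: []string{"v1"},
			TimeoutSeconds:          timeoutSeconds,
		}},
	}
}
```

The same two required fields apply to `arv1.ValidatingWebhook` used for the resource quota enforcement webhook.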
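
The `admission.k8s.io/v1` review contract is stricter on the response side as well: the reply must set `apiVersion`/`kind` explicitly and echo the request `uid`, both of which the API server tolerated being absent under `v1beta1`. A minimal sketch of a conformant reply on the marshalling path that a handler like `serve` needs to satisfy; the helper name and error text are illustrative:

```go
package example

import (
	"encoding/json"
	"fmt"

	admissionv1 "k8s.io/api/admission/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildReply assembles a v1 AdmissionReview response. In v1 the reply must
// carry apiVersion/kind and echo the request UID, or the API server
// rejects the admission response.
func buildReply(review *admissionv1.AdmissionReview, allowed bool, reason string) ([]byte, error) {
	reply := admissionv1.AdmissionReview{
		TypeMeta: metav1.TypeMeta{
			APIVersion: admissionv1.SchemeGroupVersion.String(), // "admission.k8s.io/v1"
			Kind:       "AdmissionReview",
		},
		Response: &admissionv1.AdmissionResponse{
			UID:     review.Request.UID, // must match the request UID
			Allowed: allowed,
		},
	}
	if reason != "" {
		reply.Response.Result = &metav1.Status{Message: reason}
	}
	out, err := json.Marshal(reply)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal AdmissionReview reply: %v", err)
	}
	return out, nil
}
```

When porting a handler from `v1beta1`, it is worth verifying that the reply's `TypeMeta` is populated before it is written; echoing the UID alone is not sufficient in v1.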