diff --git a/kubernetes/service-account-secret.yaml b/kubernetes/service-account-secret.yaml
new file mode 100644
index 00000000..e61bd665
--- /dev/null
+++ b/kubernetes/service-account-secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: shibuya
+  annotations:
+    kubernetes.io/service-account.name: shibuya
+type: kubernetes.io/service-account-token
diff --git a/makefile b/makefile
index 0c45849c..3923dde1 100644
--- a/makefile
+++ b/makefile
@@ -33,16 +33,26 @@ grafana: grafana/
 	kind load docker-image shibuya:grafana --name shibuya
 	kubectl -n $(shibuya-controller-ns) replace -f kubernetes/grafana.yaml --force
 
-.PHONY: shibuya
-shibuya: shibuya/ kubernetes/
-	cd shibuya && sh build.sh
+.PHONY: local_api
+local_api:
+	cd shibuya && sh build.sh api
 	docker build -f shibuya/Dockerfile --build-arg env=local -t api:local shibuya
 	kind load docker-image api:local --name shibuya
+
+.PHONY: local_controller
+local_controller:
+	cd shibuya && sh build.sh controller
+	docker build -f shibuya/Dockerfile --build-arg env=local -t controller:local shibuya
+	kind load docker-image controller:local --name shibuya
+
+.PHONY: shibuya
+shibuya: local_api local_controller
 	helm uninstall shibuya || true
-	helm upgrade --install shibuya install/shibuya
+	cd shibuya && helm upgrade --install shibuya install/shibuya
 
 .PHONY: jmeter
 jmeter: shibuya/engines/jmeter
+	cp shibuya/config_tmpl.json shibuya/config.json
 	cd shibuya && sh build.sh jmeter
 	docker build -t shibuya:jmeter -f shibuya/docker-local/Dockerfile.engines.jmeter shibuya
 	kind load docker-image shibuya:jmeter --name shibuya
@@ -51,8 +61,12 @@
 expose:
 	-killall kubectl
 	-kubectl -n $(shibuya-controller-ns) port-forward service/grafana 3000:3000 > /dev/null 2>&1 &
-	-kubectl -n $(shibuya-controller-ns) port-forward service/shibuya 8080:8080 > /dev/null 2>&1 &
+	-kubectl -n $(shibuya-controller-ns) port-forward service/shibuya-api-local 8080:8080 > /dev/null 2>&1 &
 
+# TODO!
+# After k8s 1.22, the service account token is no longer auto-generated. We need to manually create the secret
+# for the service account. ref: "https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#manual-secret-management-for-serviceaccounts"
+# So we should fetch the token details from the manually created secret instead of the automatically created ones.
 .PHONY: kubeconfig
 kubeconfig:
 	./kubernetes/generate_kubeconfig.sh $(shibuya-controller-ns)
@@ -61,6 +75,7 @@ kubeconfig:
 permissions:
 	kubectl -n $(shibuya-executor-ns) apply -f kubernetes/roles.yaml
 	kubectl -n $(shibuya-controller-ns) apply -f kubernetes/serviceaccount.yaml
+	kubectl -n $(shibuya-controller-ns) apply -f kubernetes/service-account-secret.yaml
 	-kubectl -n $(shibuya-executor-ns) create rolebinding shibuya --role=shibuya --serviceaccount $(shibuya-controller-ns):shibuya
 	kubectl -n $(shibuya-executor-ns) replace -f kubernetes/ingress.yaml --force
 
@@ -85,11 +100,3 @@ ingress-controller:
 # And update the image in the config.json
 	docker build -t shibuya:ingress-controller -f ingress-controller/Dockerfile ingress-controller
 	kind load docker-image shibuya:ingress-controller --name shibuya
-
-.PHONY: controller
-controller:
-	cd shibuya && sh build.sh controller
-	docker build -f shibuya/Dockerfile --build-arg env=local --build-arg="binary_name=shibuya-controller" -t controller:local shibuya
-	kind load docker-image controller:local --name shibuya
-	helm uninstall shibuya || true
-	helm upgrade --install shibuya install/shibuya
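A side note on the makefile TODO above (illustrative, not part of the patch): once the cluster no longer auto-generates a token secret for the shibuya service account, the token has to be read from the manually created secret that the permissions target now applies. A minimal client-go sketch of that lookup is below; the package name, function signature and namespace argument are assumptions for illustration only.

```go
// Sketch only: read the token from the manually created secret defined in
// kubernetes/service-account-secret.yaml (name: shibuya) instead of an
// auto-generated one. Package and function names are hypothetical.
package kubeconfig

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func serviceAccountToken(client kubernetes.Interface, namespace string) (string, error) {
	secret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), "shibuya", metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("cannot read service-account secret: %w", err)
	}
	// kubernetes.io/service-account-token secrets carry the bearer token
	// under the "token" key once the token controller has populated it.
	return string(secret.Data["token"]), nil
}
```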
ref: "https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#manual-secret-management-for-serviceaccounts" +# So we should fetch the token details from the manually created secret instead of the automatically created ones .PHONY: kubeconfig kubeconfig: ./kubernetes/generate_kubeconfig.sh $(shibuya-controller-ns) @@ -61,6 +75,7 @@ kubeconfig: permissions: kubectl -n $(shibuya-executor-ns) apply -f kubernetes/roles.yaml kubectl -n $(shibuya-controller-ns) apply -f kubernetes/serviceaccount.yaml + kubectl -n $(shibuya-controller-ns) apply -f kubernetes/service-account-secret.yaml -kubectl -n $(shibuya-executor-ns) create rolebinding shibuya --role=shibuya --serviceaccount $(shibuya-controller-ns):shibuya kubectl -n $(shibuya-executor-ns) replace -f kubernetes/ingress.yaml --force @@ -85,11 +100,3 @@ ingress-controller: # And update the image in the config.json docker build -t shibuya:ingress-controller -f ingress-controller/Dockerfile ingress-controller kind load docker-image shibuya:ingress-controller --name shibuya - -.PHONY: controller -controller: - cd shibuya && sh build.sh controller - docker build -f shibuya/Dockerfile --build-arg env=local --build-arg="binary_name=shibuya-controller" -t controller:local shibuya - kind load docker-image controller:local --name shibuya - helm uninstall shibuya || true - helm upgrade --install shibuya install/shibuya diff --git a/shibuya/Dockerfile b/shibuya/Dockerfile index b9477a9f..7642b11e 100644 --- a/shibuya/Dockerfile +++ b/shibuya/Dockerfile @@ -1,10 +1,5 @@ FROM ubuntu:18.04 -RUN apt-get update && apt-get install -y curl -RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \ - && chmod +x ./kubectl \ - && mv ./kubectl /usr/local/bin/kubectl - ARG binary_name=shibuya ADD ./build/${binary_name} /usr/local/bin/${binary_name} diff --git a/shibuya/install/shibuya/Chart.yaml b/shibuya/install/shibuya/Chart.yaml index bf46af5c..34390b9f 100644 --- a/shibuya/install/shibuya/Chart.yaml +++ b/shibuya/install/shibuya/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: v0.1.1 +version: v0.1.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
diff --git a/shibuya/install/shibuya/values.yaml b/shibuya/install/shibuya/values.yaml
index e20e664b..b1e6d39c 100644
--- a/shibuya/install/shibuya/values.yaml
+++ b/shibuya/install/shibuya/values.yaml
@@ -71,7 +71,7 @@ runtime:
   project: ""
   zone: ""
   cluster_id: ""
-  gc_duration: ""
+  gc_duration: 30
   service_type: ""
   in_cluster: true
   namespace: "shibuya-executors"
diff --git a/shibuya/scheduler/k8s.go b/shibuya/scheduler/k8s.go
index 662c3701..5b5b0ef9 100644
--- a/shibuya/scheduler/k8s.go
+++ b/shibuya/scheduler/k8s.go
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net/http"
-	"os/exec"
 	"sort"
 	"strconv"
 	"strings"
@@ -632,16 +631,26 @@ func (kcm *K8sClientManager) ServiceReachable(engineUrl string) bool {
 }
 
 func (kcm *K8sClientManager) deleteService(collectionID int64) error {
-	// Delete services by collection is not supported as of yet
-	// Wait for this PR to be merged - https://github.com/kubernetes/kubernetes/pull/85802
-	cmd := exec.Command("kubectl", "-n", kcm.Namespace, "delete", "svc", "--force", "--grace-period=0", "-l", fmt.Sprintf("collection=%d", collectionID))
-	o, err := cmd.Output()
+	// We cannot delete services by label,
+	// so we first get them by label and then delete them one by one.
+	// See: https://github.com/kubernetes/kubernetes/issues/68468#issuecomment-419981870
+	corev1Client := kcm.client.CoreV1().Services(kcm.Namespace)
+	resp, err := corev1Client.List(context.TODO(), metav1.ListOptions{
+		LabelSelector: makeCollectionLabel(collectionID),
+	})
 	if err != nil {
-		log.Printf("Cannot delete services for collection %d", collectionID)
 		return err
 	}
-	log.Print(string(o))
-	return nil
+
+	// If there are any errors during deletion, we only return the last one;
+	// the errors are likely similar, so we avoid returning a long list of them.
+	var lastError error
+	for _, svc := range resp.Items {
+		if err := corev1Client.Delete(context.TODO(), svc.Name, metav1.DeleteOptions{}); err != nil {
+			lastError = err
+		}
+	}
+	return lastError
 }
 
 func (kcm *K8sClientManager) deleteDeployment(collectionID int64) error {
@@ -672,10 +681,6 @@ func (kcm *K8sClientManager) PurgeCollection(collectionID int64) error {
 	if err != nil {
 		return err
 	}
-	err = kcm.deleteIngressRules(collectionID)
-	if err != nil {
-		return err
-	}
 	return nil
 }
 
@@ -843,15 +848,6 @@ func (kcm *K8sClientManager) CreateIngress(ingressClass, ingressName, serviceNam
 	return nil
 }
 
-func (kcm *K8sClientManager) deleteIngressRules(collectionID int64) error {
-	deletePolicy := metav1.DeletePropagationForeground
-	return kcm.client.NetworkingV1().Ingresses(kcm.Namespace).DeleteCollection(context.TODO(), metav1.DeleteOptions{
-		PropagationPolicy: &deletePolicy,
-	}, metav1.ListOptions{
-		LabelSelector: fmt.Sprintf("collection=%d", collectionID),
-	})
-}
-
 func (kcm *K8sClientManager) GetNodesByCollection(collectionID string) ([]apiv1.Node, error) {
 	opts := metav1.ListOptions{
 		LabelSelector: fmt.Sprintf("collection_id=%s", collectionID),
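A note on the deleteService rewrite above (not part of the patch): it calls a makeCollectionLabel helper that this diff does not show. Judging from the `-l collection=<id>` selector the removed kubectl command used, the helper presumably builds the same selector, roughly as in this assumed sketch:

```go
// Assumed shape of the existing helper referenced in deleteService; the real
// definition lives elsewhere in the scheduler package. It mirrors the
// "collection=<id>" label selector the removed kubectl command passed via -l.
func makeCollectionLabel(collectionID int64) string {
	return fmt.Sprintf("collection=%d", collectionID)
}
```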