diff --git a/BUILD b/BUILD
index 6ed4335..46e170e 100644
--- a/BUILD
+++ b/BUILD
@@ -10,7 +10,7 @@ go_library(
     importpath = "github.com/jdewinne/k8s-dev-scaler",
     visibility = ["//visibility:private"],
     deps = [
-        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+        "//scaler",
         "@io_k8s_client_go//kubernetes",
         "@io_k8s_client_go//rest",
         "@io_k8s_client_go//tools/clientcmd",
diff --git a/README.md b/README.md
index 2368479..4ba8278 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,12 @@
 When developing on a local k8s instance, often you have to juggle with memory, cpu, ... And when developing with multiple branches, you sometimes have your app installed in multiple namespaces. Each branch, having it's own namespace maybe...
 
-So in order to reduce your resource consumption by your k8s dev cluster, this tool allows to downscale all `deployments` and `statefulsets` to zero. It also allows to put them all back at scale `1`.
+So in order to reduce the resource consumption of your k8s dev cluster, this tool lets you downscale all `deployments` and `statefulsets` to zero, and scale them all back up afterwards. Behind the scenes it places an annotation called `k8s.dev.scaler/desired.replicas` that keeps track of the desired number of replicas.
+
+# Installation
+
++ Linux: Download from [Releases](https://github.com/jdewinne/k8s-dev-scaler/releases)
++ Linux, Mac: Install using `go get github.com/jdewinne/k8s-dev-scaler`
 
 # Usage
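Note: a quick way to see the annotation round-trip the README describes, sketched against client-go's fake clientset. The `web` deployment and `dev` namespace are made-up fixtures, and the patch payload mirrors the one the `scaler` package applies; this is an illustration, not part of the PR:

```go
package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	replicas := int32(3)
	// Seed a fake clientset with one deployment running three replicas.
	client := fake.NewSimpleClientset(&appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "dev"},
		Spec:       appsv1.DeploymentSpec{Replicas: &replicas},
	})
	deployments := client.AppsV1().Deployments("dev")

	// Scale-down path: record the desired replicas as an annotation.
	patch := fmt.Sprintf(`{"metadata":{"annotations":{"k8s.dev.scaler/desired.replicas":"%d"}}}`, replicas)
	if _, err := deployments.Patch(context.TODO(), "web", types.MergePatchType,
		[]byte(patch), metav1.PatchOptions{}); err != nil {
		panic(err)
	}

	// Scale-up path: read the annotation back.
	d, err := deployments.Get(context.TODO(), "web", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Annotations["k8s.dev.scaler/desired.replicas"]) // prints "3"
}
```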
diff --git a/main.go b/main.go
index c51831c..9846e52 100644
--- a/main.go
+++ b/main.go
@@ -1,14 +1,14 @@
 package main
 
 import (
-	"context"
 	"flag"
 	"fmt"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
+
+	"github.com/jdewinne/k8s-dev-scaler/scaler"
 )
 
 // GetKubeClient creates a Kubernetes config and client for a given kubeconfig context.
@@ -67,11 +67,6 @@ func main() {
 		panic("Scale must be up or down")
 	}
 
-	replicas := int32(0)
-	if *scale == "up" {
-		replicas = 1
-	}
-
 	// use the current context in kubeconfig
 	_, client, err := GetKubeClient(*k8scontextflag)
 	if err != nil {
@@ -79,36 +74,11 @@ func main() {
 	}
 
 	fmt.Println("Deployments")
-
-	deploymentsClient := client.AppsV1().Deployments(*namespace)
-	list, err := deploymentsClient.List(context.TODO(), metav1.ListOptions{})
-	if err != nil {
-		panic(err)
-	}
-	for _, d := range list.Items {
-		fmt.Printf(" * Scaling %s (%d to %d replicas)\n", d.Name, *d.Spec.Replicas, replicas)
-		opts, err := deploymentsClient.GetScale(context.TODO(), d.Name, metav1.GetOptions{})
-		if err != nil {
-			panic(err)
-		}
-		opts.Spec.Replicas = replicas
-		deploymentsClient.UpdateScale(context.TODO(), d.Name, opts, metav1.UpdateOptions{})
-	}
+	dscaler := scaler.NewDeploymentsScaler(client, *namespace, *scale)
+	dscaler.ScaleDeploymentResources()
 
 	fmt.Println("Stateful sets")
-	statefulSetsClient := client.AppsV1().StatefulSets(*namespace)
-	sslist, err := statefulSetsClient.List(context.TODO(), metav1.ListOptions{})
-	if err != nil {
-		panic(err)
-	}
-	for _, ss := range sslist.Items {
-		fmt.Printf(" * Scaling %s (%d to %d replicas)\n", ss.Name, *ss.Spec.Replicas, replicas)
-		opts, err := statefulSetsClient.GetScale(context.TODO(), ss.Name, metav1.GetOptions{})
-		if err != nil {
-			panic(err)
-		}
-		opts.Spec.Replicas = replicas
-		statefulSetsClient.UpdateScale(context.TODO(), ss.Name, opts, metav1.UpdateOptions{})
-	}
+	sscaler := scaler.NewStatefulSetsScaler(client, *namespace, *scale)
+	sscaler.ScaleStatefulSetResources()
 }
diff --git a/scaler/BUILD.bazel b/scaler/BUILD.bazel
new file mode 100644
index 0000000..e8c17f3
--- /dev/null
+++ b/scaler/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "scaler",
+    srcs = [
+        "deployments.go",
+        "statefulSets.go",
+    ],
+    importpath = "github.com/jdewinne/k8s-dev-scaler/scaler",
+    visibility = ["//visibility:public"],
+    deps = [
+        "@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
+        "@io_k8s_apimachinery//pkg/types",
+        "@io_k8s_client_go//kubernetes",
+        "@io_k8s_client_go//kubernetes/typed/apps/v1:apps",
+    ],
+)
diff --git a/scaler/deployments.go b/scaler/deployments.go
new file mode 100644
index 0000000..8eb7e77
--- /dev/null
+++ b/scaler/deployments.go
@@ -0,0 +1,79 @@
+package scaler
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes"
+	v1 "k8s.io/client-go/kubernetes/typed/apps/v1"
+)
+
+// DeploymentsScaler scales all deployments in a namespace up or down.
+type DeploymentsScaler struct {
+	client    v1.DeploymentInterface
+	namespace string
+	scale     string
+}
+
+// NewDeploymentsScaler instantiates a DeploymentsScaler for the given namespace and scale direction ("up" or "down").
+func NewDeploymentsScaler(client kubernetes.Interface, namespace string, scale string) *DeploymentsScaler {
+	p := new(DeploymentsScaler)
+	p.client = client.AppsV1().Deployments(namespace)
+	p.namespace = namespace
+	p.scale = scale
+	return p
+}
+
+// annotateResource stores the desired replica count in the k8s.dev.scaler/desired.replicas annotation.
+func (ds *DeploymentsScaler) annotateResource(name string, replicas int32) error {
+	payload := fmt.Sprintf(`{"metadata":{"annotations":{"k8s.dev.scaler/desired.replicas":"%d"}}}`, replicas)
+	_, err := ds.client.Patch(context.TODO(), name, types.MergePatchType, []byte(payload), metav1.PatchOptions{})
+	return err
+}
+
+// getDesiredReplicas reads the replica count back from the k8s.dev.scaler/desired.replicas annotation.
+func (ds *DeploymentsScaler) getDesiredReplicas(name string) (int32, error) {
+	deployment, err := ds.client.Get(context.TODO(), name, metav1.GetOptions{})
+	if err != nil {
+		return 0, err
+	}
+	replicas, err := strconv.Atoi(deployment.Annotations["k8s.dev.scaler/desired.replicas"])
+	return int32(replicas), err
+}
+
+// ScaleDeploymentResources will scale all deployments up or down in a namespace
+func (ds *DeploymentsScaler) ScaleDeploymentResources() {
+	resources, err := ds.client.List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		panic(err)
+	}
+	for _, r := range resources.Items {
+		// When scaling down, store the original desired number of replicas as an annotation.
+		if ds.scale == "down" {
+			err = ds.annotateResource(r.Name, *r.Spec.Replicas)
+			if err != nil {
+				panic(err.Error())
+			}
+		}
+		// When scaling up, restore the replicas from the previously stored annotation.
+		replicas := int32(0)
+		if ds.scale == "up" {
+			replicas, err = ds.getDesiredReplicas(r.Name)
+			if err != nil {
+				panic(err.Error())
+			}
+		}
+		fmt.Printf(" * Scaling %s (%d to %d replicas)\n", r.Name, *r.Spec.Replicas, replicas)
+		opts, err := ds.client.GetScale(context.TODO(), r.Name, metav1.GetOptions{})
+		if err != nil {
+			panic(err)
+		}
+		opts.Spec.Replicas = replicas
+		if _, err := ds.client.UpdateScale(context.TODO(), r.Name, opts, metav1.UpdateOptions{}); err != nil {
+			panic(err)
+		}
+	}
+}
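The patch payload above is assembled with `fmt.Sprintf`. An alternative worth considering is building the same JSON merge patch with `encoding/json`, which takes care of quoting and escaping automatically; `buildAnnotationPatch` below is a hypothetical helper for illustration, not part of the PR:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// buildAnnotationPatch marshals a nested map into the same JSON merge
// patch that annotateResource formats by hand with fmt.Sprintf.
func buildAnnotationPatch(replicas int32) ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{
				"k8s.dev.scaler/desired.replicas": fmt.Sprintf("%d", replicas),
			},
		},
	})
}

func main() {
	payload, err := buildAnnotationPatch(3)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
	// Output: {"metadata":{"annotations":{"k8s.dev.scaler/desired.replicas":"3"}}}
}
```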
diff --git a/scaler/statefulSets.go b/scaler/statefulSets.go
new file mode 100644
index 0000000..bb7fff6
--- /dev/null
+++ b/scaler/statefulSets.go
@@ -0,0 +1,79 @@
+package scaler
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes"
+	v1 "k8s.io/client-go/kubernetes/typed/apps/v1"
+)
+
+// StatefulSetsScaler scales all statefulSets in a namespace up or down.
+type StatefulSetsScaler struct {
+	client    v1.StatefulSetInterface
+	namespace string
+	scale     string
+}
+
+// NewStatefulSetsScaler instantiates a StatefulSetsScaler for the given namespace and scale direction ("up" or "down").
+func NewStatefulSetsScaler(client kubernetes.Interface, namespace string, scale string) *StatefulSetsScaler {
+	p := new(StatefulSetsScaler)
+	p.client = client.AppsV1().StatefulSets(namespace)
+	p.namespace = namespace
+	p.scale = scale
+	return p
+}
+
+// annotateResource stores the desired replica count in the k8s.dev.scaler/desired.replicas annotation.
+func (ss *StatefulSetsScaler) annotateResource(name string, replicas int32) error {
+	payload := fmt.Sprintf(`{"metadata":{"annotations":{"k8s.dev.scaler/desired.replicas":"%d"}}}`, replicas)
+	_, err := ss.client.Patch(context.TODO(), name, types.MergePatchType, []byte(payload), metav1.PatchOptions{})
+	return err
+}
+
+// getDesiredReplicas reads the replica count back from the k8s.dev.scaler/desired.replicas annotation.
+func (ss *StatefulSetsScaler) getDesiredReplicas(name string) (int32, error) {
+	statefulSet, err := ss.client.Get(context.TODO(), name, metav1.GetOptions{})
+	if err != nil {
+		return 0, err
+	}
+	replicas, err := strconv.Atoi(statefulSet.Annotations["k8s.dev.scaler/desired.replicas"])
+	return int32(replicas), err
+}
+
+// ScaleStatefulSetResources will scale all statefulSets up or down in a namespace
+func (ss *StatefulSetsScaler) ScaleStatefulSetResources() {
+	resources, err := ss.client.List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		panic(err)
+	}
+	for _, r := range resources.Items {
+		// When scaling down, store the original desired number of replicas as an annotation.
+		if ss.scale == "down" {
+			err = ss.annotateResource(r.Name, *r.Spec.Replicas)
+			if err != nil {
+				panic(err.Error())
+			}
+		}
+		// When scaling up, restore the replicas from the previously stored annotation.
+		replicas := int32(0)
+		if ss.scale == "up" {
+			replicas, err = ss.getDesiredReplicas(r.Name)
+			if err != nil {
+				panic(err.Error())
+			}
+		}
+		fmt.Printf(" * Scaling %s (%d to %d replicas)\n", r.Name, *r.Spec.Replicas, replicas)
+		opts, err := ss.client.GetScale(context.TODO(), r.Name, metav1.GetOptions{})
+		if err != nil {
+			panic(err)
+		}
+		opts.Spec.Replicas = replicas
+		if _, err := ss.client.UpdateScale(context.TODO(), r.Name, opts, metav1.UpdateOptions{}); err != nil {
+			panic(err)
+		}
+	}
+}
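One edge case the PR leaves open: scaling up a workload that was never scaled down by this tool. The annotation is then absent, so `getDesiredReplicas` fails the `strconv.Atoi` parse and the run panics. A minimal sketch of a more forgiving lookup, assuming a caller-chosen default (such as one replica) is an acceptable policy; the function name and default are illustrative, not part of the PR:

```go
package main

import (
	"fmt"
	"strconv"
)

// desiredReplicasOrDefault is a hypothetical variant of getDesiredReplicas:
// if the k8s.dev.scaler/desired.replicas annotation is missing or does not
// parse, fall back to a default instead of failing.
func desiredReplicasOrDefault(annotations map[string]string, def int32) int32 {
	raw, ok := annotations["k8s.dev.scaler/desired.replicas"]
	if !ok {
		return def
	}
	if n, err := strconv.Atoi(raw); err == nil {
		return int32(n)
	}
	return def
}

func main() {
	fmt.Println(desiredReplicasOrDefault(map[string]string{}, 1)) // 1: annotation absent
	fmt.Println(desiredReplicasOrDefault(
		map[string]string{"k8s.dev.scaler/desired.replicas": "3"}, 1)) // 3: annotation honored
}
```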