Skip to content

Commit

Permalink
Keep track of desired scale
Browse files Browse the repository at this point in the history
  • Loading branch information
jdewinne committed Dec 18, 2020
1 parent a13a7a4 commit 1379eda
Show file tree
Hide file tree
Showing 6 changed files with 186 additions and 38 deletions.
2 changes: 1 addition & 1 deletion BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ go_library(
importpath = "github.com/jdewinne/k8s-dev-scaler",
visibility = ["//visibility:private"],
deps = [
"@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
"//scaler",
"@io_k8s_client_go//kubernetes",
"@io_k8s_client_go//rest",
"@io_k8s_client_go//tools/clientcmd",
Expand Down
7 changes: 6 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,12 @@

When developing on a local k8s instance, often you have to juggle with memory, cpu, ... And when developing with multiple branches, you sometimes have your app installed in multiple namespaces. Each branch, having its own namespace maybe...

So in order to reduce your resource consumption by your k8s dev cluster, this tool allows to downscale all `deployments` and `statefulsets` to zero. It also allows to put them all back at scale `1`.
So in order to reduce your resource consumption by your k8s dev cluster, this tool allows to downscale all `deployments` and `statefulsets` to zero. It also allows to scale them all back up. Behind the scenes it places an annotation called `k8s.dev.scaler/desired.replicas` that keeps track of the desired number of replicas.

# Installation

+ Linux: Download from [Releases](https://github.com/jdewinne/k8s-dev-scaler/releases)
+ Linux, Mac: Install using `go get github.com/jdewinne/k8s-dev-scaler`

# Usage

Expand Down
42 changes: 6 additions & 36 deletions main.go
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
package main

import (
"context"
"flag"
"fmt"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"

scaler "github.com/jdewinne/k8s-dev-scaler/scaler"
)

// GetKubeClient creates a Kubernetes config and client for a given kubeconfig context.
Expand Down Expand Up @@ -67,48 +67,18 @@ func main() {
panic("Scale must be up or down")
}

replicas := int32(0)
if *scale == "up" {
replicas = 1
}

// use the current context in kubeconfig
_, client, err := GetKubeClient(*k8scontextflag)
if err != nil {
panic(err.Error())
}

fmt.Println("Deployments")

deploymentsClient := client.AppsV1().Deployments(*namespace)
list, err := deploymentsClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
panic(err)
}
for _, d := range list.Items {
fmt.Printf(" * Scaling %s (%d to %d replicas)\n", d.Name, *d.Spec.Replicas, replicas)
opts, err := deploymentsClient.GetScale(context.TODO(), d.Name, metav1.GetOptions{})
if err != nil {
panic(err)
}
opts.Spec.Replicas = replicas
deploymentsClient.UpdateScale(context.TODO(), d.Name, opts, metav1.UpdateOptions{})
}
dscaler := scaler.NewDeploymentsScaler(client, *namespace, *scale)
dscaler.ScaleDeploymentResources()

fmt.Println("Stateful sets")
statefulSetsClient := client.AppsV1().StatefulSets(*namespace)
sslist, err := statefulSetsClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
panic(err)
}
for _, ss := range sslist.Items {
fmt.Printf(" * Scaling %s (%d to %d replicas)\n", ss.Name, *ss.Spec.Replicas, replicas)
opts, err := statefulSetsClient.GetScale(context.TODO(), ss.Name, metav1.GetOptions{})
if err != nil {
panic(err)
}
opts.Spec.Replicas = replicas
statefulSetsClient.UpdateScale(context.TODO(), ss.Name, opts, metav1.UpdateOptions{})
}
sscaler := scaler.NewStatefulSetsScaler(client, *namespace, *scale)
sscaler.ScaleStatefulSetResources()

}
17 changes: 17 additions & 0 deletions scaler/BUILD.bazel
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "scaler",
srcs = [
"deployments.go",
"statefulSets.go",
],
importpath = "github.com/jdewinne/k8s-dev-scaler/scaler",
visibility = ["//visibility:public"],
deps = [
"@io_k8s_apimachinery//pkg/apis/meta/v1:meta",
"@io_k8s_apimachinery//pkg/types",
"@io_k8s_client_go//kubernetes",
"@io_k8s_client_go//kubernetes/typed/apps/v1:apps",
],
)
77 changes: 77 additions & 0 deletions scaler/deployments.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
package scaler

import (
"context"
"fmt"
"strconv"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// DeploymentsScaler allows to scale up or down all deployments
// in a single namespace, remembering the original replica count
// in the k8s.dev.scaler/desired.replicas annotation.
type DeploymentsScaler struct {

	// client is the namespaced apps/v1 deployments API used to
	// list, patch and scale the resources.
	client v1.DeploymentInterface
	// namespace the scaler operates on (kept for reference; the
	// client above is already bound to it).
	namespace string
	// scale is the direction of the operation: "up" or "down".
	scale string
}

// NewDeploymentsScaler returns a scaler bound to the deployments of the
// given namespace. scale selects the direction: "up" or "down".
func NewDeploymentsScaler(client kubernetes.Interface, namespace string, scale string) *DeploymentsScaler {
	return &DeploymentsScaler{
		client:    client.AppsV1().Deployments(namespace),
		namespace: namespace,
		scale:     scale,
	}
}

// annotateResource records the desired replica count on the named
// deployment via a merge patch, so it can be restored on scale up.
func (ds *DeploymentsScaler) annotateResource(name string, replicas int32) error {
	patch := `{"metadata":{"annotations":{"k8s.dev.scaler/desired.replicas":"` +
		strconv.FormatInt(int64(replicas), 10) + `"}}}`
	_, err := ds.client.Patch(context.TODO(), name, types.MergePatchType, []byte(patch), metav1.PatchOptions{})
	return err
}

// getDesiredReplicas reads the k8s.dev.scaler/desired.replicas annotation
// stored on the named deployment when it was scaled down.
//
// Fixes over the previous version: the Get error is returned to the caller
// instead of panicking (the signature already promises an error), and the
// strconv.Atoi error is no longer ignored — a missing annotation now falls
// back to 1 replica (the workload was never scaled down by this tool) and a
// present-but-unparsable value is reported as an error, instead of both
// silently yielding 0 and keeping the workload down on "up".
func (ds *DeploymentsScaler) getDesiredReplicas(name string) (int32, error) {
	deployment, err := ds.client.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	raw, ok := deployment.Annotations["k8s.dev.scaler/desired.replicas"]
	if !ok {
		// No stored value: default to a single replica so "up" still
		// brings the workload online.
		return 1, nil
	}
	replicas, err := strconv.Atoi(raw)
	if err != nil {
		return 0, fmt.Errorf("parsing desired replicas annotation of %s: %w", name, err)
	}
	return int32(replicas), nil
}

// ScaleDeploymentResources will scale all deployments up or down in a namespace.
//
// On "down" the current replica count is first stored in the
// k8s.dev.scaler/desired.replicas annotation and the deployment is scaled to
// zero. On "up" the annotated value is restored.
func (ds *DeploymentsScaler) ScaleDeploymentResources() {
	resources, err := ds.client.List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, r := range resources.Items {
		// store original desired number of replicas as an annotation
		if ds.scale == "down" {
			if err := ds.annotateResource(r.Name, *r.Spec.Replicas); err != nil {
				panic(err.Error())
			}
		}
		// If scaling up, get the replicas from the previously stored annotation
		replicas := int32(0)
		if ds.scale == "up" {
			replicas, err = ds.getDesiredReplicas(r.Name)
			if err != nil {
				panic(err.Error())
			}
		}
		fmt.Printf(" * Scaling %s (%d to %d replicas)\n", r.Name, *r.Spec.Replicas, replicas)
		opts, err := ds.client.GetScale(context.TODO(), r.Name, metav1.GetOptions{})
		if err != nil {
			panic(err)
		}
		opts.Spec.Replicas = replicas
		// Fix: the UpdateScale error was previously discarded, so a failed
		// scale update went unnoticed.
		if _, err := ds.client.UpdateScale(context.TODO(), r.Name, opts, metav1.UpdateOptions{}); err != nil {
			panic(err)
		}
	}
}
79 changes: 79 additions & 0 deletions scaler/statefulSets.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
package scaler

import (
"context"
"fmt"
"strconv"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// StatefulSetsScaler allows to scale up or down all statefulSets
// in a single namespace, remembering the original replica count
// in the k8s.dev.scaler/desired.replicas annotation.
type StatefulSetsScaler struct {

	// client is the namespaced apps/v1 statefulsets API used to
	// list, patch and scale the resources.
	client v1.StatefulSetInterface
	// namespace the scaler operates on (kept for reference; the
	// client above is already bound to it).
	namespace string
	// scale is the direction of the operation: "up" or "down".
	scale string
}

// NewStatefulSetsScaler returns a scaler bound to the stateful sets of the
// given namespace. scale selects the direction: "up" or "down".
func NewStatefulSetsScaler(client kubernetes.Interface, namespace string, scale string) *StatefulSetsScaler {
	return &StatefulSetsScaler{
		client:    client.AppsV1().StatefulSets(namespace),
		namespace: namespace,
		scale:     scale,
	}
}

// annotateResource records the desired replica count on the named
// stateful set via a merge patch, so it can be restored on scale up.
func (ss *StatefulSetsScaler) annotateResource(name string, replicas int32) error {
	patch := `{"metadata":{"annotations":{"k8s.dev.scaler/desired.replicas":"` +
		strconv.FormatInt(int64(replicas), 10) + `"}}}`
	_, err := ss.client.Patch(context.TODO(), name, types.MergePatchType, []byte(patch), metav1.PatchOptions{})
	return err
}

// getDesiredReplicas fetches the value from the k8s.dev.scaler/desired.replicas
// annotation stored on the named stateful set when it was scaled down.
//
// Fixes over the previous version: the Get error is returned to the caller
// instead of panicking (the signature already promises an error), and the
// strconv.Atoi error is no longer ignored — a missing annotation now falls
// back to 1 replica (the workload was never scaled down by this tool) and a
// present-but-unparsable value is reported as an error, instead of both
// silently yielding 0 and keeping the workload down on "up".
func (ss *StatefulSetsScaler) getDesiredReplicas(name string) (int32, error) {
	statefulSet, err := ss.client.Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	raw, ok := statefulSet.Annotations["k8s.dev.scaler/desired.replicas"]
	if !ok {
		// No stored value: default to a single replica so "up" still
		// brings the workload online.
		return 1, nil
	}
	replicas, err := strconv.Atoi(raw)
	if err != nil {
		return 0, fmt.Errorf("parsing desired replicas annotation of %s: %w", name, err)
	}
	return int32(replicas), nil
}

// ScaleStatefulSetResources will scale all stateful sets up or down in a
// namespace. (Doc fix: the previous comment said "deployments".)
//
// On "down" the current replica count is first stored in the
// k8s.dev.scaler/desired.replicas annotation and the stateful set is scaled
// to zero. On "up" the annotated value is restored.
func (ss *StatefulSetsScaler) ScaleStatefulSetResources() {
	resources, err := ss.client.List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, r := range resources.Items {
		// store original desired number of replicas as an annotation
		if ss.scale == "down" {
			if err := ss.annotateResource(r.Name, *r.Spec.Replicas); err != nil {
				panic(err.Error())
			}
		}
		// If scaling up, get the replicas from the previously stored annotation
		replicas := int32(0)
		if ss.scale == "up" {
			replicas, err = ss.getDesiredReplicas(r.Name)
			if err != nil {
				panic(err.Error())
			}
		}
		fmt.Printf(" * Scaling %s (%d to %d replicas)\n", r.Name, *r.Spec.Replicas, replicas)
		opts, err := ss.client.GetScale(context.TODO(), r.Name, metav1.GetOptions{})
		if err != nil {
			panic(err)
		}
		opts.Spec.Replicas = replicas
		// Fix: the UpdateScale error was previously discarded, so a failed
		// scale update went unnoticed.
		if _, err := ss.client.UpdateScale(context.TODO(), r.Name, opts, metav1.UpdateOptions{}); err != nil {
			panic(err)
		}
	}
}

0 comments on commit 1379eda

Please sign in to comment.