diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 0000000..7da4a7a --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,28 @@ +# This workflow will build a golang project +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go + +name: Go + +on: + push: + branches: [ "main", "dev"] + pull_request: + branches: [ "main", "dev" ] + +jobs: + + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.22.x' + + - name: Build + run: go build -v ./... + + - name: Test + run: go test -v ./... diff --git a/cmd/_your_app_/.keep b/cmd/_your_app_/.keep deleted file mode 100644 index e69de29..0000000 diff --git a/cmd/envoy/Dockerfile b/cmd/envoy/Dockerfile new file mode 100644 index 0000000..0a8db9f --- /dev/null +++ b/cmd/envoy/Dockerfile @@ -0,0 +1,33 @@ +# Use the official Golang image as the base image +FROM golang:bullseye AS builder + +# Set the working directory +WORKDIR /app + +# Copy the source code into the working directory +COPY . . + +WORKDIR /app/cmd/envoy +# Build the Go program +RUN go build -o /envoy + +# Use the official Debian image as the base image +FROM debian:bullseye + +# Set the working directory +WORKDIR /app + +# Copy the compiled Go binary from the build stage +COPY --from=builder /envoy . + +# Create a user and group with UID 1337 +RUN groupadd -g 1337 envoy_proxy_group && useradd -u 1337 -g envoy_proxy_group -m envoy_proxy + +# Change ownership of the files to the newly created user and group +RUN chown -R envoy_proxy:envoy_proxy_group /app + +# Switch to the non-privileged user +USER envoy_proxy + +# Set the default command +CMD ["/app/envoy"] diff --git a/cmd/envoy/envoy.go b/cmd/envoy/envoy.go new file mode 100644 index 0000000..5ec2018 --- /dev/null +++ b/cmd/envoy/envoy.go @@ -0,0 +1,14 @@ +package main + +import ( + "log" + "minikubernetes/pkg/microservice/envoy" +) + +func main() { + e, err := envoy.NewEnvoy("10.119.12.123") + if err != nil { + log.Fatalf("Failed to create envoy: %v", err) + } + e.Run() +} diff --git a/cmd/envoyinit/Dockerfile b/cmd/envoyinit/Dockerfile new file mode 100644 index 0000000..9bb8054 --- /dev/null +++ b/cmd/envoyinit/Dockerfile @@ -0,0 +1,38 @@ +# Use the official Golang image as the base image +FROM golang:bullseye AS builder + +# Set the working directory +WORKDIR /app + +# Copy the source code into the working directory +COPY . . + +WORKDIR /app/cmd/envoyinit +# Build the Go program +RUN go build -o /envoy-init + +# Use the official Debian image as the base image +FROM debian:bullseye + +USER root + +# Install iptables +RUN apt-get update && apt-get install -y iptables && rm -rf /var/lib/apt/lists/* + +# Set the working directory +WORKDIR /app + +# Copy the compiled Go binary from the build stage +COPY --from=builder /envoy-init . 
+ +## Create a user and group with UID 1337 +#RUN groupadd -g 1337 envoy_proxy_group && useradd -u 1337 -g envoy_proxy_group -m envoy_proxy +# +## Change ownership of the files to the newly created user and group +#RUN chown -R envoy_proxy:envoy_proxy_group /app +# +## Switch to the non-privileged user +#USER envoy_proxy + +# Set the default command +CMD ["/app/envoy-init"] diff --git a/cmd/envoyinit/envoy_init.go b/cmd/envoyinit/envoy_init.go new file mode 100644 index 0000000..7b1a167 --- /dev/null +++ b/cmd/envoyinit/envoy_init.go @@ -0,0 +1,18 @@ +package main + +import ( + "log" + envoyinit "minikubernetes/pkg/microservice/envoy/init" +) + +func main() { + e, err := envoyinit.NewEnvoyInit() + if err != nil { + log.Fatalf("Failed to create envoy: %v", err) + } + err = e.Init() + if err != nil { + log.Fatalf("Failed to init envoy: %v", err) + } + log.Println("Envoy init success.") +} diff --git a/cmd/kube-apiserver/apiserver.go b/cmd/kube-apiserver/apiserver.go new file mode 100644 index 0000000..282c3fc --- /dev/null +++ b/cmd/kube-apiserver/apiserver.go @@ -0,0 +1,13 @@ +package main + +import "minikubernetes/pkg/kubeapiserver/app" + +/* This is the starting interface of apiserver in main */ + +func main() { + kubeApiServer, err := app.NewKubeApiServer() + if err != nil { + return + } + kubeApiServer.Run() +} diff --git a/cmd/kube-controller-manager/controllerManager.go b/cmd/kube-controller-manager/controllerManager.go new file mode 100644 index 0000000..3666990 --- /dev/null +++ b/cmd/kube-controller-manager/controllerManager.go @@ -0,0 +1,10 @@ +package main + +import ( + "minikubernetes/pkg/controller" +) + +func main() { + cm := controller.NewControllerManager("192.168.1.10") + cm.Run() +} diff --git a/cmd/kubectl/kubectl.go b/cmd/kubectl/kubectl.go new file mode 100644 index 0000000..eeac027 --- /dev/null +++ b/cmd/kubectl/kubectl.go @@ -0,0 +1,7 @@ +package main + +import "minikubernetes/pkg/kubectl/cmd" + +func main() { + cmd.Execute() +} diff --git a/cmd/kubectl/testyaml/dns.yaml b/cmd/kubectl/testyaml/dns.yaml new file mode 100644 index 0000000..680c9a4 --- /dev/null +++ b/cmd/kubectl/testyaml/dns.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: DNS +metadata: + name: nginx-dns +spec: + rules: + - host: myservice.com + paths: + - path: /service1 + backend: + service: + name: nginx-service + port: 801 + - path: /service2 + backend: + service: + name: nginx-service-2 + port: 802 diff --git a/cmd/kubectl/testyaml/hpa-cpu.yaml b/cmd/kubectl/testyaml/hpa-cpu.yaml new file mode 100644 index 0000000..333eed0 --- /dev/null +++ b/cmd/kubectl/testyaml/hpa-cpu.yaml @@ -0,0 +1,28 @@ +kind: HorizontalPodAutoscaler +apiVersion: v1 +metadata: + name: test-hpa +spec: + scaleTargetRef: + kind: ReplicaSet + name: nginx-replicaset + namespace: default + minReplicas: 1 + maxReplicas: 3 + scaleWindowSeconds: 20 + metrics: + - name: cpu + target: + type: Utilization + averageUtilization: 50 + upperThreshold: 80 + lowerThreshold: 20 + behavior: + scaleUp: + type: Pods + value: 1 + periodSeconds: 60 + scaleDown: + type: Pods + value: 1 + periodSeconds: 60 diff --git a/cmd/kubectl/testyaml/hpa-memory.yaml b/cmd/kubectl/testyaml/hpa-memory.yaml new file mode 100644 index 0000000..cad1a65 --- /dev/null +++ b/cmd/kubectl/testyaml/hpa-memory.yaml @@ -0,0 +1,26 @@ +kind: HorizontalPodAutoscaler +apiVersion: v1 +metadata: + name: test-hpa +spec: + scaleTargetRef: + kind: ReplicaSet + name: nginx-replicaset + namespace: default + minReplicas: 1 + maxReplicas: 3 + scaleWindowSeconds: 20 + metrics: + - name: memory + target: + type: AverageValue + AverageValue: 100 + behavior: + scaleUp: + type: Pods + value: 1 + 
periodSeconds: 60 + scaleDown: + type: Pods + value: 1 + periodSeconds: 60 diff --git a/cmd/kubectl/testyaml/pod.yaml b/cmd/kubectl/testyaml/pod.yaml new file mode 100644 index 0000000..e8fff71 --- /dev/null +++ b/cmd/kubectl/testyaml/pod.yaml @@ -0,0 +1,14 @@ +kind: Pod +apiVersion: v1 +metadata: + name: nginx-pod-1 + namespace: default + labels: + app: nginx +spec: + containers: + - name: container + image: python:latest + ports: + - containerPort: 1024 + protocol: tcp \ No newline at end of file diff --git a/cmd/kubectl/testyaml/replicaset.yaml b/cmd/kubectl/testyaml/replicaset.yaml new file mode 100644 index 0000000..c83b33c --- /dev/null +++ b/cmd/kubectl/testyaml/replicaset.yaml @@ -0,0 +1,23 @@ +kind: ReplicaSet +apiVersion: v1 +metadata: + name: nginx-replicaset + namespace: default +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + template: + metadata: + name: nginx-pod + namespace: default + labels: + app: nginx + spec: + containers: + - name: container + image: python:latest + ports: + - containerPort: 1024 + protocol: tcp \ No newline at end of file diff --git a/cmd/kubectl/testyaml/rolling-update.yaml b/cmd/kubectl/testyaml/rolling-update.yaml new file mode 100644 index 0000000..e67695f --- /dev/null +++ b/cmd/kubectl/testyaml/rolling-update.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: RollingUpdate +metadata: + name: my-ru +spec: + serviceRef: reviews + port: 9080 + minimumAlive: 1 + interval: 20 + newPodSpec: + containers: + - name: reviews + image: istio/examples-bookinfo-reviews-v3:1.19.1 + ports: + - containerPort: 9080 + protocol: tcp + - name: envoy-proxy + image: sjtuzc/envoy:1.1 + securityContext: + runAsUser: 1337 + initContainers: + - name: proxy-init + image: sjtuzc/envoy-init:latest + securityContext: + privileged: true diff --git a/cmd/kubectl/testyaml/service.yaml b/cmd/kubectl/testyaml/service.yaml new file mode 100644 index 0000000..98f6cf0 --- /dev/null +++ b/cmd/kubectl/testyaml/service.yaml @@ -0,0 +1,13 @@ +kind: Service +apiVersion: v1 +metadata: + name: nginx-service + +spec: + type: NodePort + ports: + - port: 801 + targetPort: 1024 + nodePort: 30081 + selector: + app: nginx diff --git a/cmd/kubectl/testyaml/subset.yaml b/cmd/kubectl/testyaml/subset.yaml new file mode 100644 index 0000000..681f67a --- /dev/null +++ b/cmd/kubectl/testyaml/subset.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Subset +metadata: + name: nginx-v1 + namespace: default +spec: + pods: + - nginx-pod-1 + - nginx-pod-2 diff --git a/cmd/kubectl/testyaml/virtual-service.yaml b/cmd/kubectl/testyaml/virtual-service.yaml new file mode 100644 index 0000000..7c4cde6 --- /dev/null +++ b/cmd/kubectl/testyaml/virtual-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: VirtualService +metadata: + name: nginx-vs + namespace: default +spec: + serviceRef: nginx-service + port: 802 + subsets: + - name: nginx-v1 + weight: 1 diff --git a/cmd/kubelet/kubelet.go b/cmd/kubelet/kubelet.go new file mode 100644 index 0000000..d16214b --- /dev/null +++ b/cmd/kubelet/kubelet.go @@ -0,0 +1,58 @@ +package main + +import ( + "encoding/json" + "fmt" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubectl/utils" + "minikubernetes/pkg/kubelet/app" + "net" + "os" +) + +func usage() { + fmt.Println("usage: kubelet -j|--join [-c|--config ]") + os.Exit(1) +} + +func main() { + if len(os.Args) != 3 && len(os.Args) != 5 { + usage() + } + if os.Args[1] != "-j" && os.Args[1] != "--join" { + usage() + } + ip := os.Args[2] + if net.ParseIP(ip) == nil { + fmt.Printf("invalid ip: %s\n", ip) + os.Exit(1) + 
} + var node v1.Node + if len(os.Args) == 5 { + if os.Args[3] != "-c" && os.Args[3] != "--config" { + usage() + } + filename := os.Args[4] + yamlBytes, err := os.ReadFile(filename) + if err != nil { + fmt.Printf("failed to read file %s: %v\n", filename, err) + os.Exit(1) + } + jsonBytes, err := utils.YAML2JSON(yamlBytes) + if err != nil { + fmt.Printf("failed to parse yaml file\n") + os.Exit(1) + } + err = json.Unmarshal(jsonBytes, &node) + if err != nil { + fmt.Printf("failed to parse yaml file\n") + os.Exit(1) + } + } + kubeletServer, err := app.NewKubeletServer(ip, &node) + // kubeletServer, err := app.NewKubeletServer("10.119.12.123") + if err != nil { + return + } + kubeletServer.Run() +} diff --git a/cmd/kubeproxy/kubeproxy.go b/cmd/kubeproxy/kubeproxy.go new file mode 100644 index 0000000..eeef83d --- /dev/null +++ b/cmd/kubeproxy/kubeproxy.go @@ -0,0 +1,11 @@ +package main + +import "minikubernetes/pkg/kubeproxy/app" + +func main() { + server, err := app.NewProxyServer("10.119.12.123") + if err != nil { + panic(err) + } + server.Run() +} diff --git a/cmd/pilot/pilot.go b/cmd/pilot/pilot.go new file mode 100644 index 0000000..8d71887 --- /dev/null +++ b/cmd/pilot/pilot.go @@ -0,0 +1,14 @@ +package main + +import ( + "fmt" + "minikubernetes/pkg/microservice/pilot" +) + +func main() { + rsm := pilot.NewPilot("192.168.1.10") + err := rsm.Start() + if err != nil { + fmt.Println(err) + } +} diff --git a/cmd/replicaset-controller/replicasetController.go b/cmd/replicaset-controller/replicasetController.go new file mode 100644 index 0000000..c394f9a --- /dev/null +++ b/cmd/replicaset-controller/replicasetController.go @@ -0,0 +1,13 @@ +package main + +import ( + "minikubernetes/pkg/controller/replicaset" +) + +func main() { + manager := replicaset.NewReplicasetManager("192.168.1.10") + err := manager.RunRSC() + if err != nil { + return + } +} diff --git a/cmd/scheduler/scheduler.go b/cmd/scheduler/scheduler.go new file mode 100644 index 0000000..7c48a07 --- /dev/null +++ b/cmd/scheduler/scheduler.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + scheduler2 "minikubernetes/pkg/scheduler" + "os" +) + +func main() { + var policy string + if len(os.Args) < 2 { + policy = "Round_Policy" + } else { + policy = os.Args[1] + } + + switch policy { + case "Round_Policy": + break + case "Random_Policy": + break + case "NodeAffinity_Policy": + break + default: + fmt.Println("Invalid policy") + os.Exit(1) + } + scheduler := scheduler2.NewScheduler("10.119.12.123", policy) + scheduler.Run() +} diff --git a/go.mod b/go.mod index e393682..ec9b5e1 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,87 @@ module minikubernetes go 1.22 + +require ( + github.com/coreos/go-iptables v0.7.0 + github.com/docker/docker v26.0.2+incompatible + github.com/docker/go-connections v0.5.0 + github.com/google/uuid v1.6.0 + github.com/moby/ipvs v1.1.0 + github.com/spf13/cobra v1.8.0 + github.com/vishvananda/netlink v1.1.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/bytedance/sonic v1.9.1 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.14.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + 
github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.11 // indirect + github.com/vishvananda/netns v0.0.2 // indirect + go.etcd.io/etcd/api/v3 v3.5.13 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.17.0 // indirect + golang.org/x/arch v0.3.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/grpc v1.63.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + k8s.io/klog/v2 v2.100.1 // indirect +) + +require ( + github.com/Microsoft/go-winio v0.4.15 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/gin-gonic/gin v1.9.1 + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/cadvisor v0.49.1 + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/olekukonko/tablewriter v0.0.5 + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + go.etcd.io/etcd/client/v3 v3.5.13 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect + go.opentelemetry.io/otel v1.25.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 // indirect + go.opentelemetry.io/otel/metric v1.25.0 // indirect + go.opentelemetry.io/otel/sdk v1.25.0 // indirect + go.opentelemetry.io/otel/trace v1.25.0 // indirect + golang.org/x/sys v0.18.0 + golang.org/x/time v0.5.0 // indirect + gotest.tools/v3 v3.5.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..1e4b6c9 --- /dev/null +++ b/go.sum @@ -0,0 +1,244 @@ +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/bytedance/sonic 
v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= +github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8= +github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v26.0.2+incompatible h1:yGVmKUFGgcxA6PXWAokO0sQL22BrQ67cgVjko8tGdXE= +github.com/docker/docker v26.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= 
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= +github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/cadvisor v0.49.1 h1:9M++63nWvdq6Oci6wUDuAfQNTZpuz1ZObln0Bhs9xN0= +github.com/google/cadvisor v0.49.1/go.mod h1:s6Fqwb2KiWG6leCegVhw4KW40tf9f7m+SF1aXiE8Wsk= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod 
h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ= +github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.2 h1:Cn05BRLm+iRP/DZxyVSsfVyrzgjDbwHwkVt38qvXnNI= +github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4= +go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c= +go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg= +go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8= +go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js= +go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8= +go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= +go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 h1:dT33yIHtmsqpixFsSQPwNeY5drM9wTcoL8h0FWF4oGM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 h1:Mbi5PKN7u322woPa85d7ebZ+SOvEoPvoiBu+ryHWgfA= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0/go.mod h1:e7ciERRhZaOZXVjx5MiL8TK5+Xv7G5Gv5PA2ZDEJdL8= +go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= +go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= +go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo= +go.opentelemetry.io/otel/sdk v1.25.0/go.mod h1:oFgzCM2zdsxKzz6zwpTZYLLQsFwc+K0daArPdIhuxkw= +go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= +go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= +go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= +go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= +golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/pkg/_your_public_lib_/.keep b/pkg/_your_public_lib_/.keep deleted file mode 100644 index e69de29..0000000 diff --git a/pkg/api/v1/endpoint.go b/pkg/api/v1/endpoint.go new file mode 100644 index 0000000..ed99b9d --- /dev/null +++ b/pkg/api/v1/endpoint.go @@ -0,0 +1,18 @@ +package v1 + +// 一个Endpoint指向一个Pod +type Endpoint struct { + //PodName string + IP string + Ports []EndpointPort +} + +type EndpointPort struct { + Port int32 + Protocol Protocol +} + +type ServiceAndEndpoints struct { + Service *Service + EndpointsMapWithPodName map[string]Endpoint +} diff --git a/pkg/api/v1/replicaset.go b/pkg/api/v1/replicaset.go new file mode 100644 index 0000000..f924ba6 --- /dev/null +++ b/pkg/api/v1/replicaset.go @@ -0,0 +1,33 @@ +package v1 + +// ReplicaSet : 用来存储备份的ReplicaSet类 +type ReplicaSet struct { + TypeMeta `json:",inline"` + + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // ReplicaSet内规格 + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// ReplicaSetSpec : ReplicaSet内规格类 +type ReplicaSetSpec struct { + // 备份数 + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // 筛选相关联pod的selector + Selector *LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // ReplicaSet内创建pod的模板 + Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +type LabelSelector struct { + MatchLabels map[string]string `json:"matchLabels,omitempty" yaml:"matchLabels" protobuf:"bytes,1,rep,name=matchLabels"` +} + +type PodTemplateSpec struct { + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec PodSpec `json:"spec,omitempty"` +} diff --git a/pkg/api/v1/response.go b/pkg/api/v1/response.go new file mode 100644 index 0000000..d5c1c9d --- /dev/null +++ b/pkg/api/v1/response.go @@ -0,0 +1,6 @@ +package v1 + +type BaseResponse[T any] struct { + Data T `json:"data,omitempty"` + Error string `json:"error,omitempty"` +} diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go new file mode 100644 index 0000000..9ec3e0c --- /dev/null +++ b/pkg/api/v1/types.go @@ -0,0 +1,517 @@ +package v1 + +import ( + "time" +) + +type UID string + +type PodPhase string + +const ( + // 已创建,未运行 + PodPending PodPhase = "Pending" + // 正在运行 + PodRunning PodPhase = "Running" + // 已退出,退出代码0 + PodSucceeded PodPhase = "Succeeded" + // 已退出,退出代码非0 + PodFailed PodPhase = "Failed" +) + +type ResourceName string + +const ( + // CPU核数 + ResourceCPU 
ResourceName = "cpu" + // 内存大小 + ResourceMemory ResourceName = "memory" +) + +// 暂时用string表示资源,动态解析 +type ResourceList map[ResourceName]string + +type RestartPolicy string + +const ( + RestartPolicyAlways RestartPolicy = "Always" + RestartPolicyOnFailure RestartPolicy = "OnFailure" + RestartPolicyNever RestartPolicy = "Never" +) + +type TypeMeta struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} + +type ObjectMeta struct { + Name string `json:"name,omitempty"` + + Namespace string `json:"namespace,omitempty"` + + // 由apiserver生成 + UID UID `json:"uid,omitempty"` + + // 由apiserver生成 + CreationTimestamp time.Time `json:"creationTimestamp,omitempty"` + + Labels map[string]string `json:"labels,omitempty"` +} + +type Volume struct { + Name string `json:"name"` + VolumeSource `json:",inline"` +} + +type VolumeSource struct { + HostPath *HostPathVolumeSource `json:"hostPath,omitempty"` + EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty"` +} + +// 挂载主机目录 +// volume: +// - name: xxx +// hostPath: +// path: /data +type HostPathVolumeSource struct { + Path string `json:"path"` +} + +// 自动创建空目录 +// volume: +// - name: xxx +// emptyDir: {} +type EmptyDirVolumeSource struct { +} + +type Container struct { + // 容器名称 + Name string `json:"name"` + // 容器镜像(包括版本) + Image string `json:"image,omitempty"` + // entrypoint命令 + Command []string `json:"command,omitempty"` + // 容器暴露端口 + Ports []ContainerPort `json:"ports,omitempty"` + // 容器资源限制 + Resources ResourceRequirements `json:"resources,omitempty"` + // 卷挂载点 + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty"` + + SecurityContext *SecurityContext `json:"securityContext,omitempty"` +} + +// ports: +// - containerPort: 80 +type ContainerPort struct { + ContainerPort int32 `json:"containerPort"` + Protocol Protocol `json:"protocol,omitempty"` +} + +// resources: +// +// limits: +// cpu: 2 +// requests: +// cpu: 1 +type ResourceRequirements struct { + // 资源上限 + Limits ResourceList `json:"limits,omitempty"` + // 资源下限 + Requests ResourceList `json:"requests,omitempty"` +} + +// volumeMounts: +// - name: xxx +// mountPath: /data +type VolumeMount struct { + // 挂载卷的名称 + Name string `json:"name"` + // 挂载在容器内的路径 + MountPath string `json:"mountPath,omitempty"` +} + +type Pod struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata,omitempty"` + Spec PodSpec `json:"spec,omitempty"` + Status PodStatus `json:"status,omitempty"` +} + +type PodSpec struct { + // 卷声明 + Volumes []Volume `json:"volumes,omitempty"` + // 容器声明 + Containers []Container `json:"containers,omitempty"` + + InitContainers []Container `json:"initContainers,omitempty"` + // 重启策略:仅由kubelet实现 + RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"` + + //Sidecar *SidecarSpec `json:"sidecar,omitempty"` +} + +type SecurityContext struct { + Privileged *bool `json:"privileged,omitempty"` + RunAsUser *int64 `json:"runAsUser,omitempty"` +} + +//type SidecarSpec struct { +// Inject bool `json:"inject,omitempty"` +//} + +type PodStatus struct { + Phase PodPhase `json:"phase,omitempty"` + PodIP string `json:"podIP,omitempty"` +} + +type MetricsQuery struct { + // 查询的资源 + UID UID `form:"uid" json:"uid,omitempty"` + // 当前的时间戳 + TimeStamp time.Time `form:"timestamp" json:"timestamp,omitempty"` + // 查询的时间窗口 (秒) + Window int32 `form:"window" json:"window,omitempty"` +} + +// PodRawMetrics 用于标记Pod的资源使用情况, 作为stat接口的传输结构 +type PodRawMetrics struct { + UID UID `json:"uid,omitempty"` + // 各个容器的资源使用情况 + ContainerInfo map[string][]PodRawMetricsItem 
`json:"containers,omitempty"` +} + +// 会再增加统计的条目的 +type PodRawMetricsItem struct { + // 保证切片单元的时间戳是对齐的 + TimeStamp time.Time `json:"timestamp,omitempty"` + // 单个容器的CPU使用率,以百分比计算 + CPUUsage float32 `json:"cpuUsage"` + // 单个容器内存使用量,以MB计算 + MemoryUsage float32 `json:"memoryUsage"` +} + +type ScalerType string + +const ( + // // ReplicaSet + // ScalerTypeReplicaSet ScalerType = "ReplicaSet" + // // Deployment + // ScalerTypeDeployment ScalerType = "Deployment" + // HorizontalPodAutoscaler + ScalerTypeHPA ScalerType = "HorizontalPodAutoscaler" +) + +type CrossVersionObjectReference struct { + // 例如ReplicaSet,Deployment等 + Kind string `json:"kind"` + // 对象名称 + Name string `json:"name"` + // 默认v1 + APIVersion string `json:"apiVersion,omitempty"` +} + +type HorizontalPodAutoscaler struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata,omitempty"` + + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` +} + +type HorizontalPodAutoscalerSpec struct { + ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef"` + + // 期望的最小副本数 + MinReplicas int32 `json:"minReplicas,omitempty"` + // 期望的最大副本数 + MaxReplicas int32 `json:"maxReplicas"` + + // 扩缩容的时间窗口 + ScaleWindowSeconds int32 `json:"scaleWindowSeconds,omitempty"` + + // 监控的资源指标 + // Metrics []MetricSpec `json:"metrics,omitempty"` + Metrics []ResourceMetricSource `json:"metrics,omitempty"` + // 行为 + Behavior HorizontalPodAutoscalerBehavior `json:"behavior,omitempty"` +} + +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name ResourceName `json:"name"` + + // target specifies the target value for the given metric + Target MetricTarget `json:"target"` +} + +// MetricTarget defines the target value, average value, or average utilization of a specific metric +type MetricTarget struct { + // type represents whether the metric type is Utilization, AverageValue + Type MetricTargetType `json:"type"` + + // averageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + // 和具体的指标单位有关,内存是MB + AverageValue float32 `json:"averageValue,omitempty"` + + // averageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // Currently only valid for Resource metric source type + // 0 ~ 100 (百分比) + AverageUtilization float32 `json:"averageUtilization,omitempty"` + + // 触发扩缩容的阈值(百分比,可能大于100) + // 上限,默认是min(0.5*(100+目标使用率), 1.5*目标使用率) + UpperThreshold float32 `json:"upperThreshold,omitempty"` + // 下限,默认是0.5*目标使用率 + LowerThreshold float32 `json:"lowerThreshold,omitempty"` +} + +// MetricTargetType specifies the type of metric being targeted, and should be either +// "AverageValue", or "Utilization" +// AverageValue指的是使用量的真值,Utilization指的是使用率 +// 完全匹配的value现在不支持 +// 也许之后会通过维持偏差范围来扩缩容 不保真 +type MetricTargetType string + +const ( + // UtilizationMetricType declares a MetricTarget is an AverageUtilization value + UtilizationMetricType MetricTargetType = "Utilization" + // AverageValueMetricType declares a MetricTarget is an + AverageValueMetricType MetricTargetType = "AverageValue" +) + +// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target +// in both Up and Down directions (scaleUp and scaleDown fields respectively). +type HorizontalPodAutoscalerBehavior struct { + // 扩容的策略 + // 现在扩缩容都只各支持一种策略 + ScaleUp *HPAScalingPolicy `json:"scaleUp,omitempty"` + + // scaleDown is scaling policy for scaling Down. 
+ // If not set, the default value is to allow to scale down to minReplicas pods, with a + // 300 second stabilization window (i.e., the highest recommendation for + // the last 300sec is used). + // 缩容的策略 + ScaleDown *HPAScalingPolicy `json:"scaleDown,omitempty"` +} + +// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions. +type HPAScalingPolicyType string + +const ( + // PodsScalingPolicy is a policy used to specify a change in absolute number of pods. + PodsScalingPolicy HPAScalingPolicyType = "Pods" + + // PercentScalingPolicy is a policy used to specify a relative amount of change with respect to + // the current number of pods. + // 百分比的支持 + PercentScalingPolicy HPAScalingPolicyType = "Percent" +) + +// HPAScalingPolicy is a single policy which must hold true for a specified past interval. +type HPAScalingPolicy struct { + // type is used to specify the scaling policy. + Type HPAScalingPolicyType `json:"type"` + + // value contains the amount of change which is permitted by the policy. + // It must be greater than zero + Value int32 `json:"value"` + + // periodSeconds specifies the window of time for which the policy should hold true. + // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + PeriodSeconds int32 `json:"periodSeconds"` +} + +type Service struct { + TypeMeta `json:",inline"` + + ObjectMeta `json:"metadata,omitempty"` + + Spec ServiceSpec `json:"spec,omitempty"` + + Status ServiceStatus `json:"status,omitempty"` +} + +type ServiceSpec struct { + // 类型,默认为ClusterIP + Type ServiceType `json:"type,omitempty"` + // 端口声明 + Ports []ServicePort `json:"ports,omitempty"` + // 选择器,对应pod label + Selector map[string]string `json:"selector,omitempty"` + // 创建时由apiserver随机分配 + ClusterIP string `json:"clusterIP,omitempty"` +} + +type ServiceStatus struct { +} + +type ServiceType string + +const ( + ServiceTypeClusterIP ServiceType = "ClusterIP" + ServiceTypeNodePort ServiceType = "NodePort" +) + +const ( + NodePortMin = 30000 + NodePortMax = 32767 + PortMin = 1 + PortMax = 65535 +) + +type ServicePort struct { + Name string `json:"name,omitempty"` + // 默认为TCP + Protocol Protocol `json:"protocol,omitempty"` + // 端口号, 1-65535 + Port int32 `json:"port"` + // 目标端口号,1-65535 + TargetPort int32 `json:"targetPort"` + // type为NodePort时,指定的端口号 + NodePort int32 `json:"nodePort,omitempty"` +} + +type Protocol string + +const ( + ProtocolTCP Protocol = "tcp" + ProtocolUDP Protocol = "udp" +) + +type DNS struct { + TypeMeta `json:",inline"` + + ObjectMeta `json:"metadata,omitempty"` + + Spec DNSSpec `json:"spec,omitempty"` + + Status DNSStatus `json:"status,omitempty"` +} + +//spec: +// rules: +// - host: mywebsite.com +// paths: +// - path: /demo +// backend: +// service: +// name: myservice +// port: 8080 + +type DNSSpec struct { + Rules []DNSRule `json:"rules,omitempty"` +} + +type DNSRule struct { + Host string `json:"host,omitempty"` + Paths []DNSPath `json:"paths,omitempty"` +} + +type DNSPath struct { + Path string `json:"path,omitempty"` + Backend DNSBackend `json:"backend,omitempty"` +} + +type DNSBackend struct { + Service DNSServiceBackend `json:"service,omitempty"` +} + +type DNSServiceBackend struct { + Name string `json:"name,omitempty"` + Port int32 `json:"port,omitempty"` +} + +type DNSStatus struct { +} + +type Node struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata,omitempty"` + Spec NodeSpec `json:"spec,omitempty"` + Status NodeStatus `json:"status,omitempty"` +} + +type NodeSpec struct { 
+} + +type NodeStatus struct { + // IP地址 + Address string `json:"address,omitempty"` +} + +// ServiceName -> ClusterIP +type SidecarServiceNameMapping map[string]string + +type SidecarMapping map[string][]SidecarEndpoints + +type SidecarEndpoints struct { + Weight *int32 `json:"weight,omitempty"` + URL *string `json:"url,omitempty"` + Endpoints []SingleEndpoint `json:"endpoints,omitempty"` +} + +type SingleEndpoint struct { + IP string `json:"ip"` + TargetPort int32 `json:"targetPort"` +} + +type VirtualService struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata,omitempty"` + Spec VirtualServiceSpec `json:"spec,omitempty"` +} + +type VirtualServiceSpec struct { + ServiceRef string `json:"serviceRef,omitempty"` + Port int32 `json:"port,omitempty"` + Subsets []VirtualServiceSubset `json:"subsets,omitempty"` +} + +type VirtualServiceSubset struct { + Name string `json:"name,omitempty"` + // Weight subset中的所有endpoint都有相同的权重 + Weight *int32 `json:"weight,omitempty"` + // URL 路径为URL时,转发到本subset中的endpoint,支持带正则表达式的路径 + URL *string `json:"url,omitempty"` +} + +type Subset struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata,omitempty"` + Spec SubsetSpec `json:"spec,omitempty"` +} + +type SubsetSpec struct { + // pod名 + Pods []string `json:"pods,omitempty"` +} + +type RollingUpdate struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata,omitempty"` + Spec RollingUpdateSpec `json:"spec,omitempty"` + Status RollingUpdateStatus `json:"status,omitempty"` +} + +type RollingUpdateSpec struct { + ServiceRef string `json:"serviceRef,omitempty"` + Port int32 `json:"port,omitempty"` + MinimumAlive int32 `json:"minimumAlive,omitempty"` + Interval int32 `json:"interval,omitempty"` + NewPodSpec PodSpec `json:"newPodSpec,omitempty"` +} + +type RollingUpdatePhase string + +const ( + RollingUpdatePending RollingUpdatePhase = "Pending" + RollingUpdateRunning RollingUpdatePhase = "Running" + RollingUpdateFinished RollingUpdatePhase = "Finished" +) + +type RollingUpdateStatus struct { + Phase RollingUpdatePhase `json:"phase,omitempty"` +} diff --git a/pkg/controller/manager.go b/pkg/controller/manager.go new file mode 100644 index 0000000..a2d2d60 --- /dev/null +++ b/pkg/controller/manager.go @@ -0,0 +1,35 @@ +package controller + +import ( + "minikubernetes/pkg/controller/podautoscaler" + "minikubernetes/pkg/controller/replicaset" +) + +type ControllerManager interface { + Run() error +} + +type controllerManager struct { + rsController replicaset.ReplicaSetController + hpaController podautoscaler.HorizonalController +} + +func NewControllerManager(apiServerIP string) ControllerManager { + manager := &controllerManager{} + manager.rsController = replicaset.NewReplicasetManager(apiServerIP) + manager.hpaController = podautoscaler.NewHorizonalController(apiServerIP) + return manager +} + +func (cm *controllerManager) Run() error { + // 同时跑起两个controller + err := cm.rsController.RunRSC() + if err != nil { + return err + } + err = cm.hpaController.Run() + if err != nil { + return err + } + return nil +} diff --git a/pkg/controller/podautoscaler/horizonal.go b/pkg/controller/podautoscaler/horizonal.go new file mode 100644 index 0000000..25fb8bd --- /dev/null +++ b/pkg/controller/podautoscaler/horizonal.go @@ -0,0 +1,576 @@ +package podautoscaler + +import ( + // "minikubernetes/pkg/api/v1" + + "fmt" + "log" + "math" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeclient" + "os" + "os/signal" + "time" +) + +var hpadbg bool = false + +type HorizonalController interface { + 
Run() error +} +type horizonalController struct { + kube_cli kubeclient.Client +} + +func NewHorizonalController(apiServerIP string) HorizonalController { + return &horizonalController{ + kube_cli: kubeclient.NewClient(apiServerIP), + } +} + +const ( + defaultSyncHPAInterval = 10 + defaultPeriod = 60 + defaultScaleWindow = 15 +) + +var defaultUpscalePolicy = v1.HPAScalingPolicy{ + Type: v1.PodsScalingPolicy, + Value: 1, + PeriodSeconds: defaultPeriod, +} + +var defaultDownscalePolicy = v1.HPAScalingPolicy{ + Type: v1.PodsScalingPolicy, + Value: 1, + PeriodSeconds: defaultPeriod, +} + +func (hc *horizonalController) Run() error { + + prevActTimeMap := make(map[v1.UID]time.Time) + + // 定时同步hpa的时间间隔 + var defaultSyncHPAInterval int = 10 + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, os.Interrupt) + log.Printf("[HPA] Start HPA Controller\n") + // 现在只处理一个scaler的情况 + // 如果之后有多个scaler,分别起协程定时处理 + // 这样还得有一个类似pleg的东西,定时检查是否有新的scaler + go func() { + syncTicker := time.NewTicker(time.Duration(defaultSyncHPAInterval) * time.Second) + defer syncTicker.Stop() + defer log.Printf("Stop HPA Controller\n") + for { + select { + case <-syncTicker.C: + + // allPods []*v1.Pod + // allReplicaSets []*v1.ReplicaSet + // allHPAScalers []*v1.HorizontalPodAutoscaler + + // 1. 获取所有的HorizontalPodAutoscaler + allHPAScalers, err := hc.kube_cli.GetAllHPAScalers() + if hpadbg { + log.Printf("[HPA] Get all HorizontalPodAutoscaler: %v", allHPAScalers) + } + if err != nil { + log.Printf("[HPA] Get all HorizontalPodAutoscaler failed, error: %v", err) + // return err + } + if len(allHPAScalers) == 0 { + log.Printf("[HPA] No HorizontalPodAutoscaler found\n") + continue + } + + // 2. 获取所有的Pod + allPods, err := hc.kube_cli.GetAllPods() + if hpadbg { + log.Printf("[HPA] Get all Pods: %v", allPods) + } + if err != nil { + log.Printf("[HPA] Get all Pods failed, error: %v", err) + // return err + } + + for _, hpa := range allHPAScalers { + if hpadbg { + log.Printf("[HPA] Conducting HPA: %v\n", hpa) + } + reps, err := hc.kube_cli.GetAllReplicaSets() + if err != nil { + log.Printf("[HPA] Get all ReplicaSets failed, error: %v\n", err) + // return err + } + // 根据hpa中spec的scaleTargetRef找到对应的ReplicaSet(目前只能有一个) + rep, err := oneMatchRps(hpa, reps) + if err != nil { + log.Printf("[HPA] Get all ReplicaSets failed, error: %v\n", err) + // return err + } + if rep == nil { + log.Printf("[HPA] ReplicaSet not found\n") + continue + } + if hpadbg { + log.Printf("[HPA] ReplicaSet Matched: %v\n", rep) + } + // 根据ReplicaSet的labels筛选所有的Pod + podsMatch, err := oneMatchRpsLabels(rep, allPods) + if err != nil { + log.Printf("[HPA] Get all Pods failed, error: %v\n", err) + // return err + } + if len(podsMatch) == 0 { + log.Printf("[HPA] No matched pods!\n") + continue + } + if hpadbg { + log.Printf("[HPA] Pods Matched: %v\n", podsMatch) + } + // replicaSet目前的副本数 + var curRpsNum int32 = rep.Spec.Replicas + + // 出现过的最大值 + var maxRpsNumAprd int32 = 0 + + // 默认选择各种策略下出现的最大值 + selectPolicy := "Max" + + for metricPos, it := range hpa.Spec.Metrics { + + switch { + case it.Name == v1.ResourceCPU: + repNum := genRepNumFromCPU(hpa, metricPos, podsMatch, curRpsNum, hc.kube_cli) + if repNum != -1 { + maxRpsNumAprd = max(maxRpsNumAprd, repNum) + } + + case it.Name == v1.ResourceMemory: + repNum := genRepNumFromMemory(hpa, metricPos, podsMatch, curRpsNum, hc.kube_cli) + if repNum != -1 { + maxRpsNumAprd = max(maxRpsNumAprd, repNum) + } + default: + log.Printf("[HPA] Metrics type not supported!\n") + } + } + + if selectPolicy == "Max" { + + 
scaleWindowSz := hpa.Spec.ScaleWindowSeconds + if scaleWindowSz == 0 { + scaleWindowSz = defaultScaleWindow + } + + // 如果当前的时间戳和上一次的时间戳相差小于一个周期,那么不进行操作 + if time.Now().Sub(prevActTimeMap[hpa.UID]) < time.Duration(scaleWindowSz)*time.Second { + log.Printf("[HPA] In the same period, no need to change\n") + } else { + prevActTimeMap[hpa.UID] = time.Now() + if maxRpsNumAprd != curRpsNum && maxRpsNumAprd != 0 { + // 通知apiserver改变ReplicaSet的副本数 + err := hc.changeRpsPodNum(rep.Name, rep.Namespace, maxRpsNumAprd) + if err != nil { + log.Printf("[HPA] Change ReplicaSet Pod Num failed, error: %v\n", err) + } + } else { + log.Printf("[HPA] No need to change\n") + } + } + } + } + } + } + + }() + // ctrl+c + <-signalCh + log.Printf("[HPA] HPA Controller exit\n") + return nil + // log.Printf("HPA Controller exit abnormaly") +} + +// 向apiserver发送请求改变ReplicaSet的副本数 +func (hc *horizonalController) changeRpsPodNum(name string, namespace string, repNum int32) error { + fmt.Printf("changeRpsPodNum: %v, %v, %v\n", name, namespace, repNum) + err := hc.kube_cli.UpdateReplicaSet(name, namespace, repNum) + if err != nil { + return err + } + + return nil +} + +// func policyByRatio(hpa *v1.HorizontalPodAutoscaler,ratio float32) int32{ + +// return 0 +// } +// +// TODOS 由ratio计算副本数的函数抽象成一个 +func genRepNumFromCPU(hpa *v1.HorizontalPodAutoscaler, metricTypePos int, podsMatch []*v1.Pod, curRpsNum int32, kube_cli kubeclient.Client) int32 { + + if hpadbg { + log.Printf("[HPA] genRepNumFromCPU\n") + } + //需要取得hpa中相关的策略字段,以获取相关的统计窗口大小 + upBehavior := hpa.Spec.Behavior.ScaleUp + downBehavior := hpa.Spec.Behavior.ScaleDown + + // 防御性编程 + if upBehavior == nil { + upBehavior = &defaultUpscalePolicy + } + if downBehavior == nil { + downBehavior = &defaultDownscalePolicy + } + + // 这里假设apiserver在创建的时候就处理了默认策略 + + upPeriod := upBehavior.PeriodSeconds + downPeriod := downBehavior.PeriodSeconds + if hpadbg { + log.Printf("[HPA] UpPeriod: %v, DownPeriod: %v\n", upPeriod, downPeriod) + } + // spec中metrics的模板,包含资源类型,目标值等 + metricsTplt := hpa.Spec.Metrics[metricTypePos] + if metricsTplt.Target.Type != v1.UtilizationMetricType { + log.Printf("[HPA] Warning, use utilization for cpu\n") + return -1 + } + + // 尝试扩容 + log.Printf("[HPA] Try to upscale by cpu\n") + upAvg := genAvgCpuUsage(upPeriod, podsMatch, kube_cli) + + var ratio float32 + ratio = float32(upAvg / (metricsTplt.Target.AverageUtilization / 100)) + + if ratio > 1.1 { + var newRepNum int32 = 0 + // 可以扩容 + if hpadbg { + log.Printf("[HPA] UpScale: %v\n", hpa) + } + tryAdd := (int32)(math.Ceil((float64)((ratio - 1) * float32(curRpsNum)))) + if hpa.Spec.Behavior.ScaleUp.Type == v1.PercentScalingPolicy { + tryAddByPercent := (int32)(math.Ceil((float64)(hpa.Spec.Behavior.ScaleUp.Value * curRpsNum / 100))) + tryAdd = min(tryAdd, tryAddByPercent) + } else if hpa.Spec.Behavior.ScaleUp.Type == v1.PodsScalingPolicy { + tryAdd = min(tryAdd, hpa.Spec.Behavior.ScaleUp.Value) + } else { + tryAdd = 1 + } + log.Printf("[HPA] try Add: %v\n", tryAdd) + newRepNum = curRpsNum + tryAdd + if newRepNum > hpa.Spec.MaxReplicas { + newRepNum = hpa.Spec.MaxReplicas + } + return newRepNum + } + + // 尝试缩容 + log.Printf("[HPA] Try to downscale by cpu\n") + downAvg := genAvgCpuUsage(downPeriod, podsMatch, kube_cli) + + ratio = float32(downAvg / (metricsTplt.Target.AverageUtilization / 100)) + + if ratio < 0.9 { + var newRepNum int32 = 0 + // 可以缩容 + if hpadbg { + log.Printf("[HPA] DownScale: %v\n", hpa) + } + trySub := (int32)(math.Ceil((float64)((1 - ratio) * float32(curRpsNum)))) + if 
hpa.Spec.Behavior.ScaleDown.Type == v1.PercentScalingPolicy { + trySubByPercent := (int32)(math.Ceil((float64)(hpa.Spec.Behavior.ScaleDown.Value * curRpsNum / 100))) + trySub = min(trySub, trySubByPercent) + } else if hpa.Spec.Behavior.ScaleDown.Type == v1.PodsScalingPolicy { + trySub = min(trySub, hpa.Spec.Behavior.ScaleDown.Value) + } else { + trySub = 1 + } + log.Printf("[HPA] try Sub: %v\n", trySub) + newRepNum = curRpsNum - trySub + if newRepNum < hpa.Spec.MinReplicas { + newRepNum = hpa.Spec.MinReplicas + } + return newRepNum + } + + // 保持不变 + log.Printf("[HPA] In tolerance, keep the same: %v\n", hpa) + return curRpsNum +} + +func genAvgCpuUsage(windowSz int32, podsMatch []*v1.Pod, kube_cli kubeclient.Client) float32 { + // 当前时间戳 + now := time.Now() + + allPodCpuAvg := float32(0) + // 根据pod的Id获取所有的Metrics + for _, pod := range podsMatch { + // 1. 处理扩容的请求 + oneQuery := v1.MetricsQuery{ + UID: pod.UID, + TimeStamp: now, + Window: windowSz, + } + + // 一个pod的统计参数 + oneMetrics, err := kube_cli.GetPodMetrics(oneQuery) + if err != nil { + log.Printf("[HPA] Get Pod Metrics failed, error: %v\n", err) + // return err + } + + if oneMetrics == nil { + log.Printf("[HPA] Pod Metrics not found\n") + continue + } + if len(oneMetrics.ContainerInfo) == 0 { + log.Printf("[HPA] Wait for ContainerInfo to become valid\n") + continue + } + + var podCpuSum float32 = 0 + // 单个container的cpu使用率 + var sum, avg float32 + + // 对于每个container 统计其cpu使用率 + // 目前是计算时间窗口内的平均使用率 + for _, oneCtnr := range oneMetrics.ContainerInfo { + sum = 0 + avg = 0 + for _, i := range oneCtnr { + sum += i.CPUUsage + } + if len(oneCtnr) != 0 { + avg = sum / float32(len(oneCtnr)) + if hpadbg { + log.Printf("[HPA] Pod Metrics avg cpu / one container: %v\n", avg) + } + } + podCpuSum += avg + } + podCpuAvg := podCpuSum / float32(len(oneMetrics.ContainerInfo)) + + // 计算Pod的平均cpu使用率 + // 默认是所有container的平均值 + if hpadbg { + log.Printf("[HPA] Pod Metrics avg cpu / all containers in one pod: %v\n", podCpuAvg) + } + allPodCpuAvg += podCpuAvg + } + + // 计算所有Pod的平均cpu使用率 + allPodCpuAvg = allPodCpuAvg / float32(len(podsMatch)) + log.Printf("[HPA] All Pods Metrics avg cpu / all pods: %v\n", allPodCpuAvg) + return allPodCpuAvg +} + +func genRepNumFromMemory(hpa *v1.HorizontalPodAutoscaler, metricPos int, podsMatch []*v1.Pod, curRpsNum int32, kube_cli kubeclient.Client) int32 { + + if hpadbg { + log.Printf("[HPA] genRepNumFromMemory\n") + } + //需要取得hpa中相关的策略字段,以获取相关的统计窗口大小 + upBehavior := hpa.Spec.Behavior.ScaleUp + downBehavior := hpa.Spec.Behavior.ScaleDown + // 防御性编程 + if upBehavior == nil { + upBehavior = &defaultUpscalePolicy + } + if downBehavior == nil { + downBehavior = &defaultDownscalePolicy + } + + upPeriod := upBehavior.PeriodSeconds + downPeriod := downBehavior.PeriodSeconds + if hpadbg { + log.Printf("[HPA] UpPeriod: %v, DownPeriod: %v\n", upPeriod, downPeriod) + } + // spec中metrics的模板,包含资源类型,目标值等 + metricsTplt := hpa.Spec.Metrics[metricPos] + fmt.Print(string(metricsTplt.Name)) + if metricsTplt.Target.Type != v1.AverageValueMetricType { + log.Printf("[HPA] Warning, use AverageValue for memory\n") + return -1 + } + + // 尝试扩容 + log.Printf("[HPA] Try to upscale by memory\n") + upAvg := genAvgMemoryUsage(upPeriod, podsMatch, kube_cli) + + var ratio float32 + ratio = float32(upAvg / metricsTplt.Target.AverageValue) + + // 因为内存的抖动可能会更大,所以这里将阈值设置得高一点 + if ratio > 1.5 { + var newRepNum int32 = 0 + // 可以扩容 + if hpadbg { + log.Printf("[HPA] UpScale: %v\n", hpa) + } + + tryAdd := (int32)(math.Ceil((float64)((ratio - 1) * float32(curRpsNum)))) + if 
hpa.Spec.Behavior.ScaleUp.Type == v1.PercentScalingPolicy { + tryAddByPercent := (int32)(math.Ceil((float64)(hpa.Spec.Behavior.ScaleUp.Value * curRpsNum / 100))) + tryAdd = min(tryAdd, tryAddByPercent) + } else if hpa.Spec.Behavior.ScaleUp.Type == v1.PodsScalingPolicy { + tryAdd = min(tryAdd, hpa.Spec.Behavior.ScaleUp.Value) + } else { + tryAdd = 1 + } + log.Printf("[HPA] try Add: %v\n", tryAdd) + newRepNum = curRpsNum + tryAdd + if newRepNum > hpa.Spec.MaxReplicas { + newRepNum = hpa.Spec.MaxReplicas + } + return newRepNum + } + + // 尝试缩容 + log.Printf("[HPA] Try to downscale by memory\n") + downAvg := genAvgMemoryUsage(downPeriod, podsMatch, kube_cli) + + ratio = float32(downAvg / metricsTplt.Target.AverageValue) + + if ratio < 0.5 { + var newRepNum int32 = 0 + // 可以缩容 + if hpadbg { + log.Printf("[HPA] DownScale: %v\n", hpa) + } + trySub := (int32)(math.Ceil((float64)((1 - ratio) * float32(curRpsNum)))) + if hpa.Spec.Behavior.ScaleDown.Type == v1.PercentScalingPolicy { + trySubByPercent := (int32)(math.Ceil((float64)(hpa.Spec.Behavior.ScaleDown.Value * curRpsNum / 100))) + trySub = min(trySub, trySubByPercent) + } else if hpa.Spec.Behavior.ScaleDown.Type == v1.PodsScalingPolicy { + trySub = min(trySub, hpa.Spec.Behavior.ScaleDown.Value) + } else { + trySub = 1 + } + log.Printf("[HPA] try Sub: %v\n", trySub) + newRepNum = curRpsNum - trySub + if newRepNum < hpa.Spec.MinReplicas { + newRepNum = hpa.Spec.MinReplicas + } + + return newRepNum + } + + // 保持不变 + log.Printf("[HPA] In tolerance interval, keep the same: %v\n", hpa) + return curRpsNum +} + +// TODO 将获取平均值的函数抽象成一个 +// 但是memory比较特殊 如果没有大的变化 新的 +func genAvgMemoryUsage(windowSz int32, podsMatch []*v1.Pod, kube_cli kubeclient.Client) float32 { + // 当前时间戳 + now := time.Now() + + allPodMemAvg := float32(0) + // 根据pod的Id获取所有的Metrics + for _, pod := range podsMatch { + // 1. 
处理扩容的请求 + oneQuery := v1.MetricsQuery{ + UID: pod.UID, + TimeStamp: now, + Window: windowSz, + } + + // 一个pod的统计参数 + oneMetrics, err := kube_cli.GetPodMetrics(oneQuery) + if err != nil { + log.Printf("[HPA] Get Pod Metrics failed, error: %v\n", err) + // return err + } + + if oneMetrics == nil { + log.Printf("[HPA] Pod Metrics not found\n") + continue + } + if len(oneMetrics.ContainerInfo) == 0 { + log.Printf("[HPA] Wait for ContainerInfo to become valid\n") + continue + } + + var podMemSum float32 = 0 + // 单个container的内存使用率 + var sum, avg float32 + + // 对于每个container 统计其内存使用率 + // 目前是计算时间窗口内的平均使用率 + for _, oneCtnr := range oneMetrics.ContainerInfo { + sum = 0 + avg = 0 + for _, i := range oneCtnr { + sum += i.MemoryUsage + } + if len(oneCtnr) != 0 { + avg = sum / float32(len(oneCtnr)) + if hpadbg { + log.Printf("[HPA] Pod Metrics avg memory / one container: %v\n", avg) + } + } + podMemSum += avg + } + podMemAvg := podMemSum / float32(len(oneMetrics.ContainerInfo)) + + // 计算Pod的平均内存使用率 + // 默认是所有container的平均值 + if hpadbg { + log.Printf("[HPA] Pod Metrics avg memory / all containers in one pod: %v\n", podMemAvg) + } + allPodMemAvg += podMemAvg + } + + // 计算所有Pod的平均内存使用率 + allPodMemAvg = allPodMemAvg / float32(len(podsMatch)) + log.Printf("[HPA] All Pods Metrics avg memory / all pods: %v\n", allPodMemAvg) + return allPodMemAvg +} + +// 从apiServer里面获取相关的ReplicaSet +func oneMatchRps(hpa *v1.HorizontalPodAutoscaler, reps []*v1.ReplicaSet) (*v1.ReplicaSet, error) { + refRpsName := hpa.Spec.ScaleTargetRef.Name + // 默认在和hpa同一个namespace下去找hpa + refRpsNamespace := hpa.Namespace + + for _, rep := range reps { + if rep.Name == refRpsName && rep.Namespace == refRpsNamespace { + return rep, nil + } + } + return nil, fmt.Errorf("[HPA] ReplicaSet not found") + +} + +// 根据ReplicaSet的labels筛选所有的Pod +func oneMatchRpsLabels(rep *v1.ReplicaSet, allPods []*v1.Pod) ([]*v1.Pod, error) { + resLabelSelector := rep.Spec.Selector + var podBelonged []*v1.Pod + + for _, pod := range allPods { + if pod == nil || pod.Labels == nil { + continue + } + + belonged := true + for k, v := range resLabelSelector.MatchLabels { + if pod.Labels[k] != v { + belonged = false + break + } + } + if belonged { + podBelonged = append(podBelonged, pod) + } + + } + return podBelonged, nil + +} diff --git a/pkg/controller/replicaset/controller.go b/pkg/controller/replicaset/controller.go new file mode 100644 index 0000000..6694f43 --- /dev/null +++ b/pkg/controller/replicaset/controller.go @@ -0,0 +1,164 @@ +package replicaset + +import ( + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeclient" + uuid2 "minikubernetes/tools/uuid" + "time" +) + +const FormatTime = "2006-01-02 15:04:05.000000" + +type ReplicaSetController interface { + RunRSC() error +} + +type replicaSetController struct { + client kubeclient.Client + syncHandler func() error +} + +func NewReplicasetManager(apiServerIP string) ReplicaSetController { + manager := &replicaSetController{} + manager.client = kubeclient.NewClient(apiServerIP) + return manager +} + +func (rc *replicaSetController) RunRSC() error { + + err := rc.syncReplicaSet() + + if err != nil { + return err + } + return nil +} + +func (rc *replicaSetController) addPod(pod *v1.Pod) { + uuid := uuid2.NewUUID() + pod.Name = pod.Name + "-" + uuid + pod.TypeMeta.Kind = "Pod" + err := rc.client.AddPod(*pod) + if err != nil { + return + } + return +} + +func (rc *replicaSetController) deletePod(name, namespace string) { + err := rc.client.DeletePod(name, namespace) + if err != nil { + return + } + +} 
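+
+// syncReplicaSet (below) runs a simple reconcile loop every 5 seconds: it lists
+// all ReplicaSets and Pods, matches pods against Spec.Selector.MatchLabels,
+// counts the ones in Running/Pending/Succeeded phase, then creates missing pods
+// from Spec.Template (addPod appends a fresh UUID to the pod name) or deletes
+// surplus pods once the desired count has been reached.
+// Illustrative example: with Spec.Replicas = 3 and one matching Running pod,
+// two pods are created; with five matching Running pods, the two beyond the
+// third are deleted.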
+ +func (rc *replicaSetController) syncReplicaSet() error { + log.Printf("[RPS] start sync replica set") + go func() error { + for { + log.Printf("[RPS] sync replica set") + var reps []*v1.ReplicaSet + var allPods []*v1.Pod + log.Printf("[RPS] get all replica set") + reps, err := rc.client.GetAllReplicaSets() + if err != nil { + log.Printf("[RPS] get all replica set failed, error: %s", err.Error()) + } + + allPods, err = rc.client.GetAllPods() + if err != nil { + log.Printf("[RPS] get all pods failed, error: %s", err.Error()) + } + + for _, rep := range reps { + + allPodsMatch, err := rc.oneReplicaSetMatch(rep, allPods) + if err != nil { + log.Printf("[RPS] match replica set failed, error: %s", err.Error()) + return err + } + toStart, err := rc.oneReplicaSetCheck(allPodsMatch, rep.Spec.Replicas) + if err != nil { + log.Printf("[RPS] check replica set failed, error: %s", err.Error()) + return err + } + for i := 1; i <= toStart; i++ { + + pod := &v1.Pod{ + TypeMeta: v1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: rep.Spec.Template.ObjectMeta, + Spec: rep.Spec.Template.Spec, + } + rc.addPod(pod) + } + + } + + time.Sleep(5000 * time.Millisecond) + + } + }() + return nil +} + +func (rc *replicaSetController) oneReplicaSetMatch(rep *v1.ReplicaSet, allPods []*v1.Pod) ([]*v1.Pod, error) { + resLabelSelector := rep.Spec.Selector + var podBelonged []*v1.Pod + + for _, pod := range allPods { + if pod == nil || pod.Labels == nil { + continue + } + + belonged := true + for k, v := range resLabelSelector.MatchLabels { + if pod.Labels[k] != v { + belonged = false + break + } + } + if belonged { + podBelonged = append(podBelonged, pod) + } + + } + return podBelonged, nil + +} + +func (rc *replicaSetController) oneReplicaSetCheck(allPodsMatch []*v1.Pod, wanted int32) (int, error) { + wantedNum := int(wanted) + replicasNum := 0 + stateMark := false + for _, pod := range allPodsMatch { + if replicasNum == wantedNum { + stateMark = true + } + if stateMark { + rc.deletePod(pod.Name, pod.Namespace) + + } else { + if pod.Status.Phase == v1.PodRunning { + replicasNum++ + } else if pod.Status.Phase == v1.PodPending { + replicasNum++ + } else if pod.Status.Phase == v1.PodFailed { + + } else if pod.Status.Phase == v1.PodSucceeded { + replicasNum++ + } + } + + } + + if !stateMark { + neededNum := wantedNum - replicasNum + return neededNum, nil + } + return 0, nil +} diff --git a/pkg/kubeapiserver/app/routine.go b/pkg/kubeapiserver/app/routine.go new file mode 100644 index 0000000..237a238 --- /dev/null +++ b/pkg/kubeapiserver/app/routine.go @@ -0,0 +1,2940 @@ +package app + +import ( + "bufio" + "encoding/json" + "fmt" + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeapiserver/etcd" + "minikubernetes/pkg/kubeapiserver/metrics" + "minikubernetes/pkg/kubeapiserver/utils" + "minikubernetes/tools/timestamp" + "minikubernetes/tools/uuid" + "net" + "sort" + "strconv" + "strings" + "sync" + + "net/http" + "time" + + gin "github.com/gin-gonic/gin" +) + +/* this is the simple routine of apiserver */ + +/* For The apis: */ + +/* API: [SYSTEM INFO] + /metrics + /healthz + Handle: + pass to handlers like "metrics.go" by route. +*/ + +/* + API: [CORE GROUPS] + /api/v1/... + /pods/... + + Descriptions: + Basic command apis of k8s. 
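+		Resources covered here: nodes, pods, services, dns, replicasets,
+		scaling (HPA), stats/metrics data, virtualservices, subsets,
+		sidecar mappings and rollingupdates.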
+ + Handle: + Now in KubeApiServer +*/ + +/* URL Consts + */ +const ( + All_nodes_url = "/api/v1/node" + Node_status_url = "/api/v1/nodes/:nodename/status" + + All_pods_url = "/api/v1/pods" + Namespace_Pods_url = "/api/v1/namespaces/:namespace/pods" + Single_pod_url = "/api/v1/namespaces/:namespace/pods/:podname" + Pod_status_url = "/api/v1/namespaces/:namespace/pods/:podname/status" + + Node_pods_url = "/api/v1/nodes/:nodename/pods" + + AllServicesURL = "/api/v1/services" + NamespaceServicesURL = "/api/v1/namespaces/:namespace/services" + SingleServiceURL = "/api/v1/namespaces/:namespace/services/:servicename" + + AllDNSURL = "/api/v1/dns" + NamespaceDNSURL = "/api/v1/namespaces/:namespace/dns" + SingleDNSURL = "/api/v1/namespaces/:namespace/dns/:dnsname" + + RegisterNodeURL = "/api/v1/nodes/register" + UnregisterNodeURL = "/api/v1/nodes/unregister" + AllNodesURL = "/api/v1/nodes" + SchedulePodURL = "/api/v1/schedule" + UnscheduledPodsURL = "/api/v1/pods/unscheduled" + + AllReplicaSetsURL = "/api/v1/replicasets" + NamespaceReplicaSetsURL = "/api/v1/namespaces/:namespace/replicasets" + SingleReplicaSetURL = "/api/v1/namespaces/:namespace/replicasets/:replicasetname" + + StatsDataURL = "/api/v1/stats/data" + AllScalingURL = "/api/v1/scaling" + NamespaceScalingsURL = "/api/v1/namespaces/:namespace/scaling" + SingleScalingURL = "/api/v1/namespaces/:namespace/scaling/scalingname/:name" + + AllVirtualServicesURL = "/api/v1/virtualservices" + NamespaceVirtualServicesURL = "/api/v1/namespaces/:namespace/virtualservices" + SingleVirtualServiceURL = "/api/v1/namespaces/:namespace/virtualservices/:virtualservicename" + + AllSubsetsURL = "/api/v1/subsets" + NamespaceSubsetsURL = "/api/v1/namespaces/:namespace/subsets" + SingleSubsetURL = "/api/v1/namespaces/:namespace/subsets/:subsetname" + + SidecarMappingURL = "/api/v1/sidecar-mapping" + SidecarServiceNameMappingURL = "/api/v1/sidecar-service-name-mapping" + + AllRollingUpdateURL = "/api/v1/rollingupdates" + NamespaceRollingUpdateURL = "/api/v1/namespaces/:namespace/rollingupdates" + SingleRollingUpdateURL = "/api/v1/namespaces/:namespace/rollingupdates/:rollingupdatename" +) + +/* NAMESPACE + * and NODE HARDSHIT + */ +const ( + Default_Namespace = "default" + Default_Nodename = "node-0" + Default_Podname = "example-pod" +) + +type kubeApiServer struct { + router *gin.Engine + listen_ip string + port int + store_cli etcd.Store + metrics_cli metrics.MetricsDatabase + + lock sync.Mutex +} + +type KubeApiServer interface { + Run() +} + +func (ser *kubeApiServer) init() { + var err error + newStore, err := etcd.NewEtcdStore() + if err != nil { + log.Panicln("etcd store init failed") + return + } + ser.store_cli = newStore + + metricsDb, err := metrics.NewMetricsDb() + if err != nil { + log.Panicln("metrics db init failed") + return + } + + ser.metrics_cli = metricsDb + + // assume that node-0 already registered + + // TODO:(unnecessary) 检查etcdcli是否有效 + +} + +func (ser *kubeApiServer) Run() { + // debugging + now := time.Now() + hour := now.Hour() + minute := now.Minute() + second := now.Second() + + log.Println("starting the kubeApiServer") + log.Printf("at time: %d:%d:%d\n", hour, minute, second) + + // fake init + log.Println("kubeApiServer init") + ser.init() + + // exactly initialing + log.Println("kubeApiServer is binding handlers") + ser.binder() + + log.Printf("binding ip: %v, listening port: %v\n", ser.listen_ip, ser.port) + ser.router.Run(ser.listen_ip + ":" + fmt.Sprint(ser.port)) + + defer log.Printf("server stop") +} + +func 
NewKubeApiServer() (KubeApiServer, error) { + // return an kubeapi server + return &kubeApiServer{ + router: gin.Default(), + listen_ip: "0.0.0.0", + port: 8001, + }, nil +} + +// binding Restful requests to urls +// could initialize with config + +func (ser *kubeApiServer) binder() { + // debug + ser.router.GET("/ping", func(c *gin.Context) { + c.JSON(200, gin.H{ + "message": "pong", + }) + }) + + ser.router.GET(All_nodes_url, GetNodesHandler) + ser.router.POST(All_nodes_url, AddNodeHandler) + ser.router.GET(Node_status_url, GetNodeStatusHandler) + ser.router.PUT(Node_status_url, PutNodeStatusHandler) // only modify the status of node + + ser.router.GET(All_pods_url, ser.GetAllPodsHandler) + ser.router.GET(Namespace_Pods_url, ser.GetPodsByNamespaceHandler) + ser.router.GET(Single_pod_url, ser.GetPodHandler) + ser.router.POST(Namespace_Pods_url, ser.AddPodHandler) // for single-pod testing + ser.router.PUT(Single_pod_url, UpdatePodHandler) + ser.router.DELETE(Single_pod_url, ser.DeletePodHandler) + ser.router.GET(Pod_status_url, ser.GetPodStatusHandler) + ser.router.PUT(Pod_status_url, ser.PutPodStatusHandler) // only modify the status of a single pod + + ser.router.GET(Node_pods_url, ser.GetPodsByNodeHandler) // for single-pod testing + + ser.router.GET(AllServicesURL, ser.GetAllServicesHandler) + ser.router.POST(NamespaceServicesURL, ser.AddServiceHandler) + ser.router.DELETE(SingleServiceURL, ser.DeleteServiceHandler) + + ser.router.GET(AllDNSURL, ser.GetAllDNSHandler) + ser.router.POST(NamespaceDNSURL, ser.AddDNSHandler) + ser.router.DELETE(SingleDNSURL, ser.DeleteDNSHandler) + + ser.router.GET(AllNodesURL, ser.GetAllNodesHandler) + ser.router.POST(RegisterNodeURL, ser.RegisterNodeHandler) + ser.router.POST(UnregisterNodeURL, ser.UnregisterNodeHandler) + ser.router.POST(SchedulePodURL, ser.SchedulePodToNodeHandler) + ser.router.GET(UnscheduledPodsURL, ser.GetUnscheduledPodHandler) + + ser.router.GET(AllReplicaSetsURL, ser.GetAllReplicaSetsHandler) + ser.router.POST(NamespaceReplicaSetsURL, ser.AddReplicaSetHandler) + ser.router.GET(SingleReplicaSetURL, ser.GetReplicaSetHandler) + ser.router.PUT(SingleReplicaSetURL, ser.UpdateReplicaSetHandler) + ser.router.DELETE(SingleReplicaSetURL, ser.DeleteReplicaSetHandler) + + ser.router.GET(StatsDataURL, ser.GetStatsDataHandler) + ser.router.POST(StatsDataURL, ser.AddStatsDataHandler) + + ser.router.GET(AllScalingURL, ser.GetAllScalingHandler) + ser.router.POST(NamespaceScalingsURL, ser.AddScalingHandler) + ser.router.DELETE(SingleScalingURL, ser.DeleteScalingHandler) + + ser.router.GET(AllVirtualServicesURL, ser.GetAllVirtualServicesHandler) + ser.router.POST(NamespaceVirtualServicesURL, ser.AddVirtualServiceHandler) + ser.router.DELETE(SingleVirtualServiceURL, ser.DeleteVirtualServiceHandler) + + ser.router.GET(AllSubsetsURL, ser.GetAllSubsetsHandler) + ser.router.POST(NamespaceSubsetsURL, ser.AddSubsetHandler) + ser.router.DELETE(SingleSubsetURL, ser.DeleteSubsetHandler) + ser.router.GET(SingleSubsetURL, ser.GetSubsetHandler) + + ser.router.GET(SidecarMappingURL, ser.GetSidecarMapping) + ser.router.POST(SidecarMappingURL, ser.SaveSidecarMapping) + ser.router.GET(SidecarServiceNameMappingURL, ser.GetSidecarServiceNameMapping) + + ser.router.GET(AllRollingUpdateURL, ser.GetAllRollingUpdatesHandler) + ser.router.POST(NamespaceRollingUpdateURL, ser.AddRollingUpdateHandler) + ser.router.POST(SingleRollingUpdateURL, ser.UpdateRollingUpdateStatusHandler) + ser.router.DELETE(SingleRollingUpdateURL, ser.DeleteRollingUpdateHandler) +} + +func (s 
*kubeApiServer) GetStatsDataHandler(c *gin.Context) { + // 通过结构体来获取查询请求 + var query v1.MetricsQuery + err := c.ShouldBindQuery(&query) + if err != nil { + c.JSON(http.StatusBadRequest, + v1.BaseResponse[*v1.PodRawMetrics]{Error: "error in parsing query"}, + ) + return + } + // 通过query来获取数据 + fmt.Printf("get query: %v\n", query) + fmt.Printf("get query uid: %v\n", query.UID) + data, err := s.metrics_cli.GetPodMetrics(query.UID, query.TimeStamp, query.Window) + + if err != nil { + fmt.Printf("error in getting metrics: %v\n", err) + c.JSON(http.StatusInternalServerError, + v1.BaseResponse[*v1.PodRawMetrics]{Error: fmt.Sprintf("error in getting metrics: %v", err)}, + ) + return + } + c.JSON(http.StatusOK, + v1.BaseResponse[*v1.PodRawMetrics]{Data: data}, + ) + +} +func (s *kubeApiServer) AddStatsDataHandler(c *gin.Context) { + // 获取需要保存的metrics + // contentStr, _ := c.GetRawData() + // fmt.Printf("get content str: %v\n", string(contentStr)) + + var metrics []*v1.PodRawMetrics + err := c.ShouldBind(&metrics) + fmt.Printf("get metrics str: %v\n", metrics) + if err != nil { + log.Printf("error in parsing metrics: %v", err) + c.JSON(http.StatusBadRequest, + v1.BaseResponse[[]*v1.PodRawMetrics]{Error: "error in parsing metrics"}, + ) + return + } + // 保存metrics + err = s.metrics_cli.SavePodMetrics(metrics) + + if err != nil { + c.JSON(http.StatusInternalServerError, + v1.BaseResponse[[]*v1.PodRawMetrics]{Error: fmt.Sprintf("error in saving metrics: %v", err)}, + ) + return + } + c.JSON(http.StatusOK, + v1.BaseResponse[[]*v1.PodRawMetrics]{Data: metrics}, + ) +} + +func (s *kubeApiServer) GetAllScalingHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + allSca, err := s.getAllScalingsFromEtcd() + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.HorizontalPodAutoscaler]{ + Error: fmt.Sprintf("error in getting all scalings: %v", err), + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[[]*v1.HorizontalPodAutoscaler]{Data: allSca}) + +} +func (s *kubeApiServer) getAllScalingsFromEtcd() ([]*v1.HorizontalPodAutoscaler, error) { + allScaKeyPrefix := "/registry/scaling" + res, err := s.store_cli.GetSubKeysValues(allScaKeyPrefix) + if err != nil { + return nil, err + } + allSca := make([]*v1.HorizontalPodAutoscaler, 0) + for _, v := range res { + var sca v1.HorizontalPodAutoscaler + err = json.Unmarshal([]byte(v), &sca) + if err != nil { + return nil, err + } + allSca = append(allSca, &sca) + } + return allSca, nil +} + +func (ser *kubeApiServer) AddScalingHandler(c *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + var hpa v1.HorizontalPodAutoscaler + err := c.ShouldBind(&hpa) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "error in parsing hpa", + }) + return + } + if hpa.Kind != string(v1.ScalerTypeHPA) { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "scaling type not supported", + }) + return + } + if hpa.Name == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "hpa name is required", + }) + return + } + if hpa.Namespace == "" { + hpa.Namespace = Default_Namespace + } + hpa.CreationTimestamp = timestamp.NewTimestamp() + hpa.UID = (v1.UID)(uuid.NewUUID()) + if hpa.Spec.MinReplicas == 0 { + hpa.Spec.MinReplicas = 1 + } + + hpaKey := fmt.Sprintf("/registry/namespaces/%s/scaling/%s", hpa.Namespace, hpa.Name) + allhpaKey := fmt.Sprintf("/registry/scaling/%s", hpa.UID) + + // 检查是否有重复的 + hpaUid, err := 
ser.store_cli.Get(hpaKey) + if hpaUid != "" { + c.JSON(http.StatusConflict, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "scaling already exists", + }) + return + } + + // 没有重复,可以添加 + + hpaStr, err := json.Marshal(hpa) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "error in json marshal", + }) + return + } + err = ser.store_cli.Set(hpaKey, string(hpa.UID)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "error in writing scaling uid to etcd", + }) + return + } + + err = ser.store_cli.Set(allhpaKey, string(hpaStr)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "error in writing scaling to etcd", + }) + return + } + c.JSON(http.StatusCreated, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{Data: &hpa}) + +} + +func (ser *kubeApiServer) DeleteScalingHandler(c *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + namespace := c.Params.ByName("namespace") + name := c.Params.ByName("name") + if namespace == "" || name == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "namespace and name are required", + }) + return + } + hpaKey := fmt.Sprintf("/registry/namespaces/%s/scaling/%s", namespace, name) + hpaUid, err := ser.store_cli.Get(hpaKey) + + if hpaUid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "scaling not found", + }) + return + + } + + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "error in getting scaling uid from etcd", + }) + return + } + err = ser.store_cli.Delete(hpaKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "error in deleting scaling from etcd", + }) + } + allhpaKey := fmt.Sprintf("/registry/scaling/%s", hpaUid) + + var hpa v1.HorizontalPodAutoscaler + hpaStr, err := ser.store_cli.Get(allhpaKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "error in getting scaling from etcd", + }) + return + } + err = json.Unmarshal([]byte(hpaStr), &hpa) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "error in json unmarshal", + }) + return + } + + err = ser.store_cli.Delete(allhpaKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{ + Error: "error in deleting scaling from etcd", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.HorizontalPodAutoscaler]{Data: &hpa}) + +} + +// handlers (trivial) + +// Nodes have no namespace. +// TODO: 增加Node支持 +func GetNodesHandler(con *gin.Context) { + +} +func AddNodeHandler(con *gin.Context) { + +} + +func GetNodeStatusHandler(con *gin.Context) { + +} +func PutNodeStatusHandler(con *gin.Context) { + +} + +// For pods +// We set namespace to "default" right now. 
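+//
+// etcd key layout used by the pod handlers below:
+//   /registry/pods/<uid>                          -> pod JSON
+//   /registry/namespaces/<namespace>/pods/<name>  -> pod UID
+//   /registry/host-nodes/<nodename>/pods/...      -> pod UID
+// Illustrative request (<apiserver-ip> is a placeholder; the server listens on
+// port 8001):
+//   curl http://<apiserver-ip>:8001/api/v1/namespaces/default/pods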
+ +func (ser *kubeApiServer) GetAllPodsHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + log.Println("GetAllPods") + + all_pod_str := make([]v1.Pod, 0) + + prefix := "/registry" + + all_pod_keystr := prefix + "/pods" + + res, err := ser.store_cli.GetSubKeysValues(all_pod_keystr) + + //if res == nil || len(res) == 0 || err != nil { + // log.Println("no pod exists") + // con.JSON(http.StatusNotFound, gin.H{ + // "error": "no pod exists", + // }) + // return + //} + if err != nil { + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in reading all pods from etcd", + }) + return + } + if len(res) == 0 { + con.JSON(http.StatusOK, v1.BaseResponse[[]*v1.Pod]{Data: nil}) + return + } + for _, v := range res { + var pod v1.Pod + err = json.Unmarshal([]byte(v), &pod) + if err != nil { + log.Println("error in json unmarshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json unmarshal", + }) + return + } + all_pod_str = append(all_pod_str, pod) + } + + con.JSON(http.StatusOK, + gin.H{ + "data": all_pod_str, + }, + ) + +} +func (ser *kubeApiServer) GetPodsByNamespaceHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + log.Println("GetPodsByNamespace") + + np := con.Params.ByName("namespace") + if np == "" { + con.JSON(http.StatusNotFound, gin.H{ + "error": "error in parsing namespace ", + }) + return + } + + all_pod_str := make([]v1.Pod, 0) + + prefix := "/registry" + + namespace_pod_keystr := prefix + "/namespaces/" + np + "/pods" + + res, err := ser.store_cli.GetSubKeysValues(namespace_pod_keystr) + + //if res == nil || len(res) == 0 || err != nil { + // log.Println("no pod exists") + // con.JSON(http.StatusNotFound, gin.H{ + // "error": "no pod exists", + // }) + // return + //} + if err != nil { + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in reading all pods from etcd", + }) + return + } + if len(res) == 0 { + con.JSON(http.StatusOK, v1.BaseResponse[[]*v1.Pod]{Data: nil}) + return + } + + for _, v := range res { + pod_id := v + all_pod_keystr := prefix + "/pods/" + pod_id + + res, err := ser.store_cli.Get(all_pod_keystr) + if res == "" || err != nil { + log.Println("pod does not exist") + con.JSON(http.StatusNotFound, gin.H{ + "error": "pod does not exist", + }) + return + } + var pod v1.Pod + err = json.Unmarshal([]byte(res), &pod) + if err != nil { + log.Println("error in json unmarshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json unmarshal", + }) + return + } + all_pod_str = append(all_pod_str, pod) + } + + con.JSON(http.StatusOK, + gin.H{ + "data": all_pod_str, + }, + ) + +} +func (ser *kubeApiServer) GetPodHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + log.Println("GetPod") + + np := con.Params.ByName("namespace") + pod_name := con.Params.ByName("podname") + + prefix := "/registry" + + namespace_pod_keystr := prefix + "/namespaces/" + np + "/pods/" + pod_name + + res, err := ser.store_cli.Get(namespace_pod_keystr) + if res == "" || err != nil { + log.Println("pod name does not exist in namespace") + con.JSON(http.StatusNotFound, gin.H{ + "error": "pod name does not exist in namespace", + }) + return + } + + pod_id := res + all_pod_keystr := prefix + "/pods/" + pod_id + + res, err = ser.store_cli.Get(all_pod_keystr) + if res == "" || err != nil { + log.Println("pod does not exist") + con.JSON(http.StatusNotFound, gin.H{ + "error": "pod does not exist", + }) + return + } + + var pod v1.Pod + err = json.Unmarshal([]byte(res), 
&pod) + if err != nil { + log.Println("error in json unmarshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json unmarshal", + }) + return + } + + con.JSON(http.StatusOK, gin.H{ + "data": pod, + }) +} +func (ser *kubeApiServer) AddPodHandler(con *gin.Context) { + // assign a pod to a node + ser.lock.Lock() + defer ser.lock.Unlock() + log.Println("Adding a new pod") + + var pod v1.Pod + err := con.ShouldBind(&pod) + if err != nil { + log.Println("something is wrong when parsing Pod") + return + } + pod_name := pod.ObjectMeta.Name + if pod_name == "" { + pod_name = Default_Podname + } + + pod.ObjectMeta.UID = (v1.UID)(uuid.NewUUID()) + + pod.ObjectMeta.CreationTimestamp = timestamp.NewTimestamp() + + pod.Status.Phase = v1.PodPending + + namespace := con.Param("namespace") + if namespace == "" { + con.JSON(http.StatusBadRequest, gin.H{ + "error": "namespace is required", + }) + return + } + if pod.Namespace == "" { + if namespace != Default_Namespace { + con.JSON(http.StatusBadRequest, gin.H{ + "error": "namespace does not match", + }) + return + } + } else { + if pod.Namespace != namespace { + con.JSON(http.StatusBadRequest, gin.H{ + "error": "namespace does not match", + }) + return + } + } + pod.Namespace = namespace + + /* fake store pod to: + 1. namespace , store the binding of podname and uid + 2. node , only uid + */ + // TODO: 加入shim 解析所谓的api格式到registry前缀的映射 + // TODO: 加入一个keystr解析函数 + // 现在可以简单的理解为将/api/v1/ 替换成/registry/ + + prefix := "/registry" + + // 全局用uid而不是podname来标识 + all_pod_keystr := prefix + "/pods/" + string(pod.ObjectMeta.UID) + + // namespace里面对应的是podname和uid的映射 + namespace_pod_keystr := prefix + "/namespaces/" + namespace + "/pods/" + pod_name + + // node里面对应的也是podname和uid的映射 + // node_pod_keystr := prefix + "/nodes/" + Default_Nodename + "/pods/" + pod_name + + // 首先查看namespace里面是否已经存在 + res, err := ser.store_cli.Get(namespace_pod_keystr) + if res != "" || err != nil { + log.Println("pod name already exists") + con.JSON(http.StatusConflict, gin.H{ + "error": "pod name already exists", + }) + return + } + // 然后写入namespace_pod_map + err = ser.store_cli.Set(namespace_pod_keystr, string(pod.ObjectMeta.UID)) + if err != nil { + log.Println("error in writing to etcd") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in writing to etcd", + }) + return + } + + // 然后写入node_pod_map + //err = ser.store_cli.Set(node_pod_keystr, string(pod.ObjectMeta.UID)) + //if err != nil { + // log.Println("error in writing to etcd") + // con.JSON(http.StatusInternalServerError, gin.H{ + // "error": "error in writing to etcd", + // }) + // return + //} + + // 最后写入pod_hub + // JSON序列化pod + pod_str, err := json.Marshal(pod) + + if err != nil { + log.Println("error in json marshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json marshal", + }) + return + } + + err = ser.store_cli.Set(all_pod_keystr, string(pod_str)) + if err != nil { + log.Println("error in writing to etcd") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in writing to etcd", + }) + return + } + + con.JSON(http.StatusCreated, gin.H{ + "message": "successfully created pod", + "UUID": pod.ObjectMeta.UID, + }) + +} + +func UpdatePodHandler(con *gin.Context) { + +} +func (ser *kubeApiServer) DeletePodHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + log.Println("DeletePod") + + np := con.Params.ByName("namespace") + pod_name := con.Params.ByName("podname") + + prefix := "/registry" + + namespace_pod_keystr := prefix + 
"/namespaces/" + np + "/pods/" + pod_name + + // node_pod_keystr := prefix + "/nodes/" + Default_Nodename + "/pods/" + pod_name + + res, err := ser.store_cli.Get(namespace_pod_keystr) + // or change return type to DeleteResponse so there is no need to check Get result + if res == "" || err != nil { + log.Println("pod name does not exist in namespace") + con.JSON(http.StatusNotFound, gin.H{ + "error": "pod name does not exist in namespace", + }) + return + } + + pod_id := res + all_pod_keystr := prefix + "/pods/" + pod_id + + err = ser.store_cli.Delete(namespace_pod_keystr) + if err != nil { + log.Println("error in deleting from etcd") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in deleting from etcd", + }) + return + } + + //res, err = ser.store_cli.Get(node_pod_keystr) + //// or change return type to DeleteResponse so there is no need to check Get result + //if res == "" || err != nil { + // log.Println("pod name does not exist in node") + // con.JSON(http.StatusNotFound, gin.H{ + // "error": "pod name does not exist in node", + // }) + // return + //} + + //err = ser.store_cli.Delete(node_pod_keystr) + //if err != nil { + // log.Println("error in deleting from etcd") + // con.JSON(http.StatusInternalServerError, gin.H{ + // "error": "error in deleting from etcd", + // }) + // return + //} + + res, err = ser.store_cli.Get(all_pod_keystr) + if res == "" || err != nil { + log.Println("pod does not exist") + con.JSON(http.StatusNotFound, gin.H{ + "error": "pod does not exist", + }) + return + } + + err = ser.store_cli.Delete(all_pod_keystr) + if err != nil { + log.Println("error in deleting from etcd") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in deleting from etcd", + }) + return + } + + con.JSON(http.StatusOK, gin.H{ + "message": "successfully deleted pod", + }) + +} + +func (ser *kubeApiServer) GetPodStatusHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + log.Println("GetPodStatus") + + // default here + np := con.Params.ByName("namespace") + pod_name := con.Params.ByName("podname") + + prefix := "/registry" + + namespace_pod_keystr := prefix + "/namespaces/" + np + "/pods/" + pod_name + + res, err := ser.store_cli.Get(namespace_pod_keystr) + if res == "" || err != nil { + log.Println("pod name does not exist in namespace") + con.JSON(http.StatusNotFound, gin.H{ + "error": "pod name does not exist in namespace", + }) + return + } + + pod_id := res + all_pod_keystr := prefix + "/pods/" + pod_id + + res, err = ser.store_cli.Get(all_pod_keystr) + if res == "" || err != nil { + log.Println("pod does not exist") + con.JSON(http.StatusNotFound, gin.H{ + "error": "pod does not exist", + }) + return + } + + var pod v1.Pod + err = json.Unmarshal([]byte(res), &pod) + if err != nil { + log.Println("error in json unmarshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json unmarshal", + }) + return + } + + pod_status := pod.Status + + con.JSON(http.StatusOK, gin.H{ + "data": pod_status, + }) + +} + +func (ser *kubeApiServer) PutPodStatusHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + log.Println("PutPodStatus") + + np := con.Params.ByName("namespace") + pod_name := con.Params.ByName("podname") + + var pod_status v1.PodStatus + err := con.ShouldBind(&pod_status) + if err != nil { + log.Println("something is wrong when parsing Pod") + return + } + + prefix := "/registry" + + namespace_pod_keystr := prefix + "/namespaces/" + np + "/pods/" + pod_name + + res, err := 
ser.store_cli.Get(namespace_pod_keystr) + + if res == "" || err != nil { + log.Println("pod name does not exist in namespace") + con.JSON(http.StatusNotFound, gin.H{ + "error": "pod name does not exist in namespace", + }) + return + } + + pod_id := res + all_pod_keystr := prefix + "/pods/" + pod_id + + res, err = ser.store_cli.Get(all_pod_keystr) + if res == "" || err != nil { + log.Println("pod does not exist") + con.JSON(http.StatusNotFound, gin.H{ + "error": "pod does not exist", + }) + return + } + + var pod v1.Pod + + err = json.Unmarshal([]byte(res), &pod) + if err != nil { + log.Println("error in json unmarshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json unmarshal", + }) + return + } + + pod.Status = pod_status + + pod_str, err := json.Marshal(pod) + if err != nil { + log.Println("error in json marshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json marshal", + }) + return + } + + err = ser.store_cli.Set(all_pod_keystr, string(pod_str)) + if err != nil { + log.Println("error in writing to etcd") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in writing to etcd", + }) + return + } + + con.JSON(http.StatusOK, gin.H{ + "message": "successfully updated pod status", + }) +} + +func (ser *kubeApiServer) GetPodsByNodeHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + // first parse nodename + log.Println("GetPodsByNode") + + node_name := con.Params.ByName("nodename") + if node_name == "" { + con.JSON(http.StatusNotFound, gin.H{ + "error": "error in parsing nodename ", + }) + return + } + log.Printf("getting info of node: %v", node_name) + + all_pod_str := make([]v1.Pod, 0) + + prefix := "/registry" + + node_pod_keystr := prefix + "/host-nodes/" + node_name + "/pods" + + // 以这个前缀去搜索所有的pod + // 得调整接口 加一个GetSubKeysValues + res, err := ser.store_cli.GetSubKeysValues(node_pod_keystr) + // + if res == nil || err != nil { + log.Println("node does not exist") + con.JSON(http.StatusNotFound, gin.H{ + "error": "node does not exist", + }) + return + } + + var keysToDelete []string + //res返回的是pod的uid,回到etcd里面找到pod的信息 + for k, v := range res { + pod_id := v + all_pod_keystr := prefix + "/pods/" + pod_id + + res, err := ser.store_cli.Get(all_pod_keystr) + //if res == "" || err != nil { + // log.Println("pod does not exist") + // con.JSON(http.StatusNotFound, gin.H{ + // "error": "pod does not exist", + // }) + // return + //} + if res == "" || err != nil { + // lazy deleting + keysToDelete = append(keysToDelete, k) + continue + } + var pod v1.Pod + err = json.Unmarshal([]byte(res), &pod) + if err != nil { + log.Println("error in json unmarshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json unmarshal", + }) + return + } + all_pod_str = append(all_pod_str, pod) + } + + for _, mapping := range keysToDelete { + _ = ser.store_cli.Delete(mapping) + } + + // then return all of them + con.JSON(http.StatusOK, + gin.H{ + "data": all_pod_str, + }, + ) +} + +func (s *kubeApiServer) GetAllServicesHandler(c *gin.Context) { + //allSvcKey := "/registry/services" + //res, err := s.store_cli.GetSubKeysValues(allSvcKey) + //if err != nil { + // c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + // Error: "error in reading from etcd", + // }) + // return + //} + //services := make([]v1.Service, 0) + //for _, v := range res { + // var service v1.Service + // err = json.Unmarshal([]byte(v), &service) + // if err != nil { + // c.JSON(http.StatusInternalServerError, 
v1.BaseResponse[*v1.Service]{ + // Error: "error in json unmarshal", + // }) + // return + // } + // services = append(services, service) + //} + s.lock.Lock() + defer s.lock.Unlock() + services, err := s.getAllServicesFromEtcd() + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]v1.Service]{ + Error: fmt.Sprintf("error in reading all services from etcd: %v", err), + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[[]*v1.Service]{ + Data: services, + }) +} + +func (s *kubeApiServer) getAllServicesFromEtcd() ([]*v1.Service, error) { + allSvcKey := "/registry/services" + res, err := s.store_cli.GetSubKeysValues(allSvcKey) + if err != nil { + return nil, err + } + services := make([]*v1.Service, 0) + for _, v := range res { + var service v1.Service + err = json.Unmarshal([]byte(v), &service) + if err != nil { + return nil, err + } + services = append(services, &service) + } + return services, nil +} + +func (s *kubeApiServer) checkTypeAndPorts(service *v1.Service) error { + // 默认类型为ClusterIP + if service.Spec.Type == "" { + service.Spec.Type = v1.ServiceTypeClusterIP + } else if service.Spec.Type != v1.ServiceTypeClusterIP && service.Spec.Type != v1.ServiceTypeNodePort { + return fmt.Errorf("invalid service type %s", service.Spec.Type) + } + // 默认协议为TCP + nodePortSet := make(map[int32]struct{}) + for i, port := range service.Spec.Ports { + if port.Protocol == "" { + service.Spec.Ports[i].Protocol = v1.ProtocolTCP + } + if port.TargetPort < v1.PortMin || port.TargetPort > v1.PortMax { + return fmt.Errorf("invalid target port %d", port.TargetPort) + } + if port.Port < v1.PortMin || port.Port > v1.PortMax { + return fmt.Errorf("invalid port %d", port.Port) + } + if service.Spec.Type == v1.ServiceTypeNodePort { + if port.NodePort < v1.NodePortMin || port.NodePort > v1.NodePortMax { + return fmt.Errorf("invalid node port %d", port.NodePort) + } + if _, ok := nodePortSet[port.NodePort]; ok { + return fmt.Errorf("there are conflicting node ports: %v", port.NodePort) + } + nodePortSet[port.NodePort] = struct{}{} + } + } + // 检查所有service node port是否有冲突 + if service.Spec.Type == v1.ServiceTypeNodePort { + allServices, err := s.getAllServicesFromEtcd() + if err != nil { + return fmt.Errorf("error in reading all services from etcd: %v", err) + } + for _, svc := range allServices { + if svc.Spec.Type != v1.ServiceTypeNodePort { + continue + } + for _, port := range svc.Spec.Ports { + if _, ok := nodePortSet[port.NodePort]; ok { + return fmt.Errorf("node port %v conflicts with service %v", port.NodePort, svc.Name) + } + } + } + } + return nil +} + +func (s *kubeApiServer) AddServiceHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + var service v1.Service + err := c.ShouldBind(&service) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Service]{ + Error: "invalid service json", + }) + return + } + if service.Name == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Service]{ + Error: "service name is required", + }) + return + } + if service.Kind != "Service" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Service]{ + Error: "invalid api object kind", + }) + return + } + + namespace := c.Param("namespace") + if namespace == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Service]{ + Error: "namespace is required", + }) + return + } + if service.Namespace != "" { + if service.Namespace != namespace { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Service]{ + Error: fmt.Sprintf("namespace mismatch, spec: %s, url: 
%s", service.Namespace, namespace), + }) + return + } + } else { + if namespace != "default" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Service]{ + Error: fmt.Sprintf("namespace mismatch, spec: empty(using default), url: %s", namespace), + }) + return + } + } + + // 存uid + namespaceSvcKey := fmt.Sprintf("/registry/namespaces/%s/services/%s", namespace, service.Name) + + // 检查service是否已经存在 + result, err := s.store_cli.Get(namespaceSvcKey) + if err == nil && result != "" { + c.JSON(http.StatusConflict, v1.BaseResponse[*v1.Service]{ + Error: fmt.Sprintf("service %s/%s already exists", namespace, service.Name), + }) + return + } + + err = s.checkTypeAndPorts(&service) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Service]{ + Error: err.Error(), + }) + return + } + + // 查bitmap,分配ip + bitmapKey := "/registry/IPPool/bitmap" + bitmapString, err := s.store_cli.Get(bitmapKey) + bitmap := []byte(bitmapString) + // etcd里没存bitmap,则初始化bitmap + if err != nil || bitmapString == "" { + initialBitmap := make([]byte, utils.IPPoolSize/8) + bitmap = initialBitmap + err = s.store_cli.Set(bitmapKey, string(initialBitmap)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in writing to etcd", + }) + return + } + } + ip, err := utils.AllocIP(bitmap) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Service]{ + Error: "no available ip", + }) + return + } + err = s.store_cli.Set(bitmapKey, string(bitmap)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in writing to etcd", + }) + return + } + service.Spec.ClusterIP = ip + + service.Namespace = namespace + service.UID = v1.UID(uuid.NewUUID()) + service.CreationTimestamp = timestamp.NewTimestamp() + + serviceJson, err := json.Marshal(service) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in json marshal", + }) + return + } + + // 存service json + allSvcKey := fmt.Sprintf("/registry/services/%s", service.UID) + + // allSvcKey存service json + err = s.store_cli.Set(allSvcKey, string(serviceJson)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in writing to etcd", + }) + return + } + + // namespaceSvcKey存uid + err = s.store_cli.Set(namespaceSvcKey, string(service.UID)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in writing to etcd", + }) + return + } + + c.JSON(http.StatusCreated, v1.BaseResponse[*v1.Service]{ + Data: &service, + }) +} + +func (s *kubeApiServer) DeleteServiceHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + namespace := c.Param("namespace") + serviceName := c.Param("servicename") + if namespace == "" || serviceName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Service]{ + Error: "namespace and service name cannot be empty", + }) + return + } + namespaceSvcKey := fmt.Sprintf("/registry/namespaces/%s/services/%s", namespace, serviceName) + uid, err := s.store_cli.Get(namespaceSvcKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.Service]{ + Error: fmt.Sprintf("service %s/%s not found", namespace, serviceName), + }) + return + } + + allSvcKey := fmt.Sprintf("/registry/services/%s", uid) + serviceJson, err := s.store_cli.Get(allSvcKey) + if err != nil || serviceJson == "" { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: 
"error in reading service from etcd", + }) + return + } + + var service v1.Service + err = json.Unmarshal([]byte(serviceJson), &service) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in json unmarshal", + }) + return + } + + // 释放ip + bitmapKey := "/registry/IPPool/bitmap" + bitmapString, err := s.store_cli.Get(bitmapKey) + if err != nil || bitmapString == "" { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in reading ip pool bit map from etcd", + }) + return + } + bitmap := []byte(bitmapString) + err = utils.FreeIP(service.Spec.ClusterIP, bitmap) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: fmt.Sprintf("error in free ip %s", service.Spec.ClusterIP), + }) + return + } + err = s.store_cli.Set(bitmapKey, string(bitmap)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in writing ip pool bitmap to etcd", + }) + return + } + + err = s.store_cli.Delete(namespaceSvcKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in deleting service from etcd", + }) + return + } + err = s.store_cli.Delete(allSvcKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in deleting service from etcd", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.Service]{ + Data: &service, + }) +} + +func (s *kubeApiServer) getAllDNSFromEtcd() ([]*v1.DNS, error) { + allDNSKey := "/registry/dns" + res, err := s.store_cli.GetSubKeysValues(allDNSKey) + if err != nil { + return nil, err + } + dnsSlice := make([]*v1.DNS, 0) + for _, v := range res { + var dns v1.DNS + err = json.Unmarshal([]byte(v), &dns) + if err != nil { + return nil, err + } + dnsSlice = append(dnsSlice, &dns) + } + return dnsSlice, nil +} + +func (s *kubeApiServer) GetAllDNSHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + allDNS, err := s.getAllDNSFromEtcd() + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.DNS]{ + Error: fmt.Sprintf("error in reading all dns from etcd: %v", err), + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[[]*v1.DNS]{ + Data: allDNS, + }) +} + +func (s *kubeApiServer) validateDNS(dns *v1.DNS, urlNamespace string) error { + if dns.Name == "" { + return fmt.Errorf("dns name is required") + } + if dns.Namespace == "" { + if urlNamespace != "default" { + return fmt.Errorf("namespace mismatch, spec: empty(using default), url: %s", urlNamespace) + } + } else { + if dns.Namespace != urlNamespace { + return fmt.Errorf("namespace mismatch, spec: %s, url: %s", dns.Namespace, urlNamespace) + } + } + if dns.Kind != "DNS" { + return fmt.Errorf("invalid api object kind") + } + if len(dns.Spec.Rules) == 0 { + return fmt.Errorf("no rules for this dns") + } + for _, rule := range dns.Spec.Rules { + if rule.Host == "" { + return fmt.Errorf("host cannot be empty") + } + for _, path := range rule.Paths { + if path.Path == "" { + return fmt.Errorf("path cannot be empty") + } + if path.Backend.Service.Name == "" { + return fmt.Errorf("service name cannot be empty") + } + if path.Backend.Service.Port < v1.PortMin || path.Backend.Service.Port > v1.PortMax { + return fmt.Errorf("invalid service port %d", path.Backend.Service.Port) + } + } + } + return nil +} + +func (s *kubeApiServer) AddDNSHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + var dns v1.DNS + err 
:= c.ShouldBind(&dns) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.DNS]{ + Error: "invalid dns json", + }) + return + } + namespace := c.Param("namespace") + if namespace == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.DNS]{ + Error: "namespace is required", + }) + return + } + + // 参数校验 + err = s.validateDNS(&dns, namespace) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.DNS]{ + Error: err.Error(), + }) + return + } + dns.Namespace = namespace + + // 检查dns是否已经存在 + namespaceDNSKey := fmt.Sprintf("/registry/namespaces/%s/dns/%s", dns.Namespace, dns.Name) + uid, err := s.store_cli.Get(namespaceDNSKey) + if err == nil && uid != "" { + c.JSON(http.StatusConflict, v1.BaseResponse[*v1.DNS]{ + Error: fmt.Sprintf("dns %s/%s already exists", dns.Namespace, dns.Name), + }) + return + } + + // 检查域名冲突 + hostKey := "/registry/hosts" + hosts, err := s.store_cli.Get(hostKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in reading hosts from etcd", + }) + return + } + hostMap := make(map[string]struct{}) + scanner := bufio.NewScanner(strings.NewReader(hosts)) + for scanner.Scan() { + hostMap[scanner.Text()] = struct{}{} + } + for _, rule := range dns.Spec.Rules { + if _, ok := hostMap[rule.Host]; ok { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.DNS]{ + Error: fmt.Sprintf("host %s conflicts with existing host", rule.Host), + }) + return + } + hosts += rule.Host + "\n" + } + + // 检查每个path的service backend是否存在 + for _, rule := range dns.Spec.Rules { + for _, path := range rule.Paths { + svcName := path.Backend.Service.Name + namespaceSvcKey := fmt.Sprintf("/registry/namespaces/%s/services/%s", namespace, svcName) + uid, err := s.store_cli.Get(namespaceSvcKey) + if err != nil || uid == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.DNS]{ + Error: fmt.Sprintf("service %s/%s not found", namespace, svcName), + }) + return + } + allSvcKey := fmt.Sprintf("/registry/services/%s", uid) + svcJson, err := s.store_cli.Get(allSvcKey) + if err != nil || svcJson == "" { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in reading service from etcd", + }) + return + } + var svc v1.Service + err = json.Unmarshal([]byte(svcJson), &svc) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in json unmarshal", + }) + return + } + isPortMatched := false + for _, port := range svc.Spec.Ports { + if port.Port == path.Backend.Service.Port { + isPortMatched = true + break + } + } + if !isPortMatched { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.DNS]{ + Error: fmt.Sprintf("service %s does not have port %d", svcName, path.Backend.Service.Port), + }) + return + } + } + } + + // now create it! 
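+	// Persist the new DNS object: the updated hosts list goes to /registry/hosts,
+	// the name -> UID mapping to /registry/namespaces/<namespace>/dns/<name>, and
+	// the DNS JSON to /registry/dns/<uid>. A failure in any of these steps returns
+	// 500 without rolling back the earlier writes.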
+ dns.CreationTimestamp = timestamp.NewTimestamp() + dns.UID = v1.UID(uuid.NewUUID()) + + // 存hosts + err = s.store_cli.Set(hostKey, hosts) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in writing hosts to etcd", + }) + return + } + + // 存uuid + err = s.store_cli.Set(namespaceDNSKey, string(dns.UID)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in writing to etcd", + }) + return + } + + // 存dns json + allDNSKey := fmt.Sprintf("/registry/dns/%s", dns.UID) + dnsJson, err := json.Marshal(dns) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in json marshal", + }) + return + } + err = s.store_cli.Set(allDNSKey, string(dnsJson)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in writing to etcd", + }) + return + } + c.JSON(http.StatusCreated, v1.BaseResponse[*v1.DNS]{ + Data: &dns, + }) +} + +func (s *kubeApiServer) DeleteDNSHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + namespace := c.Param("namespace") + dnsName := c.Param("dnsname") + if namespace == "" || dnsName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.DNS]{ + Error: "namespace and dns name cannot be empty", + }) + return + } + namespaceDNSKey := fmt.Sprintf("/registry/namespaces/%s/dns/%s", namespace, dnsName) + uid, err := s.store_cli.Get(namespaceDNSKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.DNS]{ + Error: fmt.Sprintf("dns %s/%s not found", namespace, dnsName), + }) + return + } + + allDNSKey := fmt.Sprintf("/registry/dns/%s", uid) + dnsJson, err := s.store_cli.Get(allDNSKey) + if err != nil || dnsJson == "" { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in reading dns from etcd", + }) + return + } + + var dns v1.DNS + err = json.Unmarshal([]byte(dnsJson), &dns) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in json unmarshal", + }) + return + } + + // 删除host + hostKey := "/registry/hosts" + hosts, err := s.store_cli.Get(hostKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in reading hosts from etcd", + }) + return + } + hostsToDelete := make(map[string]struct{}) + for _, rule := range dns.Spec.Rules { + hostsToDelete[rule.Host] = struct{}{} + } + scanner := bufio.NewScanner(strings.NewReader(hosts)) + newHosts := "" + for scanner.Scan() { + host := scanner.Text() + if _, ok := hostsToDelete[host]; !ok { + newHosts += host + "\n" + } + } + err = s.store_cli.Set(hostKey, newHosts) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in writing hosts to etcd", + }) + return + } + + // 删除dns + err = s.store_cli.Delete(namespaceDNSKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in deleting dns from etcd", + }) + return + } + err = s.store_cli.Delete(allDNSKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.DNS]{ + Error: "error in deleting dns from etcd", + }) + return + } + + c.JSON(http.StatusOK, v1.BaseResponse[*v1.DNS]{ + Data: &dns, + }) +} + +func (s *kubeApiServer) RegisterNodeHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + var n v1.Node + err := c.ShouldBind(&n) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Node]{ 
+ Error: "invalid node json", + }) + return + } + address := c.Query("address") + if net.ParseIP(address) == nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Node]{ + Error: "invalid ip address", + }) + } + bitmapKey := "/registry/NodePool/bitmap" + bitmapStr, err := s.store_cli.Get(bitmapKey) + bitmap := []byte(bitmapStr) + if err != nil || bitmapStr == "" { + initialBitmap := make([]byte, utils.NodePoolSize/8) + bitmap = initialBitmap + err = s.store_cli.Set(bitmapKey, string(initialBitmap)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in writing bitmap to etcd", + }) + return + } + } + nodeName, err := utils.AllocNode(bitmap) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: err.Error(), + }) + return + } + err = s.store_cli.Set(bitmapKey, string(bitmap)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Service]{ + Error: "error in writing bitmap to etcd", + }) + return + } + node := &v1.Node{ + TypeMeta: v1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: nodeName, + Namespace: Default_Namespace, + UID: v1.UID(uuid.NewUUID()), + CreationTimestamp: timestamp.NewTimestamp(), + Labels: n.Labels, + }, + Status: v1.NodeStatus{ + Address: address, + }, + } + allNodeKey := fmt.Sprintf("/registry/nodes/%v", node.UID) + namespaceNodeKey := fmt.Sprintf("/registry/namespaces/%v/nodes/%v", node.Namespace, node.Name) + nodeJson, err := json.Marshal(node) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in json marshal", + }) + return + } + err = s.store_cli.Set(allNodeKey, string(nodeJson)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in writing node to etcd", + }) + return + } + err = s.store_cli.Set(namespaceNodeKey, string(node.UID)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in writing node to etcd", + }) + return + } + c.JSON(http.StatusCreated, v1.BaseResponse[*v1.Node]{ + Data: node, + }) +} + +func (s *kubeApiServer) UnregisterNodeHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + nodeName := c.Query("nodename") + namespace := Default_Namespace + if nodeName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Node]{ + Error: "node name or namespace cannot be empty", + }) + return + } + namespaceNodeKey := fmt.Sprintf("/registry/namespaces/%s/nodes/%s", namespace, nodeName) + uid, err := s.store_cli.Get(namespaceNodeKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.Node]{ + Error: fmt.Sprintf("node %s/%s not found", namespace, nodeName), + }) + return + } + // 把所有pod变为unscheduled + nodePodKey := fmt.Sprintf("/registry/host-nodes/%s/pods", nodeName) + err = s.store_cli.DeleteSubKeys(nodePodKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in deleting node-pod mappings from etcd", + }) + return + } + // 删除pod + err = s.store_cli.Delete(namespaceNodeKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in deleting node from etcd", + }) + return + } + allNodeKey := fmt.Sprintf("/registry/nodes/%s", uid) + err = s.store_cli.Delete(allNodeKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in deleting node from etcd", + }) + 
return + } + // 释放node + bitmapKey := "/registry/NodePool/bitmap" + bitmapStr, err := s.store_cli.Get(bitmapKey) + if err != nil || bitmapStr == "" { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in reading node pool bitmap from etcd", + }) + return + } + bitmap := []byte(bitmapStr) + err = utils.FreeNode(nodeName, bitmap) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: err.Error(), + }) + return + } + err = s.store_cli.Set(bitmapKey, string(bitmap)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in writing node pool bitmap to etcd", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.Node]{}) +} + +func (s *kubeApiServer) GetAllNodesHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + allNodeKey := "/registry/nodes" + res, err := s.store_cli.GetSubKeysValues(allNodeKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in reading from etcd", + }) + return + } + nodes := make([]*v1.Node, 0) + for _, v := range res { + var node v1.Node + err = json.Unmarshal([]byte(v), &node) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Node]{ + Error: "error in json unmarshal", + }) + return + } + nodes = append(nodes, &node) + } + sort.Slice(nodes, func(i, j int) bool { + return nodes[i].Name < nodes[j].Name + }) + c.JSON(http.StatusOK, v1.BaseResponse[[]*v1.Node]{ + Data: nodes, + }) +} + +func (s *kubeApiServer) SchedulePodToNodeHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + podUid := c.Query("podUid") + nodeName := c.Query("nodename") + if podUid == "" || nodeName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[interface{}]{ + Error: "pod uid or node name cannot be empty", + }) + return + } + podKey := fmt.Sprintf("/registry/pods/%s", podUid) + podJson, err := s.store_cli.Get(podKey) + if err != nil || podJson == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[interface{}]{ + Error: fmt.Sprintf("pod %s not found", podUid), + }) + return + } + + var pod v1.Pod + err = json.Unmarshal([]byte(podJson), &pod) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[interface{}]{ + Error: "error in json unmarshal", + }) + return + } + + res, err := s.store_cli.GetSubKeysValues("/registry/host-nodes") + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[interface{}]{ + Error: "error in reading from etcd", + }) + return + } + for k, v := range res { + if v == podUid { + err = s.store_cli.Delete(k) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[interface{}]{ + Error: "error in deleting old node-pod mapping from etcd", + }) + return + } + } + } + + nodePodKey := fmt.Sprintf("/registry/host-nodes/%s/pods/%s_%s", nodeName, pod.Namespace, pod.Name) + err = s.store_cli.Set(nodePodKey, podUid) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[interface{}]{ + Error: "error in writing new node-pod mapping to etcd", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[interface{}]{}) +} + +func (s *kubeApiServer) GetUnscheduledPodHandler(c *gin.Context) { + // 获取所有pod + s.lock.Lock() + defer s.lock.Unlock() + allPodKey := "/registry/pods" + res, err := s.store_cli.GetSubKeysValues(allPodKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.Pod]{ + Error: "error in reading from etcd", + }) + return + } + // 
通过/registry/host-nodes获取所有已被调度的pod uid + hostKeyPrefix := "/registry/host-nodes" + hostRes, err := s.store_cli.GetSubKeysValues(hostKeyPrefix) + scheduledUidSet := make(map[string]struct{}) + for _, uid := range hostRes { + scheduledUidSet[uid] = struct{}{} + } + var unscheduledPods []*v1.Pod + for _, v := range res { + var pod v1.Pod + err = json.Unmarshal([]byte(v), &pod) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.Pod]{ + Error: "error in json unmarshal", + }) + return + } + if _, ok := scheduledUidSet[string(pod.UID)]; !ok { + unscheduledPods = append(unscheduledPods, &pod) + } + } + c.JSON(http.StatusOK, v1.BaseResponse[[]*v1.Pod]{ + Data: unscheduledPods, + }) +} + +func (ser *kubeApiServer) GetAllReplicaSetsHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + log.Println("GetAllReplicaSets") + all_replicaset_str := make([]v1.ReplicaSet, 0) + prefix := "/registry" + all_replicaset_keystr := prefix + "/replicaset" + + res, err := ser.store_cli.GetSubKeysValues(all_replicaset_keystr) + if err != nil { + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in reading all replicas sets in etcd", + }) + return + } + + //namespace_replicaset_keystr := prefix + "/namespace/" + Default_Namespace + "/replicasets/" + // + //res2, err2 := ser.store_cli.Get(namespace_replicaset_keystr) + //if err2 != nil { + // log.Println("replica set name already exists") + // con.JSON(http.StatusConflict, gin.H{ + // "error": "replica set name already exists", + // }) + // return + //} + //for k, v := range res2 { + // println("-----------------------------------") + // println(k) + // println("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") + // println(v) + // + //} + + if len(res) == 0 { + con.JSON(http.StatusOK, v1.BaseResponse[[]*v1.ReplicaSet]{Data: nil}) + return + } + + for _, v := range res { + var rps v1.ReplicaSet + err = json.Unmarshal([]byte(v), &rps) + if err != nil { + log.Println("error in json unmarshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json unmarshal", + }) + return + } + println(rps.UID) + all_replicaset_str = append(all_replicaset_str, rps) + } + + con.JSON(http.StatusOK, + gin.H{ + "data": all_replicaset_str, + }, + ) +} + +func (ser *kubeApiServer) AddReplicaSetHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + log.Println("Adding a new replica set") + var rps v1.ReplicaSet + err := con.ShouldBind(&rps) + if err != nil { + log.Println("something is wrong when parsing replica set") + return + } + rps_name := rps.ObjectMeta.Name + if rps_name == "" { + rps_name = Default_Podname + } + + log.Print("replica set selector: ", rps.Spec.Selector) + rps_label := rps.Spec.Selector.MatchLabels + if rps_label == nil { + con.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "replica set labels are required", + }) + return + } + + //rps_template := rps.Template + //if rps_template == empty { + // con.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.ReplicaSet]{ + // Error: "replica set template is required", + // }) + // return + //} + + rps.ObjectMeta.UID = (v1.UID)(uuid.NewUUID()) + rps.ObjectMeta.CreationTimestamp = timestamp.NewTimestamp() + prefix := "/registry" + + all_replicaset_keystr := prefix + "/replicaset/" + string(rps.ObjectMeta.UID) + namespace_replicaset_keystr := prefix + "/namespaces/" + Default_Namespace + "/replicasets/" + rps_name + + res, err := ser.store_cli.Get(namespace_replicaset_keystr) + if res != "" || err != nil { + 
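+		// A non-empty value means the name is already registered; a failed etcd read is
+		// conservatively reported as the same conflict rather than as a storage error.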
log.Println("replica set name already exists") + con.JSON(http.StatusConflict, gin.H{ + "error": "replica set name already exists", + }) + return + } + + err = ser.store_cli.Set(namespace_replicaset_keystr, string(rps.ObjectMeta.UID)) + if err != nil { + log.Println("error in writing to etcd") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in writing to etcd", + }) + return + } + + rps_str, err := json.Marshal(rps) + + if err != nil { + log.Println("error in json marshal") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in json marshal", + }) + return + } + + err = ser.store_cli.Set(all_replicaset_keystr, string(rps_str)) + if err != nil { + log.Println("error in writing to etcd") + con.JSON(http.StatusInternalServerError, gin.H{ + "error": "error in writing to etcd", + }) + return + } + + con.JSON(http.StatusCreated, gin.H{ + "message": "successfully created replica set", + "UUID": rps.ObjectMeta.UID, + }) + +} + +func (s *kubeApiServer) GetReplicaSetHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + namespace := c.Param("namespace") + rpsName := c.Param("replicasetname") + if namespace == "" || rpsName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "namespace and replica set name cannot be empty", + }) + return + } + namespaceRpsKey := fmt.Sprintf("/registry/namespaces/%s/replicasets/%s", namespace, rpsName) + uid, err := s.store_cli.Get(namespaceRpsKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.ReplicaSet]{ + Error: fmt.Sprintf("replica set %s/%s not found", namespace, rpsName), + }) + return + } + + allRpsKey := fmt.Sprintf("/registry/replicaset/%s", uid) + rpsJson, err := s.store_cli.Get(allRpsKey) + if err != nil || rpsJson == "" { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in reading replica set from etcd", + }) + return + } + var rps v1.ReplicaSet + err = json.Unmarshal([]byte(rpsJson), &rps) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in json unmarshal", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.ReplicaSet]{ + Data: &rps, + }) + +} + +func (s *kubeApiServer) UpdateReplicaSetHandler(c *gin.Context) { + // 目前的更新方式 + // 1.更新replica set的replicas数量 + s.lock.Lock() + defer s.lock.Unlock() + // 获取name . 
namespace 和待更新的数量int + namespace := c.Param("namespace") + rpsName := c.Param("replicasetname") + if namespace == "" || rpsName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "namespace and replica set name cannot be empty", + }) + return + } + // PUT方法获取int + replicas, err := strconv.Atoi(c.Query("replicas")) + + fmt.Printf("next replicas: %d\n", replicas) + + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "replicas must be an integer", + }) + return + } + + // 从etcd中获取replica set + namespaceRpsKey := fmt.Sprintf("/registry/namespaces/%s/replicasets/%s", namespace, rpsName) + uid, err := s.store_cli.Get(namespaceRpsKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.ReplicaSet]{ + Error: fmt.Sprintf("replica set %s/%s not found", namespace, rpsName), + }) + return + } + + allRpsKey := fmt.Sprintf("/registry/replicaset/%s", uid) + rpsJson, err := s.store_cli.Get(allRpsKey) + if err != nil || rpsJson == "" { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in reading replica set from etcd", + }) + return + } + var rps v1.ReplicaSet + err = json.Unmarshal([]byte(rpsJson), &rps) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in json unmarshal", + }) + return + } + + // 更新replicas数量 + rps.Spec.Replicas = (int32)(replicas) + + rpsRawStr, err := json.Marshal(rps) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in json marshal", + }) + return + } + // 存回 + err = s.store_cli.Set(allRpsKey, string(rpsRawStr)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in writing replica set to etcd", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.ReplicaSet]{ + Data: &rps, + }) +} + +func (ser *kubeApiServer) DeleteReplicaSetHandler(con *gin.Context) { + ser.lock.Lock() + defer ser.lock.Unlock() + namespace := con.Param("namespace") + rpsName := con.Param("replicasetname") + if namespace == "" || rpsName == "" { + con.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "namespace and replica set name cannot be empty", + }) + return + } + namespaceRpsKey := fmt.Sprintf("/registry/namespaces/%s/replicasets/%s", Default_Namespace, rpsName) + uid, err := ser.store_cli.Get(namespaceRpsKey) + if err != nil || uid == "" { + con.JSON(http.StatusNotFound, v1.BaseResponse[*v1.ReplicaSet]{ + Error: fmt.Sprintf("replica set %s/%s not found", namespace, rpsName), + }) + return + } + + allRpsKey := fmt.Sprintf("/registry/replicaset/%s", uid) + rpsJson, err := ser.store_cli.Get(allRpsKey) + if err != nil || rpsJson == "" { + con.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in reading replica set from etcd", + }) + return + } + var rps v1.ReplicaSet + err = json.Unmarshal([]byte(rpsJson), &rps) + if err != nil { + con.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in json unmarshal", + }) + return + } + + err = ser.store_cli.Delete(namespaceRpsKey) + if err != nil { + con.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in deleting replica set from etcd", + }) + return + } + err = ser.store_cli.Delete(allRpsKey) + if err != nil { + con.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.ReplicaSet]{ + Error: "error in deleting replica set from 
etcd", + }) + return + } + con.JSON(http.StatusOK, v1.BaseResponse[*v1.ReplicaSet]{ + Data: &rps, + }) +} + +// // 持久化scaled Podname && PodUID +// func (s *kubeApiServer) storeScaledPod(namespace, deploymentName, podName, podUID string) error { + +// } + +func (s *kubeApiServer) validateVirtualService(vs *v1.VirtualService, urlNamespace string) error { + if vs.Name == "" { + return fmt.Errorf("virtual service name is required") + } + if vs.Namespace == "" { + if urlNamespace != "default" { + return fmt.Errorf("namespace mismatch, spec: empty(using default), url: %s", urlNamespace) + } + } else { + if vs.Namespace != urlNamespace { + return fmt.Errorf("namespace mismatch, spec: %s, url: %s", vs.Namespace, urlNamespace) + } + } + if vs.Kind != "VirtualService" { + return fmt.Errorf("invalid api object kind") + } + return nil +} + +func (s *kubeApiServer) AddVirtualServiceHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + var vs v1.VirtualService + err := c.ShouldBind(&vs) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.VirtualService]{ + Error: "invalid virtual service json", + }) + return + } + namespace := c.Param("namespace") + if namespace == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.VirtualService]{ + Error: "namespace is required", + }) + return + } + err = s.validateVirtualService(&vs, namespace) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.VirtualService]{ + Error: err.Error(), + }) + return + } + namespaceKey := fmt.Sprintf("/registry/namespaces/%s/virtualservices/%s", namespace, vs.Name) + uid, err := s.store_cli.Get(namespaceKey) + if err == nil && uid != "" { + c.JSON(http.StatusConflict, v1.BaseResponse[*v1.VirtualService]{ + Error: fmt.Sprintf("virtual service %s/%s already exists", namespace, vs.Name), + }) + return + } + + svcName := vs.Spec.ServiceRef + svcUID, err := s.store_cli.Get(fmt.Sprintf("/registry/namespaces/%s/services/%s", namespace, svcName)) + if err != nil || svcUID == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.VirtualService]{ + Error: fmt.Sprintf("service %s/%s not found", namespace, svcName), + }) + return + } + svcJson, err := s.store_cli.Get(fmt.Sprintf("/registry/services/%s", svcUID)) + if err != nil || svcJson == "" { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.VirtualService]{ + Error: "error in reading service from etcd", + }) + return + } + var svc v1.Service + _ = json.Unmarshal([]byte(svcJson), &svc) + isPortMatched := false + for _, port := range svc.Spec.Ports { + if port.Port == vs.Spec.Port && port.Protocol == v1.ProtocolTCP { + isPortMatched = true + break + } + } + if !isPortMatched { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.VirtualService]{ + Error: fmt.Sprintf("service %s does not have tcp port %d", svcName, vs.Spec.Port), + }) + return + } + + for _, subset := range vs.Spec.Subsets { + subsetUID, err := s.store_cli.Get(fmt.Sprintf("/registry/namespaces/%s/subsets/%s", namespace, subset.Name)) + if err != nil || subsetUID == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.VirtualService]{ + Error: fmt.Sprintf("subset %s/%s not found", namespace, subset.Name), + }) + return + } + if subset.URL == nil && subset.Weight == nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.VirtualService]{ + Error: "subset url and weight cannot be both empty", + }) + return + } + } + + vs.Namespace = namespace + vs.CreationTimestamp = timestamp.NewTimestamp() + vs.UID = v1.UID(uuid.NewUUID()) + allKey := 
fmt.Sprintf("/registry/virtualservices/%s", vs.UID) + vsJson, _ := json.Marshal(vs) + err = s.store_cli.Set(namespaceKey, string(vs.UID)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.VirtualService]{ + Error: "error in writing virtual service to etcd", + }) + return + } + err = s.store_cli.Set(allKey, string(vsJson)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.VirtualService]{ + Error: "error in writing virtual service to etcd", + }) + return + } + c.JSON(http.StatusCreated, v1.BaseResponse[*v1.VirtualService]{ + Data: &vs, + }) +} + +func (s *kubeApiServer) DeleteVirtualServiceHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + namespace := c.Param("namespace") + vsName := c.Param("virtualservicename") + if namespace == "" || vsName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.VirtualService]{ + Error: "namespace and virtual service name cannot be empty", + }) + return + } + namespaceKey := fmt.Sprintf("/registry/namespaces/%s/virtualservices/%s", namespace, vsName) + uid, err := s.store_cli.Get(namespaceKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.VirtualService]{ + Error: fmt.Sprintf("virtual service %s/%s not found", namespace, vsName), + }) + return + } + allKey := fmt.Sprintf("/registry/virtualservices/%s", uid) + vsJson, _ := s.store_cli.Get(allKey) + var vs v1.VirtualService + _ = json.Unmarshal([]byte(vsJson), &vs) + err = s.store_cli.Delete(namespaceKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.VirtualService]{ + Error: "error in deleting virtual service from etcd", + }) + return + } + err = s.store_cli.Delete(allKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.VirtualService]{ + Error: "error in deleting virtual service from etcd", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.VirtualService]{ + Data: &vs, + }) +} + +func (s *kubeApiServer) GetAllVirtualServicesHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + allKey := "/registry/virtualservices" + res, err := s.store_cli.GetSubKeysValues(allKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.VirtualService]{ + Error: "error in reading from etcd", + }) + return + } + vss := make([]*v1.VirtualService, 0) + for _, v := range res { + var vs v1.VirtualService + err = json.Unmarshal([]byte(v), &vs) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.VirtualService]{ + Error: "error in json unmarshal", + }) + return + } + vss = append(vss, &vs) + } + c.JSON(http.StatusOK, v1.BaseResponse[[]*v1.VirtualService]{ + Data: vss, + }) +} + +func (s *kubeApiServer) validateSubset(subset *v1.Subset, namespace string) error { + if subset.Name == "" { + return fmt.Errorf("subset name is required") + } + if subset.Namespace == "" { + if namespace != "default" { + return fmt.Errorf("namespace mismatch, spec: empty(using default), url: %s", namespace) + } + } else { + if subset.Namespace != namespace { + return fmt.Errorf("namespace mismatch, spec: %s, url: %s", subset.Namespace, namespace) + } + } + if subset.Kind != "Subset" { + return fmt.Errorf("invalid api object kind") + } + return nil +} + +func (s *kubeApiServer) AddSubsetHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + var subset v1.Subset + err := c.ShouldBind(&subset) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Subset]{ + Error: "invalid 
subset json", + }) + return + } + namespace := c.Param("namespace") + if namespace == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Subset]{ + Error: "namespace is required", + }) + return + } + err = s.validateSubset(&subset, namespace) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Subset]{ + Error: err.Error(), + }) + return + } + namespaceKey := fmt.Sprintf("/registry/namespaces/%s/subsets/%s", namespace, subset.Name) + isUpdate := false + var oldUID v1.UID + uid, err := s.store_cli.Get(namespaceKey) + if err == nil && uid != "" { + //c.JSON(http.StatusConflict, v1.BaseResponse[*v1.Subset]{ + // Error: fmt.Sprintf("subset %s/%s already exists", namespace, subset.Name), + //}) + //return + isUpdate = true + oldUID = v1.UID(uid) + } + + for _, podName := range subset.Spec.Pods { + podUID, err := s.store_cli.Get(fmt.Sprintf("/registry/namespaces/%s/pods/%s", namespace, podName)) + if err != nil || podUID == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Subset]{ + Error: fmt.Sprintf("pod %s/%s not found", namespace, podName), + }) + return + } + } + + subset.Namespace = namespace + subset.CreationTimestamp = timestamp.NewTimestamp() + + if isUpdate { + subset.UID = oldUID + } else { + subset.UID = v1.UID(uuid.NewUUID()) + } + + allKey := fmt.Sprintf("/registry/subsets/%s", subset.UID) + subsetJson, _ := json.Marshal(subset) + err = s.store_cli.Set(namespaceKey, string(subset.UID)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Subset]{ + Error: "error in writing subset to etcd", + }) + return + } + err = s.store_cli.Set(allKey, string(subsetJson)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Subset]{ + Error: "error in writing subset to etcd", + }) + return + } + c.JSON(http.StatusCreated, v1.BaseResponse[*v1.Subset]{ + Data: &subset, + }) +} + +func (s *kubeApiServer) GetAllSubsetsHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + allKey := "/registry/subsets" + res, err := s.store_cli.GetSubKeysValues(allKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.Subset]{ + Error: "error in reading from etcd", + }) + return + } + subsets := make([]*v1.Subset, 0) + for _, v := range res { + var subset v1.Subset + err = json.Unmarshal([]byte(v), &subset) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.Subset]{ + Error: "error in json unmarshal", + }) + return + } + subsets = append(subsets, &subset) + } + c.JSON(http.StatusOK, v1.BaseResponse[[]*v1.Subset]{ + Data: subsets, + }) +} + +func (s *kubeApiServer) GetSubsetHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + namespace := c.Param("namespace") + subsetName := c.Param("subsetname") + if namespace == "" || subsetName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Subset]{ + Error: "namespace and subset name cannot be empty", + }) + return + } + namespaceKey := fmt.Sprintf("/registry/namespaces/%s/subsets/%s", namespace, subsetName) + uid, err := s.store_cli.Get(namespaceKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.Subset]{ + Error: fmt.Sprintf("subset %s/%s not found", namespace, subsetName), + }) + return + } + allKey := fmt.Sprintf("/registry/subsets/%s", uid) + subsetJson, _ := s.store_cli.Get(allKey) + var subset v1.Subset + _ = json.Unmarshal([]byte(subsetJson), &subset) + c.JSON(http.StatusOK, v1.BaseResponse[*v1.Subset]{ + Data: &subset, + }) +} + +func (s *kubeApiServer) 
DeleteSubsetHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + namespace := c.Param("namespace") + subsetName := c.Param("subsetname") + if namespace == "" || subsetName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.Subset]{ + Error: "namespace and subset name cannot be empty", + }) + return + } + namespaceKey := fmt.Sprintf("/registry/namespaces/%s/subsets/%s", namespace, subsetName) + uid, err := s.store_cli.Get(namespaceKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.Subset]{ + Error: fmt.Sprintf("subset %s/%s not found", namespace, subsetName), + }) + return + } + allKey := fmt.Sprintf("/registry/subsets/%s", uid) + subsetJson, _ := s.store_cli.Get(allKey) + var subset v1.Subset + _ = json.Unmarshal([]byte(subsetJson), &subset) + err = s.store_cli.Delete(namespaceKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Subset]{ + Error: "error in deleting subset from etcd", + }) + return + } + err = s.store_cli.Delete(allKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.Subset]{ + Error: "error in deleting subset from etcd", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.Subset]{ + Data: &subset, + }) +} + +func (s *kubeApiServer) SaveSidecarMapping(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + var mapping v1.SidecarMapping + err := c.ShouldBind(&mapping) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.SidecarMapping]{ + Error: "invalid sidecar mapping json", + }) + return + } + key := "/registry/sidecar-mapping" + mappingJson, err := json.Marshal(mapping) + err = s.store_cli.Set(key, string(mappingJson)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.SidecarMapping]{ + Error: "error in writing sidecar mapping to etcd", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.SidecarMapping]{}) +} + +func (s *kubeApiServer) GetSidecarMapping(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + key := "/registry/sidecar-mapping" + mappingJson, err := s.store_cli.Get(key) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.SidecarMapping]{ + Error: "error in reading sidecar mapping from etcd", + }) + return + } + if mappingJson == "" { + mappingJson = "{}" + } + var mapping v1.SidecarMapping + err = json.Unmarshal([]byte(mappingJson), &mapping) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.SidecarMapping]{ + Error: "error in json unmarshal", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.SidecarMapping]{ + Data: &mapping, + }) +} + +func (s *kubeApiServer) GetSidecarServiceNameMapping(c *gin.Context) { + services, err := s.getAllServicesFromEtcd() + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[v1.SidecarServiceNameMapping]{ + Error: err.Error(), + }) + return + } + mapping := make(v1.SidecarServiceNameMapping) + for _, svc := range services { + mapping[svc.Name] = svc.Spec.ClusterIP + } + c.JSON(http.StatusOK, v1.BaseResponse[v1.SidecarServiceNameMapping]{ + Data: mapping, + }) +} + +func (s *kubeApiServer) AddRollingUpdateHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + var ru v1.RollingUpdate + err := c.ShouldBind(&ru) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.RollingUpdate]{ + Error: "invalid rolling update json", + }) + return + } + namespace := c.Param("namespace") + if namespace == "" { + 
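+		// Unlike the DNS and virtual-service handlers above, rolling updates do no spec-vs-URL
+		// namespace cross check; an empty URL namespace is simply rejected here.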
c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.RollingUpdate]{ + Error: "namespace is required", + }) + return + } + namespaceKey := fmt.Sprintf("/registry/namespaces/%s/rollingupdates/%s", namespace, ru.Name) + uid, err := s.store_cli.Get(namespaceKey) + if err == nil && uid != "" { + c.JSON(http.StatusConflict, v1.BaseResponse[*v1.RollingUpdate]{ + Error: fmt.Sprintf("rolling update %s/%s already exists", namespace, ru.Name), + }) + return + } + ru.Namespace = namespace + ru.CreationTimestamp = timestamp.NewTimestamp() + ru.UID = v1.UID(uuid.NewUUID()) + ru.Status.Phase = v1.RollingUpdatePending + allKey := fmt.Sprintf("/registry/rollingupdates/%s", ru.UID) + ruJson, _ := json.Marshal(ru) + err = s.store_cli.Set(allKey, string(ruJson)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.RollingUpdate]{ + Error: "error in writing rolling update to etcd", + }) + return + } + err = s.store_cli.Set(namespaceKey, string(ru.UID)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.RollingUpdate]{ + Error: "error in writing rolling update to etcd", + }) + return + } + c.JSON(http.StatusCreated, v1.BaseResponse[*v1.RollingUpdate]{ + Data: &ru, + }) +} + +func (s *kubeApiServer) GetAllRollingUpdatesHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + allKey := "/registry/rollingupdates" + res, err := s.store_cli.GetSubKeysValues(allKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.RollingUpdate]{ + Error: "error in reading from etcd", + }) + return + } + rus := make([]*v1.RollingUpdate, 0) + for _, v := range res { + var ru v1.RollingUpdate + err = json.Unmarshal([]byte(v), &ru) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[[]*v1.RollingUpdate]{ + Error: "error in json unmarshal", + }) + return + } + rus = append(rus, &ru) + } + c.JSON(http.StatusOK, v1.BaseResponse[[]*v1.RollingUpdate]{ + Data: rus, + }) +} + +func (s *kubeApiServer) UpdateRollingUpdateStatusHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + var ruStatus v1.RollingUpdateStatus + err := c.ShouldBind(&ruStatus) + if err != nil { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.RollingUpdateStatus]{ + Error: "invalid rolling update status json", + }) + return + } + namespace := c.Param("namespace") + ruName := c.Param("rollingupdatename") + if namespace == "" || ruName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.RollingUpdateStatus]{ + Error: "namespace and rolling update name cannot be empty", + }) + return + } + namespaceKey := fmt.Sprintf("/registry/namespaces/%s/rollingupdates/%s", namespace, ruName) + uid, err := s.store_cli.Get(namespaceKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.RollingUpdateStatus]{ + Error: fmt.Sprintf("rolling update %s/%s not found", namespace, ruName), + }) + return + } + allKey := fmt.Sprintf("/registry/rollingupdates/%s", uid) + ruJson, err := s.store_cli.Get(allKey) + if err != nil || ruJson == "" { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.RollingUpdateStatus]{ + Error: "error in reading rolling update from etcd", + }) + return + } + var ru v1.RollingUpdate + _ = json.Unmarshal([]byte(ruJson), &ru) + + ru.Status = ruStatus + newRuJson, _ := json.Marshal(ru) + err = s.store_cli.Set(allKey, string(newRuJson)) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.RollingUpdateStatus]{ + Error: "error in writing rolling update to etcd", + }) + 
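+		// Write-back failed: the stored rolling update keeps its previous status and the caller gets a 500.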
return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.RollingUpdateStatus]{ + Data: &ruStatus, + }) +} + +func (s *kubeApiServer) DeleteRollingUpdateHandler(c *gin.Context) { + s.lock.Lock() + defer s.lock.Unlock() + namespace := c.Param("namespace") + ruName := c.Param("rollingupdatename") + if namespace == "" || ruName == "" { + c.JSON(http.StatusBadRequest, v1.BaseResponse[*v1.RollingUpdate]{ + Error: "namespace and rolling update name cannot be empty", + }) + return + } + namespaceKey := fmt.Sprintf("/registry/namespaces/%s/rollingupdates/%s", namespace, ruName) + uid, err := s.store_cli.Get(namespaceKey) + if err != nil || uid == "" { + c.JSON(http.StatusNotFound, v1.BaseResponse[*v1.RollingUpdate]{ + Error: fmt.Sprintf("rolling update %s/%s not found", namespace, ruName), + }) + return + } + allKey := fmt.Sprintf("/registry/rollingupdates/%s", uid) + ruJson, _ := s.store_cli.Get(allKey) + var ru v1.RollingUpdate + _ = json.Unmarshal([]byte(ruJson), &ru) + err = s.store_cli.Delete(namespaceKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.RollingUpdate]{ + Error: "error in deleting rolling update from etcd", + }) + return + } + err = s.store_cli.Delete(allKey) + if err != nil { + c.JSON(http.StatusInternalServerError, v1.BaseResponse[*v1.RollingUpdate]{ + Error: "error in deleting rolling update from etcd", + }) + return + } + c.JSON(http.StatusOK, v1.BaseResponse[*v1.RollingUpdate]{ + Data: &ru, + }) +} diff --git a/pkg/kubeapiserver/etcd/client.go b/pkg/kubeapiserver/etcd/client.go new file mode 100644 index 0000000..2a83b18 --- /dev/null +++ b/pkg/kubeapiserver/etcd/client.go @@ -0,0 +1,146 @@ +package etcd + +// kube-apiserver的etcd存储客户端 +import ( + "context" + "log" + "time" + + cliv3 "go.etcd.io/etcd/client/v3" +) + +// etcdcli 配置 +type Config struct { + // etcd地址 + Endpoints []string + // 超时时间 + DialTimeout time.Duration +} + +// 默认配置 +var defaultCfg = Config{ + Endpoints: []string{"localhost:2379"}, + DialTimeout: 3 * time.Second, +} + +// etcd 存储接口 +type Store interface { + // 获取key的值 + Get(key string) (string, error) + // 设置key的值 + Set(key, value string) error + // 删除key + Delete(key string) error + + // TODO: cascade 操作 + + // // 获取key的子key + // GetSubKeys(key string) ([]string, error) + // // 获取key的子key的值 + GetSubKeysValues(key string) (map[string]string, error) + // // 设置key的子key的值 + // SetSubKeysValues(key string, values map[string]string) error + // // 删除key的子key + DeleteSubKeys(key string) error + + // TODO: watch 连接 +} +type store struct { + // etcd配置 + cfg Config + // etcd客户端 + cli *cliv3.Client +} + +func NewEtcdStore() (*store, error) { + + var store store + + err := store.init(defaultCfg) + + return &store, err +} + +// 初始化etcd存储 +func (s *store) init(cfg Config) error { + log.Println("etcd store init") + // 创建etcd客户端 + cli, err := cliv3.New(cliv3.Config{ + Endpoints: cfg.Endpoints, + DialTimeout: s.cfg.DialTimeout, + }) + if err != nil { + return err + } + s.cli = cli + return nil +} + +// 获取key的值 +func (s *store) Get(key string) (string, error) { + log.Println("get key in store", key) + kv := cliv3.NewKV(s.cli) + res, err := kv.Get(context.TODO(), key) + + if err != nil { + return "", err + } + // 返回key的值 + if res.Count > 0 { + return string(res.Kvs[0].Value), nil + } + + return "", nil +} + +// 设置key的值 +func (s *store) Set(key, value string) error { + log.Println("set key in store", key, value) + kv := cliv3.NewKV(s.cli) + _, err := kv.Put(context.TODO(), key, value) + if err != nil { + return err + } + + return nil +} + +// 
删除key +func (s *store) Delete(key string) error { + log.Println("delete key in store", key) + kv := cliv3.NewKV(s.cli) + _, err := kv.Delete(context.TODO(), key) + if err != nil { + return err + } + + return nil +} + +func (s *store) GetSubKeysValues(key string) (map[string]string, error) { + log.Println("get subkeys values in store", key) + kv := cliv3.NewKV(s.cli) + res, err := kv.Get(context.TODO(), key, cliv3.WithPrefix()) + + if err != nil { + return nil, err + } + + values := make(map[string]string) + for _, kv := range res.Kvs { + values[string(kv.Key)] = string(kv.Value) + } + + return values, nil + +} + +func (s *store) DeleteSubKeys(key string) error { + kv := cliv3.NewKV(s.cli) + _, err := kv.Delete(context.TODO(), key, cliv3.WithPrefix()) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/kubeapiserver/metrics/stream_data.go b/pkg/kubeapiserver/metrics/stream_data.go new file mode 100644 index 0000000..85add88 --- /dev/null +++ b/pkg/kubeapiserver/metrics/stream_data.go @@ -0,0 +1,117 @@ +package metrics + +import ( + "fmt" + v1 "minikubernetes/pkg/api/v1" + "sort" + "sync" + "time" +) + +type MetricsDatabase interface { + // MetricsDatabase接口 + // 保存metrics + SavePodMetrics(podMetrics []*v1.PodRawMetrics) error + // 获取某个Pod在metrics的timeStamp前n秒内的记录 + GetPodMetrics(podId v1.UID, timeStamp time.Time, n int32) (*v1.PodRawMetrics, error) + + // 锁的实现应该隐藏在内部 +} + +const ( + DefaultWindowSize int = 60 +) + +type metricsDb struct { + // 保存metrics的map + metrics map[v1.UID]v1.PodRawMetrics + // 读写锁 + rwLock sync.RWMutex +} + +func NewMetricsDb() (*metricsDb, error) { + return (&metricsDb{ + metrics: make(map[v1.UID]v1.PodRawMetrics), + }), nil +} + +// // 初始化map +// func (db *MetricsDb) Init() { +// db.metrics =make(map[v1.UID]v1.PodRawMetrics,0) +// } + +func (db *metricsDb) SavePodMetrics(podMetrics []*v1.PodRawMetrics) error { + // 先上写锁 + db.rwLock.Lock() + defer db.rwLock.Unlock() + // 根据ContainerInfo中的键值对,保存metrics + + // 对于podMetrics中的每一个podId: + for _, podItem := range podMetrics { + // 如果podId不存在,则直接插入 + if _, ok := db.metrics[podItem.UID]; !ok { + fmt.Printf("podId %v not found, insert directly\n", podItem.UID) + fmt.Printf("podItem: %v\n", podItem) + db.metrics[podItem.UID] = *podItem + fmt.Printf("len of metrics: %v\n", len(db.metrics)) + } else { + // 如果podId存在,则对ContainerInfo进行处理 + for cName, cInfo := range podItem.ContainerInfo { + // 信任containerInfo中对齐,直接append + db.metrics[podItem.UID].ContainerInfo[cName] = append(db.metrics[podItem.UID].ContainerInfo[cName], cInfo...) 
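+				// Samples are assumed to arrive in timestamp order, so a plain append keeps each
+				// container's series sorted; GetPodMetrics below relies on that for sort.Search.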
+ } + } + // 检查过长 + for cName := range podItem.ContainerInfo { + // 如果超过窗口大小,删除最早的 + curLen := len(db.metrics[podItem.UID].ContainerInfo[cName]) + + if curLen > DefaultWindowSize { + fmt.Printf("podId %v container %v has too many records, delete the earliest\n", podItem.UID, cName) + db.metrics[podItem.UID].ContainerInfo[cName] = db.metrics[podItem.UID].ContainerInfo[cName][curLen-DefaultWindowSize:] + } + } + } + + // 先假设不会出错 + return nil +} + +func (db *metricsDb) GetPodMetrics(podId v1.UID, timeStamp time.Time, n int32) (*v1.PodRawMetrics, error) { + // 上读锁 + db.rwLock.RLock() + defer db.rwLock.RUnlock() + + fmt.Printf("len of metrics: %v\n", len(db.metrics)) + for k, v := range db.metrics { + fmt.Printf("key: %v, value: %v\n", k, v) + } + fmt.Printf("podId: %v\n", podId) + + if podMetrics, ok := db.metrics[podId]; ok { + + // 由时间戳来判断是否在n秒内 metrics是按照时间戳递增的 + if len(podMetrics.ContainerInfo) == 0 { + return nil, fmt.Errorf("containerInfo is empty") + } + fmt.Printf("参数获取到的时间戳:%v\n", timeStamp) + earliestValidTS := timeStamp.Add(-time.Duration(n) * time.Second) + fmt.Printf("需要查询到的最早的时间戳:%v\n", earliestValidTS) + for cName, containerInfo := range podMetrics.ContainerInfo { + // 找到比earliestValidTS大的最小的index + l := sort.Search(len(containerInfo), func(i int) bool { + return containerInfo[i].TimeStamp.After(earliestValidTS) + }) + r := sort.Search(len(containerInfo), func(i int) bool { + return containerInfo[i].TimeStamp.After(timeStamp) + }) + + podMetrics.ContainerInfo[cName] = containerInfo[l:r] + } + + return &podMetrics, nil + + } else { + return nil, fmt.Errorf("podId not found") + } +} diff --git a/pkg/kubeapiserver/utils/alloc.go b/pkg/kubeapiserver/utils/alloc.go new file mode 100644 index 0000000..d164b47 --- /dev/null +++ b/pkg/kubeapiserver/utils/alloc.go @@ -0,0 +1,104 @@ +package utils + +import "fmt" + +const IPPrefix = "100.0.0." 
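+
+// The ClusterIP and node-name pools are tracked as bitmaps: bit i marks whether
+// "100.0.0.<i>" (resp. "node-<i>") is in use, so a pool of size N takes N/8 bytes.
+// Alloc* scans for the first clear bit, sets it and returns the corresponding name;
+// Free* parses the index back out of the name and clears the bit. An illustrative
+// sketch of the bit operations (not necessarily the exact expressions used below):
+//   inUse := bitmap[idx/8]&(1<<(idx%8)) != 0 // test
+//   bitmap[idx/8] |= 1 << (idx % 8)          // mark allocated
+//   bitmap[idx/8] &^= 1 << (idx % 8)         // mark free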
+ +const IPPoolSize = 256 + +const NodePrefix = "node-" + +const NodePoolSize = 64 + +func AllocIP(bitmap []byte) (string, error) { + if len(bitmap) != IPPoolSize/8 { + return "", fmt.Errorf("invalid bitmap size") + } + for idx, b := range bitmap { + if b == 0xff { + continue + } + for i := 0; i < 8; i++ { + if b&(1<= IPPoolSize { + return fmt.Errorf("invalid ip") + } + b := bitmap[idx/8] + if b&(1<= size { + return fmt.Errorf("invalid index") + } + b := bitmap[idx/8] + if b&(1<= 0; i-- { + ip := fmt.Sprintf("%s%d", IPPrefix, i) + err := FreeIP(ip, bitmap) + if err != nil { + t.Fatalf("free ip failed: %v", err) + } + t.Logf("free ip: %s", ip) + } + for i := 0; i < IPPoolSize; i++ { + _, err := AllocIP(bitmap) + if err != nil { + t.Fatalf("alloc ip failed: %v", err) + } + } +} + +func TestFreeAndAlloc(t *testing.T) { + bitmap := make([]byte, IPPoolSize/8) + for i := 0; i < IPPoolSize; i++ { + _, err := AllocIP(bitmap) + if err != nil { + t.Fatalf("alloc ip failed: %v", err) + } + } + err := FreeIP("100.0.0.98", bitmap) + if err != nil { + t.Fatalf("free ip failed: %v", err) + } + ip, err := AllocIP(bitmap) + if err != nil { + t.Fatalf("alloc ip failed: %v", err) + } + if ip != "100.0.0.98" { + t.Fatalf("free and alloc failed: %s", ip) + } +} diff --git a/pkg/kubeclient/client.go b/pkg/kubeclient/client.go new file mode 100644 index 0000000..2c6aa60 --- /dev/null +++ b/pkg/kubeclient/client.go @@ -0,0 +1,1037 @@ +package kubeclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubectl/utils" + "net/http" +) + +type Client interface { + GetAllPods() ([]*v1.Pod, error) + GetPod(name, namespace string) (*v1.Pod, error) + AddPod(pod v1.Pod) error + DeletePod(name, namespace string) error + + GetAllUnscheduledPods() ([]*v1.Pod, error) + + GetAllDNS() ([]*v1.DNS, error) + GetDNS(name, namespace string) (*v1.DNS, error) + AddDNS(dns v1.DNS) error + DeleteDNS(name, namespace string) error + + GetAllNodes() ([]*v1.Node, error) + AddPodToNode(pod v1.Pod, node v1.Node) error + + GetAllServices() ([]*v1.Service, error) + GetService(name, namespace string) (*v1.Service, error) + AddService(service v1.Service) error + DeleteService(name, namespace string) error + + GetAllReplicaSets() ([]*v1.ReplicaSet, error) + GetReplicaSet(name, namespace string) (*v1.ReplicaSet, error) + AddReplicaSet(replicaSet v1.ReplicaSet) error + DeleteReplicaSet(name, namespace string) error + UpdateReplicaSet(name, namespace string, repNum int32) error + + GetAllHPAScalers() ([]*v1.HorizontalPodAutoscaler, error) + GetHPAScaler(name, namespace string) (*v1.HorizontalPodAutoscaler, error) + AddHPAScaler(hpa v1.HorizontalPodAutoscaler) error + DeleteHPAScaler(name, namespace string) error + + UploadPodMetrics(metrics []*v1.PodRawMetrics) error + GetPodMetrics(v1.MetricsQuery) (*v1.PodRawMetrics, error) + + GetSidecarMapping() (v1.SidecarMapping, error) + GetAllVirtualServices() ([]*v1.VirtualService, error) + GetSubsetByName(name, namespace string) (*v1.Subset, error) + + AddSidecarMapping(maps v1.SidecarMapping) error + + GetSidecarServiceNameMapping() (v1.SidecarServiceNameMapping, error) + + GetAllRollingUpdates() ([]*v1.RollingUpdate, error) + AddRollingUpdate(rollingUpdate *v1.RollingUpdate) error + DeleteRollingUpdate(name, namespace string) error + UpdateRollingUpdateStatus(name, namespace string, status *v1.RollingUpdateStatus) error + + GetAllSubsets() ([]*v1.Subset, error) + AddSubset(subset *v1.Subset) error + DeleteSubset(subset *v1.Subset) error + 
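+	// The *ByNameNp variants take a resource name and namespace ("Np") instead of a full API object.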
DeleteSubsetByNameNp(subsetName, nameSpace string) error + + GetVirtualService(name, namespace string) (*v1.VirtualService, error) + AddVirtualService(virtualService *v1.VirtualService) error + DeleteVirtualService(virtualService *v1.VirtualService) error + DeleteVirtualServiceByNameNp(vsName, nameSpace string) error +} + +type client struct { + apiServerIP string +} + +func NewClient(apiServerIP string) Client { + return &client{ + apiServerIP: apiServerIP, + } +} + +func (c *client) GetAllPods() ([]*v1.Pod, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/pods", c.apiServerIP)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.Pod] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get pods failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) GetPod(name, namespace string) (*v1.Pod, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/pods/%s", c.apiServerIP, namespace, name)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[*v1.Pod] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get pod error: %v", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) AddPod(pod v1.Pod) error { + + var namespace string + if pod.Namespace == "" { + namespace = "default" + } else { + namespace = pod.Namespace + } + jsonBytes, err := utils.Pod2JSON(&pod) + if err != nil { + return err + } + // POST to API server + url := fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/pods", c.apiServerIP, namespace) + req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBytes)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("error: %v", resp.Status) + } + return nil +} + +func (c *client) DeletePod(name, namespace string) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/pods/%s", c.apiServerIP, namespace, name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete pod error: %v", resp.Status) + } + return nil +} + +func (c *client) GetAllUnscheduledPods() ([]*v1.Pod, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/pods/unscheduled", c.apiServerIP)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.Pod] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get unscheduled pods failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) GetAllDNS() ([]*v1.DNS, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/dns", c.apiServerIP)) + if err != nil { + return nil, err + } + defer 
resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.DNS] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get dns failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) GetDNS(name, namespace string) (*v1.DNS, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/dns/%s", c.apiServerIP, namespace, name)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[*v1.DNS] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get dns error: %v", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) AddDNS(dns v1.DNS) error { + if dns.Namespace == "" { + dns.Namespace = "default" + } + + dnsJson, _ := json.Marshal(dns) + + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/dns", c.apiServerIP, dns.Namespace), bytes.NewBuffer(dnsJson)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var baseResponse v1.BaseResponse[v1.DNS] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("add dns error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) DeleteDNS(name, namespace string) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/dns/%s", c.apiServerIP, namespace, name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var baseResponse v1.BaseResponse[v1.DNS] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete dns error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) GetAllNodes() ([]*v1.Node, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/nodes", c.apiServerIP)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.Node] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get nodes failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) AddPodToNode(pod v1.Pod, node v1.Node) error { + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:8001/api/v1/schedule?podUid=%s&nodename=%s", c.apiServerIP, pod.UID, node.Name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("add pod to node error: %v", resp.Status) + } + return nil +} + +func (c *client) GetAllServices() ([]*v1.Service, error) { + url := 
fmt.Sprintf("http://%s:8001/api/v1/services", c.apiServerIP) + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.Service] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get services failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) GetService(name, namespace string) (*v1.Service, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/services/%s", c.apiServerIP, namespace, name)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[*v1.Service] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get service error: %v", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) AddService(service v1.Service) error { + if service.Namespace == "" { + service.Namespace = "default" + } + + serviceJson, err := json.Marshal(service) + + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/services", c.apiServerIP, service.Namespace), bytes.NewBuffer(serviceJson)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var baseResponse v1.BaseResponse[v1.Service] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("add service error: %v", baseResponse.Error) + } + return nil +} +func (c *client) DeleteService(name, namespace string) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/services/%s", c.apiServerIP, namespace, name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var baseResponse v1.BaseResponse[v1.Service] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete service error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) GetAllReplicaSets() ([]*v1.ReplicaSet, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/replicasets", c.apiServerIP)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.ReplicaSet] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get replica sets failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) GetReplicaSet(name, namespace string) (*v1.ReplicaSet, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/replicasets/%s", c.apiServerIP, namespace, name)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, 
err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var baseResponse v1.BaseResponse[v1.ReplicaSet] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get replica set error: %v", baseResponse.Error) + } + return &baseResponse.Data, nil +} + +func (c *client) AddReplicaSet(replicaSet v1.ReplicaSet) error { + if replicaSet.Namespace == "" { + replicaSet.Namespace = "default" + } + + replicaSetJson, _ := json.Marshal(replicaSet) + + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/replicasets", c.apiServerIP, replicaSet.Namespace), bytes.NewBuffer(replicaSetJson)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var baseResponse v1.BaseResponse[v1.Service] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("add replica set error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) DeleteReplicaSet(name, namespace string) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/replicasets/%s", c.apiServerIP, namespace, name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var baseResponse v1.BaseResponse[v1.ReplicaSet] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete replica set error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) UpdateReplicaSet(name, namespace string, repNum int32) error { + req, err := http.NewRequest("PUT", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/replicasets/%s", c.apiServerIP, namespace, name), nil) + if err != nil { + return err + } + + query := req.URL.Query() + query.Add("replicas", fmt.Sprint(repNum)) + req.URL.RawQuery = query.Encode() + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("update replica set error: %v", resp.Status) + } + return nil +} + +func (c *client) GetAllHPAScalers() ([]*v1.HorizontalPodAutoscaler, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/scaling", c.apiServerIP)) + + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.HorizontalPodAutoscaler] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get hpa scalers failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} +func (c *client) GetHPAScaler(name, namespace string) (*v1.HorizontalPodAutoscaler, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/scaling/scalingname/%s", c.apiServerIP, namespace, name)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse 
v1.BaseResponse[*v1.HorizontalPodAutoscaler] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get hpa scaler error: %v", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) AddHPAScaler(hpa v1.HorizontalPodAutoscaler) error { + if hpa.Namespace == "" { + hpa.Namespace = "default" + } + + hpaJson, _ := json.Marshal(hpa) + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/scaling", c.apiServerIP, hpa.Namespace), bytes.NewBuffer(hpaJson)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var baseResponse v1.BaseResponse[v1.HorizontalPodAutoscaler] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("add hpa scaler error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) DeleteHPAScaler(name, namespace string) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/scaling/scalingname/%s", c.apiServerIP, namespace, name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var baseResponse v1.BaseResponse[v1.HorizontalPodAutoscaler] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete hpa scaler error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) UploadPodMetrics(metrics []*v1.PodRawMetrics) error { + url := fmt.Sprintf("http://%s:8001/api/v1/stats/data", c.apiServerIP) + + metricsStr, _ := json.Marshal(metrics) + + // fmt.Printf("upload metrics str: %s\n", string(metricsStr)) + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(metricsStr)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("upload metrics failed, statusCode: %d", resp.StatusCode) + } + + return nil +} + +func (c *client) GetPodMetrics(metricsQry v1.MetricsQuery) (*v1.PodRawMetrics, error) { + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s:8001/api/v1/stats/data", c.apiServerIP), nil) + if err != nil { + return nil, err + } + query := req.URL.Query() + + query.Add("uid", fmt.Sprint(metricsQry.UID)) + query.Add("timestamp", metricsQry.TimeStamp.UTC().Format("2006-01-02T15:04:05.99999999Z")) + query.Add("window", fmt.Sprint(metricsQry.Window)) + req.URL.RawQuery = query.Encode() + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[*v1.PodRawMetrics] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get pod metrics failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) GetSidecarMapping() (v1.SidecarMapping, error) { + resp, err := 
http.Get(fmt.Sprintf("http://%s:8001/api/v1/sidecar-mapping", c.apiServerIP)) + if err != nil { + return v1.SidecarMapping{}, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return v1.SidecarMapping{}, err + } + var baseResponse v1.BaseResponse[v1.SidecarMapping] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return v1.SidecarMapping{}, err + } + if resp.StatusCode != http.StatusOK { + return v1.SidecarMapping{}, fmt.Errorf("get sidecar mapping failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) GetAllVirtualServices() ([]*v1.VirtualService, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/virtualservices", c.apiServerIP)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.VirtualService] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get virtual services failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) GetSubsetByName(name, namespace string) (*v1.Subset, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/subsets/%s", c.apiServerIP, namespace, name)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[*v1.Subset] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get virtual services failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) AddSidecarMapping(maps v1.SidecarMapping) error { + jsonBytes, err := json.Marshal(&maps) + if err != nil { + return err + } + // POST to API server + url := fmt.Sprintf("http://%s:8001/api/v1/sidecar-mapping", c.apiServerIP) + req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBytes)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("error: %v", resp.Status) + } + return nil +} + +func (c *client) GetSidecarServiceNameMapping() (v1.SidecarServiceNameMapping, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/sidecar-service-name-mapping", c.apiServerIP)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[v1.SidecarServiceNameMapping] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get sidecar service name mapping failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) GetAllRollingUpdates() ([]*v1.RollingUpdate, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/rollingupdates", c.apiServerIP)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.RollingUpdate] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + 
if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get rolling updates failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) AddRollingUpdate(rollingUpdate *v1.RollingUpdate) error { + if rollingUpdate.Namespace == "" { + rollingUpdate.Namespace = "default" + } + rollingUpdateJson, _ := json.Marshal(rollingUpdate) + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/rollingupdates", c.apiServerIP, rollingUpdate.Namespace), bytes.NewBuffer(rollingUpdateJson)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + var baseResponse v1.BaseResponse[v1.RollingUpdate] + err = json.NewDecoder(resp.Body).Decode(&baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("add rolling update error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) DeleteRollingUpdate(name, namespace string) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/rollingupdates/%s", c.apiServerIP, namespace, name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + var baseResponse v1.BaseResponse[v1.RollingUpdate] + err = json.NewDecoder(resp.Body).Decode(&baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete rolling update error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) UpdateRollingUpdateStatus(name, namespace string, status *v1.RollingUpdateStatus) error { + statusJson, _ := json.Marshal(status) + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/rollingupdates/%s", c.apiServerIP, namespace, name), bytes.NewBuffer(statusJson)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("update rolling update status error: %v", resp.Status) + } + return nil +} + +func (c *client) GetAllSubsets() ([]*v1.Subset, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/subsets", c.apiServerIP)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.Subset] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get subsets failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil + +} + +func (c *client) AddSubset(subset *v1.Subset) error { + subsetJson, _ := json.Marshal(subset) + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/subsets", c.apiServerIP, subset.Namespace), bytes.NewBuffer(subsetJson)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + var baseResponse v1.BaseResponse[v1.Subset] + err = json.NewDecoder(resp.Body).Decode(&baseResponse) + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("add subset error: %v", 
baseResponse.Error) + } + return nil +} +func (c *client) GetVirtualService(name, namespace string) (*v1.VirtualService, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/virtualservices/%s", c.apiServerIP, namespace, name)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[*v1.VirtualService] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get virtual service error: %v", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *client) AddVirtualService(virtualService *v1.VirtualService) error { + vsJson, _ := json.Marshal(virtualService) + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/virtualservices", c.apiServerIP, virtualService.Namespace), bytes.NewBuffer(vsJson)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + var baseResponse v1.BaseResponse[v1.VirtualService] + err = json.NewDecoder(resp.Body).Decode(&baseResponse) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("add virtual service error: %v", baseResponse.Error) + } + return nil +} + +func (c *client) DeleteSubset(subset *v1.Subset) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/subsets/%s", c.apiServerIP, subset.Namespace, subset.Name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete subset error: %v", resp.Status) + } + return nil +} + +func (c *client) DeleteSubsetByNameNp(subsetName, nameSpace string) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/subsets/%s", c.apiServerIP, nameSpace, subsetName), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete subset error: %v", resp.Status) + } + return nil +} + +func (c *client) DeleteVirtualService(virtualService *v1.VirtualService) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/virtualservices/%s", c.apiServerIP, virtualService.Namespace, virtualService.Name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete virtual service error: %v", resp.Status) + } + return nil +} + +func (c *client) DeleteVirtualServiceByNameNp(vsName, nameSpace string) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/virtualservices/%s", c.apiServerIP, nameSpace, vsName), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete virtual service error: %v", resp.Status) + } + return nil +} diff --git a/pkg/kubectl/client/client.go b/pkg/kubectl/client/client.go new file mode 100644 index 0000000..0be5715 --- /dev/null +++ b/pkg/kubectl/client/client.go @@ -0,0 +1,91 @@ +package client + 
+import ( + "bytes" + "encoding/json" + "fmt" + "io" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubectl/utils" + "net/http" +) + +type Client interface { + AddPod(jsonBytes []byte) error + DeletePod(name, namespace string) error + GetPods() ([]*v1.Pod, error) +} + +type kubeletClient struct { + apiServerIP string +} + +func NewKubectlClient(apiServerIP string) Client { + return &kubeletClient{ + apiServerIP: apiServerIP, + } +} + +func (kc *kubeletClient) AddPod(jsonBytes []byte) error { + pod, err := utils.JSON2Pod(jsonBytes) + if err != nil { + return err + } + var namespace string + if pod.Namespace == "" { + namespace = "default" + } else { + namespace = pod.Namespace + } + // POST to API server + url := fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/pods", kc.apiServerIP, namespace) + req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBytes)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + if resp.StatusCode != http.StatusCreated { + return fmt.Errorf("error: %v", resp.Status) + } + return nil +} + +func (kc *kubeletClient) DeletePod(name, namespace string) error { + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/pods/%s", kc.apiServerIP, namespace, name), nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("delete pod error: %v", resp.Status) + } + return nil +} + +func (kc *kubeletClient) GetPods() ([]*v1.Pod, error) { + resp, err := http.Get(fmt.Sprintf("http://%s:8001/api/v1/pods", kc.apiServerIP)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[[]*v1.Pod] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get pods failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go new file mode 100644 index 0000000..24444b9 --- /dev/null +++ b/pkg/kubectl/cmd/apply.go @@ -0,0 +1,204 @@ +package cmd + +import ( + "encoding/json" + "fmt" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeclient" + "minikubernetes/pkg/kubectl/utils" + "os" + + "github.com/spf13/cobra" +) + +func init() { + applyCmd.Flags().StringP("file", "f", "", "YAML file to apply resources from") + rootCmd.AddCommand(applyCmd) +} + +var applyCmd = &cobra.Command{ + Use: "apply", + Short: "Apply a configuration to a resource by yaml file", + Run: func(cmd *cobra.Command, args []string) { + filename, _ := cmd.Flags().GetString("file") + if filename == "" { + fmt.Println("Usage: kubectl apply -f [filename]") + return + } + apply(filename) + }, +} + +func apply(filename string) { + content, err := os.ReadFile(filename) + if err != nil { + fmt.Println(err) + return + } + kind := utils.GetKind(content) + if kind == "" { + fmt.Println("kind not found") + return + } + jsonBytes, err := utils.YAML2JSON(content) + if err != nil { + fmt.Println(err) + return + } + + fmt.Printf("get json: %v\n", string(jsonBytes)) + // 根据kind区分不同的资源 + switch kind { + case "Pod": + fmt.Println("Apply Pod") + var podGenerated v1.Pod + err := json.Unmarshal(jsonBytes, &podGenerated) + if err != nil { + fmt.Println(err) + return + } + 
applyPod(podGenerated) + fmt.Println("Pod Applied") + + case "Service": + fmt.Println("Apply Service") + var serviceGenerated v1.Service + err := json.Unmarshal(jsonBytes, &serviceGenerated) + if err != nil { + fmt.Println(err) + return + } + applyService(serviceGenerated) + fmt.Println("Service Applied") + + case "ReplicaSet": + fmt.Println("Apply ReplicaSet") + var replicaSetGenerated v1.ReplicaSet + err := json.Unmarshal(jsonBytes, &replicaSetGenerated) + if err != nil { + fmt.Println(err) + return + } + applyReplicaSet(replicaSetGenerated) + fmt.Println("ReplicaSet Applied") + + case "HorizontalPodAutoscaler": + fmt.Println("Apply HorizontalPodAutoscaler") + var hpaGenerated v1.HorizontalPodAutoscaler + err := json.Unmarshal(jsonBytes, &hpaGenerated) + if err != nil { + fmt.Println(err) + return + } + applyHPAScaler(hpaGenerated) + fmt.Println("HorizontalPodAutoscaler Applied") + + case "VirtualService": + fmt.Println("Apply VirtualService") + var virtualServiceGenerated v1.VirtualService + err := json.Unmarshal(jsonBytes, &virtualServiceGenerated) + if err != nil { + fmt.Println(err) + return + } + applyVirtualService(&virtualServiceGenerated) + fmt.Println("VirtualService Applied") + + case "Subset": + fmt.Println("Apply Subset") + var subsetGenerated v1.Subset + err := json.Unmarshal(jsonBytes, &subsetGenerated) + if err != nil { + fmt.Println(err) + return + } + applySubset(&subsetGenerated) + fmt.Println("Subset Applied") + + case "DNS": + fmt.Println("Apply DNS") + var dnsGenerated v1.DNS + err := json.Unmarshal(jsonBytes, &dnsGenerated) + if err != nil { + fmt.Println(err) + return + } + applyDNS(dnsGenerated) + fmt.Println("DNS Applied") + case "RollingUpdate": + fmt.Println("Apply Rolling Update") + var rollingUpdateGenerated v1.RollingUpdate + err := json.Unmarshal(jsonBytes, &rollingUpdateGenerated) + if err != nil { + fmt.Println(err) + return + } + applyRolingUpdate(&rollingUpdateGenerated) + fmt.Println("Rolling Update Applied") + + } + +} +func applyPod(pod v1.Pod) { + err := kubeclient.NewClient(apiServerIP).AddPod(pod) + if err != nil { + fmt.Println(err) + return + } +} + +func applyService(service v1.Service) { + err := kubeclient.NewClient(apiServerIP).AddService(service) + if err != nil { + fmt.Println(err) + return + } +} + +func applyReplicaSet(replicaSet v1.ReplicaSet) { + err := kubeclient.NewClient(apiServerIP).AddReplicaSet(replicaSet) + if err != nil { + fmt.Println(err) + return + } +} +func applyHPAScaler(hpa v1.HorizontalPodAutoscaler) { + err := kubeclient.NewClient(apiServerIP).AddHPAScaler(hpa) + if err != nil { + fmt.Println(err) + return + } +} + +func applyVirtualService(virtualService *v1.VirtualService) { + err := kubeclient.NewClient(apiServerIP).AddVirtualService(virtualService) + if err != nil { + fmt.Println(err) + return + } +} +func applySubset(subset *v1.Subset) { + err := kubeclient.NewClient(apiServerIP).AddSubset(subset) + if err != nil { + fmt.Println(err) + return + } +} + +func applyDNS(dns v1.DNS) { + err := kubeclient.NewClient(apiServerIP).AddDNS(dns) + if err != nil { + fmt.Println(err) + return + } +} + +// TODO 增加rolling update的部署 +func applyRolingUpdate(rol *v1.RollingUpdate) { + err := kubeclient.NewClient(apiServerIP).AddRollingUpdate(rol) + if err != nil { + fmt.Println(err) + return + } + +} diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go new file mode 100644 index 0000000..740a5dd --- /dev/null +++ b/pkg/kubectl/cmd/delete.go @@ -0,0 +1,323 @@ +package cmd + +import ( + "encoding/json" + "fmt" + v1 
"minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeclient" + "minikubernetes/pkg/kubectl/client" + "minikubernetes/pkg/kubectl/utils" + "os" + + "github.com/spf13/cobra" +) + +func init() { + deleteCommand.Flags().StringP("file", "f", "", "YAML file to delete resources from") + deleteCommand.Flags().StringP("namespace", "p", "default", "Namespace of the resources") + deleteCommand.Flags().StringP("name", "n", "", "Name of the resources") + rootCmd.AddCommand(deleteCommand) +} + +var deleteCommand = &cobra.Command{ + Use: "delete", + Short: "Delete resources", + Args: cobra.MaximumNArgs(2), + Run: func(cmd *cobra.Command, args []string) { + // 指定文件名 + filename, _ := cmd.Flags().GetString("file") + if filename != "" { + fmt.Println("Delete from file: ", filename) + deleteFromYAML(filename) + return + } + + // 不从文件中删除 + // 直接指定名字,默认在default namespace下删除 + if len(args) == 2 { + switch args[0] { + case "pod": + deletePod(args[1], "default") + case "service": + deleteService(args[1], "default") + case "hpa": + deleteHPA(args[1], "default") + case "replicaset": + deleteReplicaSet(args[1], "default") + case "virtualservice": + deleteVirtualService(args[1], "default") + case "subset": + deleteSubset(args[1], "default") + case "dns": + deleteDNS(args[1], "default") + case "rollingupdate": + deleteRollingUpdate(args[1], "default") + + } + } else if len(args) == 1 { + // 指定namespace和name + namespace, _ := cmd.Flags().GetString("namespace") + name, _ := cmd.Flags().GetString("name") + if namespace == "" { + namespace = "default" + } + if name == "" { + fmt.Println("Usage: kubectl delete [pod|service|hpa|replicaset] -np [namespace] -n [name]") + return + } + switch args[0] { + case "pod": + deletePod(name, namespace) + case "service": + deleteService(name, namespace) + case "hpa": + deleteHPA(name, namespace) + case "replicaset": + deleteReplicaSet(name, namespace) + case "virtualservice": + deleteVirtualService(name, namespace) + case "subsets": + deleteSubset(name, namespace) + case "dns": + deleteDNS(name, namespace) + case "rollingupdate": + deleteRollingUpdate(name, namespace) + } + + } else { + fmt.Println("Invalid usage.") + } + }, +} + +func deleteFromYAML(filename string) { + content, err := os.ReadFile(filename) + if err != nil { + fmt.Println(err) + return + } + kind := utils.GetKind(content) + if kind == "" { + fmt.Println("kind not found") + return + } + jsonBytes, err := utils.YAML2JSON(content) + if err != nil { + fmt.Println(err) + return + } + + fmt.Printf("get json: %v\n", string(jsonBytes)) + // 根据kind区分不同的资源 + switch kind { + case "Pod": + fmt.Println("Delete Pod") + var podGenerated v1.Pod + err := json.Unmarshal(jsonBytes, &podGenerated) + if err != nil { + fmt.Println(err) + return + } + if podGenerated.Name == "" { + fmt.Println("Pod name not found") + return + } + if podGenerated.Namespace == "" { + podGenerated.Namespace = "default" + } + deletePod(podGenerated.Name, podGenerated.Namespace) + + fmt.Println("Pod Deleted") + + case "Service": + fmt.Println("Delete Service") + var serviceGenerated v1.Service + err := json.Unmarshal(jsonBytes, &serviceGenerated) + if err != nil { + fmt.Println(err) + return + } + if serviceGenerated.Name == "" { + fmt.Println("Service name not found") + return + } + if serviceGenerated.Namespace == "" { + serviceGenerated.Namespace = "default" + } + deleteService(serviceGenerated.Name, serviceGenerated.Namespace) + fmt.Println("Service Deleted") + + case "ReplicaSet": + fmt.Println("Delete ReplicaSet") + var replicaSetGenerated v1.ReplicaSet + 
err := json.Unmarshal(jsonBytes, &replicaSetGenerated) + if err != nil { + fmt.Println(err) + return + } + if replicaSetGenerated.Name == "" { + fmt.Println("ReplicaSet name not found") + return + } + if replicaSetGenerated.Namespace == "" { + replicaSetGenerated.Namespace = "default" + } + deleteReplicaSet(replicaSetGenerated.Name, replicaSetGenerated.Namespace) + + fmt.Println("ReplicaSet Deleted") + + case "HorizontalPodAutoscaler": + fmt.Println("Delete HorizontalPodAutoscaler") + var hpaGenerated v1.HorizontalPodAutoscaler + err := json.Unmarshal(jsonBytes, &hpaGenerated) + if err != nil { + fmt.Println(err) + return + } + if hpaGenerated.Name == "" { + fmt.Println("HPA name not found") + return + } + if hpaGenerated.Namespace == "" { + hpaGenerated.Namespace = "default" + } + deleteHPA(hpaGenerated.Name, hpaGenerated.Namespace) + fmt.Println("HorizontalPodAutoscaler Deleted") + + case "VirtualService": + fmt.Println("Delete VirtualService") + var virtualServiceGenerated v1.VirtualService + err := json.Unmarshal(jsonBytes, &virtualServiceGenerated) + if err != nil { + fmt.Println(err) + return + } + if virtualServiceGenerated.Name == "" { + fmt.Println("VirtualService name not found") + return + } + if virtualServiceGenerated.Namespace == "" { + virtualServiceGenerated.Namespace = "default" + } + deleteVirtualService(virtualServiceGenerated.Name, virtualServiceGenerated.Namespace) + fmt.Println("VirtualService Deleted") + + case "Subset": + fmt.Println("Delete Subset") + var subsetGenerated v1.Subset + err := json.Unmarshal(jsonBytes, &subsetGenerated) + if err != nil { + fmt.Println(err) + return + } + if subsetGenerated.Name == "" { + fmt.Println("Subset name not found") + return + } + if subsetGenerated.Namespace == "" { + subsetGenerated.Namespace = "default" + } + deleteSubset(subsetGenerated.Name, subsetGenerated.Namespace) + fmt.Println("Subset Deleted") + case "DNS": + fmt.Println("Delete DNS") + var dnsGenerated v1.DNS + err := json.Unmarshal(jsonBytes, &dnsGenerated) + if err != nil { + fmt.Println(err) + return + } + if dnsGenerated.Name == "" { + fmt.Println("DNS name not found") + return + } + if dnsGenerated.Namespace == "" { + dnsGenerated.Namespace = "default" + } + deleteDNS(dnsGenerated.Name, dnsGenerated.Namespace) + fmt.Println("DNS Deleted") + case "RollingUpdate": + fmt.Println("Delete RollingUpdate") + var rollingUpdateGenerated v1.RollingUpdate + err := json.Unmarshal(jsonBytes, &rollingUpdateGenerated) + if err != nil { + fmt.Println(err) + return + } + if rollingUpdateGenerated.Name == "" { + fmt.Println("RollingUpdate name not found") + return + } + if rollingUpdateGenerated.Namespace == "" { + rollingUpdateGenerated.Namespace = "default" + } + deleteRollingUpdate(rollingUpdateGenerated.Name, rollingUpdateGenerated.Namespace) + fmt.Println("RollingUpdate Deleted") + } +} + +func deletePod(podName, nameSpace string) { + err := client.NewKubectlClient(apiServerIP).DeletePod(podName, nameSpace) + if err != nil { + fmt.Println(err) + return + } +} + +func deleteService(serviceName, nameSpace string) { + err := kubeclient.NewClient(apiServerIP).DeleteService(serviceName, nameSpace) + if err != nil { + fmt.Println(err) + return + } +} +func deleteHPA(hpaName, nameSpace string) { + err := kubeclient.NewClient(apiServerIP).DeleteHPAScaler(hpaName, nameSpace) + if err != nil { + fmt.Println(err) + return + } +} + +func deleteReplicaSet(replicaSetName, nameSpace string) { + + err := kubeclient.NewClient(apiServerIP).DeleteReplicaSet(replicaSetName, nameSpace) + if err 
!= nil { + fmt.Println(err) + return + } +} + +func deleteVirtualService(vsName, nameSpace string) { + err := kubeclient.NewClient(apiServerIP).DeleteVirtualServiceByNameNp(vsName, nameSpace) + if err != nil { + fmt.Println(err) + return + } +} + +func deleteSubset(subsetName, nameSpace string) { + err := kubeclient.NewClient(apiServerIP).DeleteSubsetByNameNp(subsetName, nameSpace) + if err != nil { + fmt.Println(err) + return + } +} + +func deleteDNS(dnsName, nameSpace string) { + err := kubeclient.NewClient(apiServerIP).DeleteDNS(dnsName, nameSpace) + if err != nil { + fmt.Println(err) + return + } +} + +// TODO: 增加RolliingUpdate的删除 +func deleteRollingUpdate(rollingUpdateName, nameSpace string) { + err := kubeclient.NewClient(apiServerIP).DeleteRollingUpdate(rollingUpdateName, nameSpace) + if err != nil { + fmt.Println(err) + return + } + +} diff --git a/pkg/kubectl/cmd/descibe.go b/pkg/kubectl/cmd/descibe.go new file mode 100644 index 0000000..27f3985 --- /dev/null +++ b/pkg/kubectl/cmd/descibe.go @@ -0,0 +1,163 @@ +package cmd + +// 导出原始的json文件 + +import ( + "fmt" + "minikubernetes/pkg/kubeclient" + "os" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" +) + +func init() { + + rootCmd.AddCommand(describeCommand) +} + +var describeCommand = &cobra.Command{ + Use: "describe", + Short: "Describe resources", + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if len(args) == 2 { + if args[0] == "pod" { + describePod(args[1], "default") + } + if args[0] == "service" { + describeService(args[1], "default") + } + if args[0] == "hpa" { + describeHPA(args[1], "default") + } + if args[0] == "replicaset" { + describeReplicaSet(args[1], "default") + } + if args[0] == "virtualservice" { + describeVirtualService(args[1], "default") + } + if args[0] == "subsets" { + describeSubset(args[1], "default") + } + if args[0] == "dns" { + describeDNS(args[1], "default") + } + + } else if len(args) == 1 { + // 指定namespace和name + namespace, _ := cmd.Flags().GetString("namespace") + name, _ := cmd.Flags().GetString("name") + if namespace == "" { + namespace = "default" + } + if name == "" { + fmt.Println("Usage: kubectl describe [pod|service|hpa|replicaset] -np [namespace] -n [name]") + return + } + switch args[0] { + case "pod": + describePod(name, namespace) + case "service": + describeService(name, namespace) + case "hpa": + describeHPA(name, namespace) + case "replicaset": + describeReplicaSet(name, namespace) + case "virtualservice": + describeVirtualService(name, namespace) + case "subsets": + describeSubset(name, namespace) + case "dns": + describeDNS(name, namespace) + } + + } + }, +} + +func describePod(name, namespace string) { + pod, err := kubeclient.NewClient(apiServerIP).GetPod(name, namespace) + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Name", "Namespace", "Phase", "IP"}) + table.Append([]string{pod.Name, pod.Namespace, string(pod.Status.Phase), pod.Status.PodIP}) + table.Render() +} + +func describeService(name, namespace string) { + service, err := kubeclient.NewClient(apiServerIP).GetService(name, namespace) + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Name", "Namespace", "ClusterIP", "Ports"}) + table.Append([]string{service.Name, service.Namespace, service.Spec.ClusterIP, fmt.Sprintf("%v", service.Spec.Ports)}) + table.Render() +} + +func describeHPA(name, namespace string) { + hpa, err := 
kubeclient.NewClient(apiServerIP).GetHPAScaler(name, namespace) + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Name", "Namespace", "MinReplicas", "MaxReplicas", "Metrics"}) + table.Append([]string{hpa.Name, hpa.Namespace, fmt.Sprintf("%v", hpa.Spec.MinReplicas), fmt.Sprintf("%v", hpa.Spec.MaxReplicas), fmt.Sprintf("%v", hpa.Spec.Metrics)}) + table.Render() +} + +func describeReplicaSet(name, namespace string) { + replicaSet, err := kubeclient.NewClient(apiServerIP).GetReplicaSet(name, namespace) + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Name", "Namespace", "Replicas", "Selector"}) + table.Append([]string{replicaSet.Name, replicaSet.Namespace, fmt.Sprintf("%v", replicaSet.Spec.Replicas), fmt.Sprintf("%v", replicaSet.Spec.Selector)}) + table.Render() +} + +func describeVirtualService(name, namespace string) { + virtualService, err := kubeclient.NewClient(apiServerIP).GetVirtualService(name, namespace) + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Name", "Namespace", "ServiceRef", "Port", "Subsets"}) + table.Append([]string{virtualService.Name, virtualService.Namespace, virtualService.Spec.ServiceRef, fmt.Sprintf("%v", virtualService.Spec.Port), fmt.Sprintf("%v", virtualService.Spec.Subsets)}) + table.Render() +} + +func describeSubset(name, namespace string) { + subset, err := kubeclient.NewClient(apiServerIP).GetSubsetByName(name, namespace) + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Name", "Namespace", "Labels", "Pods"}) + table.Append([]string{subset.Name, subset.Namespace, fmt.Sprintf("%v", subset.Labels), fmt.Sprintf("%v", subset.Spec.Pods)}) + table.Render() +} + +func describeDNS(name, namespace string) { + dns, err := kubeclient.NewClient(apiServerIP).GetDNS(name, namespace) + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Name", "Namespace", "Rules"}) + table.Append([]string{dns.Name, dns.Namespace, fmt.Sprintf("%v", dns.Spec.Rules)}) + table.Render() +} + +// TODO 增加rolling update diff --git a/pkg/kubectl/cmd/get.go b/pkg/kubectl/cmd/get.go new file mode 100644 index 0000000..705262d --- /dev/null +++ b/pkg/kubectl/cmd/get.go @@ -0,0 +1,254 @@ +package cmd + +import ( + "fmt" + "minikubernetes/pkg/kubeclient" + "os" + "strings" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(getCommand) +} + +var getCommand = &cobra.Command{ + Use: "get", + Short: "Get resources", + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + if len(args) == 1 { + if args[0] == "pods" || args[0] == "pod" { + getAllPods() + } + if args[0] == "nodes" || args[0] == "node" { + getAllNodes() + } + if args[0] == "services" || args[0] == "service" { + getAllServices() + } + if args[0] == "hpas" || args[0] == "hpa" { + getAllHPAScalers() + } + if args[0] == "replicasets" || args[0] == "replicaset" { + getAllReplicaSets() + } + if args[0] == "virtualservices" || args[0] == "virtualservice" { + getAllVirtualServices() + } + if args[0] == "subsets" || args[0] == "subset" { + getAllSubsets() + } + if args[0] == "dns" { + getAllDNS() + } + if args[0] == "rollingupdates" || args[0] == "rollingupdate" { + getAllRollingUpdate() + } + + } + }, +} + +func 
getAllPods() { + pods, err := kubeclient.NewClient(apiServerIP).GetAllPods() + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Kind", "Namespace", "Name", "Phase", "IP"}) + for _, pod := range pods { + table.Append([]string{"pod", pod.Namespace, pod.Name, string(pod.Status.Phase), pod.Status.PodIP}) + } + table.Render() +} + +func getAllNodes() { + nodes, err := kubeclient.NewClient(apiServerIP).GetAllNodes() + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Kind", "Name", "IP"}) + for _, node := range nodes { + table.Append([]string{"node", node.Name, node.Status.Address}) + } + table.Render() + +} + +func getAllServices() { + services, err := kubeclient.NewClient(apiServerIP).GetAllServices() + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Kind", "Namespace", "Name", "ClusterIP"}) + for _, service := range services { + table.Append([]string{"service", service.Namespace, service.Name, service.Spec.ClusterIP}) + } + table.Render() + + ipDetailsTable := tablewriter.NewWriter(os.Stdout) + ipDetailsTable.SetHeader([]string{"Name/Service", "ClusterIP", "Port", "NodePort", "Endpoint", "Protocol"}) + // todo 显示service的port和endpoint + for _, service := range services { + ip := service.Spec.ClusterIP + + for _, svcPort := range service.Spec.Ports { + + sideCarMpKey := fmt.Sprintf("%v:%v", ip, svcPort.Port) + allSidecarMap, err := kubeclient.NewClient(apiServerIP).GetSidecarMapping() + if err != nil { + fmt.Println(err) + return + } + if sidecarEPList := allSidecarMap[sideCarMpKey]; sidecarEPList != nil { + var epFmtStr string + for _, sidecarEP := range sidecarEPList { + for _, singleEP := range sidecarEP.Endpoints { + epFmtStr += fmt.Sprintf("%v:%v\n", singleEP.IP, singleEP.TargetPort) + } + } + epFmtStr = strings.TrimSpace(epFmtStr) + if service.Spec.Type == "NodePort" { + ipDetailsTable.Append([]string{service.Namespace + "/" + service.Name, ip, fmt.Sprint(svcPort.Port), fmt.Sprint(svcPort.NodePort), epFmtStr, fmt.Sprint(svcPort.Protocol)}) + } else { + ipDetailsTable.Append([]string{service.Namespace + "/" + service.Name, ip, fmt.Sprint(svcPort.Port), "N/A", epFmtStr, fmt.Sprint(svcPort.Protocol)}) + } + } else { + if service.Spec.Type == "NodePort" { + ipDetailsTable.Append([]string{service.Namespace + "/" + service.Name, ip, fmt.Sprint(svcPort.Port), fmt.Sprint(svcPort.NodePort), "N/A", fmt.Sprint(svcPort.Protocol)}) + } else { + ipDetailsTable.Append([]string{service.Namespace + "/" + service.Name, ip, fmt.Sprint(svcPort.Port), "N/A", "N/A", fmt.Sprint(svcPort.Protocol)}) + } + } + + } + } + ipDetailsTable.Render() + +} +func getAllHPAScalers() { + hpas, err := kubeclient.NewClient(apiServerIP).GetAllHPAScalers() + + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Kind", "Namespace", "Name", "Min_Replicas", "Max_Replicas", "Current_Replicas"}) + for _, hpa := range hpas { + // 找到相对应的rps的replica数量 + + rps, err := kubeclient.NewClient(apiServerIP).GetReplicaSet(hpa.Spec.ScaleTargetRef.Name, hpa.Namespace) + if err != nil { + fmt.Println(err) + return + } + currentReps := rps.Spec.Replicas + + table.Append([]string{"hpa", hpa.Namespace, hpa.Name, fmt.Sprint(hpa.Spec.MinReplicas), fmt.Sprint(hpa.Spec.MaxReplicas), fmt.Sprint(currentReps)}) + } + table.Render() +} + +func getAllReplicaSets() { + replicasets, 
err := kubeclient.NewClient(apiServerIP).GetAllReplicaSets() + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Kind", "Namespace", "Name", "Replicas"}) + for _, replicaset := range replicasets { + table.Append([]string{"replicaset", replicaset.Namespace, replicaset.Name, fmt.Sprint(replicaset.Spec.Replicas)}) + } + table.Render() +} + +func getAllVirtualServices() { + virtualservices, err := kubeclient.NewClient(apiServerIP).GetAllVirtualServices() + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Kind", "Namespace", "Name", "ServiceRef", "Port", "Subsets"}) + for _, virtualservice := range virtualservices { + subsetsFmtStr := "" + for _, subset := range virtualservice.Spec.Subsets { + subsetsFmtStr += fmt.Sprintf("%v\n", subset.Name) + } + subsetsFmtStr = strings.TrimSpace(subsetsFmtStr) + + table.Append([]string{"virtualservice", virtualservice.Namespace, virtualservice.Name, virtualservice.Spec.ServiceRef, fmt.Sprint(virtualservice.Spec.Port), subsetsFmtStr}) + } + table.Render() +} + +func getAllSubsets() { + subsets, err := kubeclient.NewClient(apiServerIP).GetAllSubsets() + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Kind", "Namespace", "Name", "Pods"}) + for _, subset := range subsets { + podFmtStr := "" + for _, pod := range subset.Spec.Pods { + podFmtStr += fmt.Sprintf("%v\n", pod) + } + podFmtStr = strings.TrimSpace(podFmtStr) + table.Append([]string{"subset", subset.Namespace, subset.Name, podFmtStr}) + } + table.Render() +} + +func getAllDNS() { + dns, err := kubeclient.NewClient(apiServerIP).GetAllDNS() + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Kind", "Namespace", "Name", "Rules"}) + for _, dn := range dns { + hostPath2NamePortStr := "" + for _, rule := range dn.Spec.Rules { + host := rule.Host + for _, dnsPath := range rule.Paths { + hostPath2NamePortStr += fmt.Sprintf("%v%v\n --> %v:%v\n", host, dnsPath.Path, dnsPath.Backend.Service.Name, dnsPath.Backend.Service.Port) + } + } + if hostPath2NamePortStr != "" { + hostPath2NamePortStr = strings.TrimSpace(hostPath2NamePortStr) + table.Append([]string{"dns", dn.Namespace, dn.Name, hostPath2NamePortStr}) + } else { + table.Append([]string{"dns", dn.Namespace, dn.Name, "N/A"}) + } + } + table.Render() +} + +// TODO 展示rolling update +func getAllRollingUpdate() { + rollingUpdates, err := kubeclient.NewClient(apiServerIP).GetAllRollingUpdates() + if err != nil { + fmt.Println(err) + return + } + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Kind", "Namespace", "Name", "Status", "ServiceRef", "Port", "MinimumAlive", "Interval"}) + for _, rollingUpdate := range rollingUpdates { + table.Append([]string{"rollingupdate", rollingUpdate.Namespace, rollingUpdate.Name, string(rollingUpdate.Status.Phase), rollingUpdate.Spec.ServiceRef, fmt.Sprint(rollingUpdate.Spec.Port), fmt.Sprint(rollingUpdate.Spec.MinimumAlive), fmt.Sprint(rollingUpdate.Spec.Interval)}) + } + table.Render() + +} diff --git a/pkg/kubectl/cmd/root.go b/pkg/kubectl/cmd/root.go new file mode 100644 index 0000000..de1c844 --- /dev/null +++ b/pkg/kubectl/cmd/root.go @@ -0,0 +1,21 @@ +package cmd + +import ( + "fmt" + "github.com/spf13/cobra" + "os" +) + +var rootCmd = &cobra.Command{ + Use: "kubectl", + Short: "kubectl controls the Kubernetes cluster", +} + +var 
apiServerIP string = "10.119.12.123" + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/pkg/kubectl/utils/parse.go b/pkg/kubectl/utils/parse.go new file mode 100644 index 0000000..d0385a8 --- /dev/null +++ b/pkg/kubectl/utils/parse.go @@ -0,0 +1,81 @@ +package utils + +import ( + "encoding/json" + v1 "minikubernetes/pkg/api/v1" + + "gopkg.in/yaml.v3" +) + +func JSON2YAML(jsonBytes []byte) ([]byte, error) { + var intermediate map[string]interface{} + err := json.Unmarshal(jsonBytes, &intermediate) + if err != nil { + return nil, err + } + yamlBytes, err := yaml.Marshal(intermediate) + if err != nil { + return nil, err + } + return yamlBytes, nil +} + +func YAML2JSON(yamlBytes []byte) ([]byte, error) { + var intermediate map[string]interface{} + err := yaml.Unmarshal(yamlBytes, &intermediate) + if err != nil { + return nil, err + } + jsonBytes, err := json.Marshal(intermediate) + if err != nil { + return nil, err + } + return jsonBytes, nil +} + +func Pod2YAML(pod *v1.Pod) ([]byte, error) { + yamlBytes, err := yaml.Marshal(pod) + if err != nil { + return nil, err + } + return yamlBytes, nil +} + +func YAML2Pod(yamlBytes []byte) (*v1.Pod, error) { + var pod v1.Pod + err := yaml.Unmarshal(yamlBytes, &pod) + if err != nil { + return nil, err + } + return &pod, nil +} + +func Pod2JSON(pod *v1.Pod) ([]byte, error) { + jsonBytes, err := json.Marshal(pod) + if err != nil { + return nil, err + } + return jsonBytes, nil +} + +func JSON2Pod(jsonBytes []byte) (*v1.Pod, error) { + var pod v1.Pod + err := json.Unmarshal(jsonBytes, &pod) + if err != nil { + return nil, err + } + return &pod, nil +} + +func GetKind(content []byte) string { + var intermediate map[string]interface{} + err := yaml.Unmarshal(content, &intermediate) + if err != nil { + return "" + } + kind, ok := intermediate["kind"] + if !ok { + return "" + } + return kind.(string) +} diff --git a/pkg/kubelet/app/server.go b/pkg/kubelet/app/server.go new file mode 100644 index 0000000..64efc9b --- /dev/null +++ b/pkg/kubelet/app/server.go @@ -0,0 +1,196 @@ +package app + +import ( + "context" + "github.com/google/uuid" + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubelet" + "minikubernetes/pkg/kubelet/client" + "minikubernetes/pkg/kubelet/types" + "minikubernetes/pkg/kubelet/utils" + "os" + "os/signal" + "sync" + "time" +) + +type KubeletServer struct { + kubeClient client.KubeletClient + nodeName string + latestLocalPods []*v1.Pod + updates chan types.PodUpdate + nodeConfig *v1.Node +} + +func NewKubeletServer(apiServerIP string, node *v1.Node) (*KubeletServer, error) { + ks := &KubeletServer{} + ks.kubeClient = client.NewKubeletClient(apiServerIP) + ks.latestLocalPods = make([]*v1.Pod, 0) + ks.updates = make(chan types.PodUpdate) + ks.nodeConfig = node + return ks, nil +} + +func (kls *KubeletServer) Run() { + // TODO 获取当前节点的nodeId + // kls.nodeName = "node-0" + address, err := utils.GetHostIP() + if err != nil { + log.Fatalf("Failed to get host ip: %v", err) + } + node, err := kls.kubeClient.RegisterNode(address, kls.nodeConfig) + if err != nil { + log.Fatalf("Failed to register node: %v", err) + } + kls.nodeName = node.Name + + // context+wait group实现notify和join + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var wg sync.WaitGroup + wg.Add(2) + + // 捕获SIGINT + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, os.Interrupt) + + kls.RunKubelet(ctx, &wg) + + go kls.watchApiServer(ctx, &wg) + + // 
SIGINT到来,调用cancel(),并等待所有goroutine结束 + <-signalCh + log.Println("Received interrupt signal, shutting down...") + cancel() + wg.Wait() +} + +func (kls *KubeletServer) RunKubelet(ctx context.Context, wg *sync.WaitGroup) { + kl, err := kls.createAndInitKubelet() + if err != nil { + return + } + kls.startKubelet(ctx, wg, kl) +} + +func (kls *KubeletServer) createAndInitKubelet() (*kubelet.Kubelet, error) { + kl, err := kubelet.NewMainKubelet(kls.nodeName, kls.kubeClient) + if err != nil { + log.Printf("Failed to create kubelet: %v", err) + return nil, err + } + return kl, nil +} + +func (kls *KubeletServer) startKubelet(ctx context.Context, wg *sync.WaitGroup, kl *kubelet.Kubelet) { + go kl.Run(ctx, wg, kls.updates) +} + +func (kls *KubeletServer) watchApiServer(ctx context.Context, wg *sync.WaitGroup) { + defer wg.Done() + SyncPeriod := 7 * time.Second + timer := time.NewTimer(SyncPeriod) + for { + select { + case <-timer.C: + kls.updateLocalPods() + timer.Reset(SyncPeriod) + case <-ctx.Done(): + log.Println("Shutting down api server watcher") + return + } + } +} + +func (kls *KubeletServer) updateLocalPods() { + // Mock + // newLocalPods := getMockPods(kls.latestLocalPods) + newLocalPods, err := kls.kubeClient.GetPodsByNodeName(kls.nodeName) + if err != nil { + log.Printf("Failed to get pods for node %s: %v", kls.nodeName, err) + return + } + + // For now, we only allow additions and deletions + oldTable := make(map[v1.UID]*v1.Pod) + newTable := make(map[v1.UID]*v1.Pod) + for _, pod := range kls.latestLocalPods { + oldTable[pod.ObjectMeta.UID] = pod + } + for _, pod := range newLocalPods { + newTable[pod.ObjectMeta.UID] = pod + } + additions := make([]*v1.Pod, 0) + deletions := make([]*v1.Pod, 0) + for _, pod := range kls.latestLocalPods { + if _, ok := newTable[pod.ObjectMeta.UID]; !ok { + deletions = append(deletions, pod) + } + } + for _, pod := range newLocalPods { + if _, ok := oldTable[pod.ObjectMeta.UID]; !ok { + additions = append(additions, pod) + } + } + if len(additions) != 0 { + kls.updates <- types.PodUpdate{ + Pods: additions, + Op: types.ADD, + } + } + if len(deletions) != 0 { + kls.updates <- types.PodUpdate{ + Pods: deletions, + Op: types.DELETE, + } + } + kls.latestLocalPods = newLocalPods +} + +// 以下为fake数据 +var firstTime bool = true +var secondTime bool = false + +func getMockPods(oldPods []*v1.Pod) []*v1.Pod { + if !firstTime && secondTime { + secondTime = false + //return []*v1.Pod{} + return oldPods + } + if !firstTime && !secondTime { + return oldPods + } + firstTime = false + secondTime = true + conport := v1.ContainerPort{ContainerPort: 8080} + //conport2 := v1.ContainerPort{ContainerPort: 8090} + contain := v1.Container{ + Image: "alpine:latest", + Command: []string{}, + Ports: []v1.ContainerPort{conport}, + } + //contain2 := v1.Container{ + // Image: "alpine:latest", + // Command: []string{"echo", "hello world"}, + // Ports: []v1.ContainerPort{conport2}, + //} + podSpec := v1.PodSpec{ + //Containers: []v1.Container{contain, contain2}, + Containers: []v1.Container{contain}, + RestartPolicy: v1.RestartPolicyAlways, + } + pod := &v1.Pod{ + TypeMeta: v1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, ObjectMeta: v1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + CreationTimestamp: time.Now(), + UID: v1.UID(uuid.New().String()), + }, + Spec: podSpec, + } + return []*v1.Pod{pod} +} diff --git a/pkg/kubelet/client/client.go b/pkg/kubelet/client/client.go new file mode 100644 index 0000000..a94e8a4 --- /dev/null +++ b/pkg/kubelet/client/client.go @@ -0,0 
+1,150 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + v1 "minikubernetes/pkg/api/v1" + "net/http" +) + +type KubeletClient interface { + GetPodsByNodeName(nodeId string) ([]*v1.Pod, error) + UpdatePodStatus(pod *v1.Pod, status *v1.PodStatus) error + RegisterNode(address string, node *v1.Node) (*v1.Node, error) + UnregisterNode(nodeName string) error +} + +type kubeletClient struct { + apiServerIP string +} + +func NewKubeletClient(apiServerIP string) KubeletClient { + return &kubeletClient{ + apiServerIP: apiServerIP, + } +} + +type BaseResponse[T any] struct { + Data T `json:"data,omitempty"` + Error string `json:"error,omitempty"` +} + +func (kc *kubeletClient) GetPodsByNodeName(nodeName string) ([]*v1.Pod, error) { + url := fmt.Sprintf("http://%s:8001/api/v1/nodes/%s/pods", kc.apiServerIP, nodeName) + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer func(Body io.ReadCloser) { + err := Body.Close() + if err != nil { + panic(err) + } + }(resp.Body) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse BaseResponse[[]*v1.Pod] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("get pods failed, error: %s", baseResponse.Error) + } + + return baseResponse.Data, nil +} + +func (kc *kubeletClient) UpdatePodStatus(pod *v1.Pod, status *v1.PodStatus) error { + url := fmt.Sprintf("http://%s:8001/api/v1/namespaces/%s/pods/%s/status", kc.apiServerIP, pod.Namespace, pod.Name) + + statusJson, err := json.Marshal(status) + if err != nil { + return err + } + req, err := http.NewRequest(http.MethodPut, url, bytes.NewBuffer(statusJson)) + if err != nil { + return err + } + req.Header.Add("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("update pod status failed, statusCode: %d", resp.StatusCode) + } + + return nil +} + +func (c *kubeletClient) RegisterNode(address string, node *v1.Node) (*v1.Node, error) { + jsonBytes, err := json.Marshal(node) + if err != nil { + return nil, err + } + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s:8001/api/v1/nodes/register", c.apiServerIP), bytes.NewBuffer(jsonBytes)) + if err != nil { + return nil, err + } + query := req.URL.Query() + query.Add("address", address) + req.URL.RawQuery = query.Encode() + req.Header.Add("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var baseResponse v1.BaseResponse[*v1.Node] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, fmt.Errorf("register node failed, error: %s", baseResponse.Error) + } + return baseResponse.Data, nil +} + +func (c *kubeletClient) UnregisterNode(nodeName string) error { + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s:8001/api/v1/nodes/unregister", c.apiServerIP), nil) + if err != nil { + return err + } + query := req.URL.Query() + query.Add("nodename", nodeName) + req.URL.RawQuery = query.Encode() + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + var baseResponse 
v1.BaseResponse[interface{}] + err = json.Unmarshal(body, &baseResponse) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("unregister node failed, error: %s", baseResponse.Error) + } + return nil +} diff --git a/pkg/kubelet/client/client_test.go b/pkg/kubelet/client/client_test.go new file mode 100644 index 0000000..461c29d --- /dev/null +++ b/pkg/kubelet/client/client_test.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + "testing" +) + +const ( + apiServerIP = "10.119.12.123" +) + +func TestKubeletClient_GetPodsByNodeName(t *testing.T) { + kubeClient := NewKubeletClient(apiServerIP) + pods, err := kubeClient.GetPodsByNodeName("node-0") + if err != nil { + // CI/CD无法访问到api server + t.Log(err) + return + } + podsJson, err := json.Marshal(pods) + if err != nil { + t.Fatal(err) + } + t.Log(string(podsJson)) +} diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go new file mode 100644 index 0000000..a485c23 --- /dev/null +++ b/pkg/kubelet/kubelet.go @@ -0,0 +1,280 @@ +package kubelet + +import ( + "context" + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubelet/client" + kubemetrics "minikubernetes/pkg/kubelet/metrics" + "minikubernetes/pkg/kubelet/pleg" + kubepod "minikubernetes/pkg/kubelet/pod" + "minikubernetes/pkg/kubelet/runtime" + "minikubernetes/pkg/kubelet/types" + "minikubernetes/pkg/kubelet/utils" + "sync" + "time" +) + +type Kubelet struct { + nodeName string + podManger kubepod.Manager + podWorkers PodWorkers + // collector + pleg pleg.PodLifecycleEventGenerator + kubeClient client.KubeletClient + runtimeManager runtime.RuntimeManager + cache runtime.Cache + nameserverIP string + + // metrics collector + metricsCollector kubemetrics.MetricsCollector +} + +func NewMainKubelet(nodeName string, kubeClient client.KubeletClient) (*Kubelet, error) { + kl := &Kubelet{} + + nameserverIP, err := runtime.GetContainerBridgeIP("coredns") + if err != nil { + return nil, err + } + kl.nameserverIP = nameserverIP + + kl.nodeName = nodeName + kl.podManger = kubepod.NewPodManager() + kl.kubeClient = kubeClient + kl.runtimeManager = runtime.NewRuntimeManager(nameserverIP) + kl.cache = runtime.NewCache() + kl.pleg = pleg.NewPLEG(kl.runtimeManager, kl.cache) + kl.podWorkers = NewPodWorkers(kl, kl.cache) + + kl.metricsCollector = kubemetrics.NewMetricsCollector() + kl.metricsCollector.Run() + log.Println("Kubelet initialized.") + return kl, nil +} + +func (kl *Kubelet) Run(ctx context.Context, wg *sync.WaitGroup, updates <-chan types.PodUpdate) { + log.Println("Kubelet running...") + // TODO 启动各种组件 + kl.pleg.Start() + // kl.statusManager.Start() + log.Println("Managers started.") + kl.syncLoop(ctx, wg, updates) +} + +func (kl *Kubelet) syncLoop(ctx context.Context, wg *sync.WaitGroup, updates <-chan types.PodUpdate) { + defer wg.Done() + log.Println("Sync loop started.") + syncTicker := time.NewTicker(time.Second) + defer syncTicker.Stop() + plegCh := kl.pleg.Watch() + for { + if !kl.syncLoopIteration(ctx, updates, syncTicker.C, plegCh) { + break + } + } + log.Println("Sync loop ended.") + kl.DoCleanUp() +} + +func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan types.PodUpdate, syncCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool { + // TODO 加入plegCh,syncCh等 + select { + case update, ok := <-configCh: + if !ok { + // 意料之外的通道关闭 + log.Println("Config channel is closed") + return false + } + switch update.Op { + case types.ADD: + kl.HandlePodAdditions(update.Pods) + case 
types.DELETE: + kl.HandlePodDeletions(update.Pods) + default: + log.Printf("Type %v is not implemented.\n", update.Op) + } + case e := <-plegCh: + log.Printf("PLEG event: pod %v --- %v.\n", e.PodId, e.Type) + pod, ok := kl.podManger.GetPodByUid(e.PodId) + if !ok { + log.Printf("Pod %v not found.\n", e.PodId) + return true + } + kl.HandlePodLifecycleEvent(pod, e) + case <-syncCh: + // TODO 定时同步Pod信息到metrics collector + allPods, err := kl.runtimeManager.GetAllPods() + // log.Printf("allPods: %v\n", allPods) + if err != nil { + log.Printf("Failed to get all pods: %v\n", err) + return true + } + // 由allPods取到所有的status + var podStatusList []*runtime.PodStatus + for _, pod := range allPods { + // log.Printf("pod: %v\n", pod) + podStatus, err := kl.runtimeManager.GetPodStatus(pod.ID, pod.Name, pod.Namespace) + if err != nil { + log.Printf("Failed to get pod %v status: %v\n", pod.Name, err) + continue + } + podStatusList = append(podStatusList, podStatus) + } + + // 调用metrics collector的接口 + kl.metricsCollector.SetPodInfo(podStatusList) + + case <-ctx.Done(): + // 人为停止 + return false + } + return true +} + +func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) { + log.Println("Handling pod additions...") + utils.SortPodsByCreationTime(pods) + for _, pod := range pods { + // log.Printf("new pod %v: %v.\n", i, pod.Name) + kl.podManger.UpdatePod(pod) + // TODO 检查pod是否可以被admit + kl.podWorkers.UpdatePod(pod, types.SyncPodCreate) + } +} + +func (kl *Kubelet) HandlePodDeletions(pods []*v1.Pod) { + log.Println("Handling pod deletions...") + for i, pod := range pods { + log.Printf("deleted pod %v: %v.\n", i, pod.Name) + kl.podManger.DeletePod(pod) + kl.podWorkers.UpdatePod(pod, types.SyncPodKill) + } +} + +func (kl *Kubelet) HandlePodLifecycleEvent(pod *v1.Pod, event *pleg.PodLifecycleEvent) { + log.Println("Handling pod lifecycle events...") + //if event.Type == pleg.ContainerRemoved { + // if pod.Spec.RestartPolicy == v1.RestartPolicyAlways || pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure { + // kl.podWorkers.UpdatePod(pod, types.SyncPodRecreate) + // return + // } + //} + if event.Type == pleg.ContainerDied && pod.Spec.RestartPolicy == v1.RestartPolicyAlways { + kl.podWorkers.UpdatePod(pod, types.SyncPodRecreate) + return + } + // TODO 实现OnFailure策略 + kl.podWorkers.UpdatePod(pod, types.SyncPodSync) +} + +func (kl *Kubelet) DoCleanUp() { + log.Println("Kubelet cleanup started.") + // TODO 停止各种组件 + kl.pleg.Stop() + // unregister node + err := kl.kubeClient.UnregisterNode(kl.nodeName) + if err != nil { + log.Printf("Failed to unregister node %v: %v\n", kl.nodeName, err) + } + log.Println("Kubelet cleanup ended.") +} + +func (kl *Kubelet) SyncPod(pod *v1.Pod, syncPodType types.SyncPodType, podStatus *runtime.PodStatus) { + switch syncPodType { + case types.SyncPodCreate: + log.Printf("Creating pod %v using container manager.\n", pod.Name) + err := kl.runtimeManager.AddPod(pod) + if err != nil { + log.Printf("Failed to create pod %v: %v\n", pod.Name, err) + return + } + log.Printf("Pod %v created.\n", pod.Name) + case types.SyncPodSync: + log.Printf("Syncing pod %v\n", pod.Name) + if podStatus == nil { + log.Printf("Pod %v status is nil.\n", pod.Name) + return + } + apiStatus := kl.computeApiStatus(pod, podStatus) + if apiStatus == nil { + // log.Printf("Pod %v status is unknown.\n", pod.Name) + return + } + err := kl.kubeClient.UpdatePodStatus(pod, apiStatus) + log.Printf("Pod %v syncing to apiserver. 
Phase: %v\n", pod.Name, apiStatus.Phase) + if err != nil { + log.Printf("Failed to update pod %v status to api server: %v\n", pod.Name, err) + return + } + log.Printf("Pod %v synced\n", pod.Name) + case types.SyncPodKill: + log.Printf("Killing pod %v\n", pod.Name) + err := kl.runtimeManager.DeletePod(pod.UID) + if err != nil { + log.Printf("Failed to kill pod %v: %v\n", pod.Name, err) + return + } + log.Printf("Pod %v killed.\n", pod.Name) + case types.SyncPodRecreate: + log.Printf("Recreating pod %v\n", pod.Name) + err := kl.runtimeManager.RestartPod(pod) + if err != nil { + log.Printf("Failed to recreate pod %v: %v\n", pod.Name, err) + return + } + log.Printf("Pod %v recreated.\n", pod.Name) + default: + log.Printf("SyncPodType %v is not implemented.\n", syncPodType) + } +} + +func (kl *Kubelet) computeApiStatus(pod *v1.Pod, podStatus *runtime.PodStatus) *v1.PodStatus { + running := 0 + exited := 0 + succeeded := 0 + failed := 0 + + if len(podStatus.ContainerStatuses) != len(pod.Spec.Containers) { + log.Printf("Pod %v has untracked container!\n", podStatus.Name) + return nil + } + + podIP := "" + if podStatus.IPs != nil && len(podStatus.IPs) > 0 { + podIP = podStatus.IPs[0] + } + + for _, containerStatus := range podStatus.ContainerStatuses { + if containerStatus.State == runtime.ContainerStateRunning { + running++ + } else if containerStatus.State == runtime.ContainerStateExited { + exited++ + if containerStatus.ExitCode == 0 { + succeeded++ + } else { + failed++ + } + } + } + if running != 0 { + return &v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: podIP, + } + } else if exited == len(podStatus.ContainerStatuses) { + if failed > 0 { + return &v1.PodStatus{ + Phase: v1.PodFailed, + PodIP: podIP, + } + } + return &v1.PodStatus{ + Phase: v1.PodSucceeded, + PodIP: podIP, + } + } else { + log.Printf("Pod %v status unknown!\n", podStatus.Name) + return nil + } +} diff --git a/pkg/kubelet/metrics/collector.go b/pkg/kubelet/metrics/collector.go new file mode 100644 index 0000000..5bdcfc3 --- /dev/null +++ b/pkg/kubelet/metrics/collector.go @@ -0,0 +1,317 @@ +package metrics // test + +import ( + "bytes" + "fmt" + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeclient" + "minikubernetes/pkg/kubelet/runtime" + "os/exec" + "strings" + "sync" + "time" + + CAdvClient "github.com/google/cadvisor/client/v2" + Info "github.com/google/cadvisor/info/v2" +) + +// func main() { +// mc, err := NewMetricsCollector() +// if err != nil { +// fmt.Printf("new metrics collector err: %v", err.Error()) +// } +// mc.Run() +// } + +var mdebug bool = false + +type metricsCollector struct { + // 从cadvisor中获取容器指标的操作者 + ip string + port int + cad_client *CAdvClient.Client + + // kubeclient,和apiserver的stats接口交互 + kube_cli kubeclient.Client + // podStat得加锁 + podStatsLock sync.Mutex + podStats []*runtime.PodStatus + conLastTime map[string]time.Time +} + +type MetricsCollector interface { + // MetricsCollector接口 + SetPodInfo(podStats []*runtime.PodStatus) + Run() +} + +func NewMetricsCollector() MetricsCollector { + // 创建一个新的MetricsCollector + var newMetricsCollector MetricsCollector + newMetricsCollector = &metricsCollector{ + ip: "127.0.0.1", + port: 8090, + } + return newMetricsCollector +} +func (mc *metricsCollector) tryStartCAdvisor() { + cmd := exec.Command("docker", "run", + "--volume=/:/rootfs:ro", + "--volume=/var/run:/var/run:rw", + "--volume=/sys:/sys:ro", + "--volume=/var/lib/docker/:/var/lib/docker:ro", + "--publish="+fmt.Sprint(mc.port)+":8080", + "--detach=true", + "--name=cadvisor", + 
"google/cadvisor:latest") + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + if err != nil { + log.Printf("start cadvisor err: %v, stderr: %v", err.Error(), stderr.String()) + } +} + +func (mc *metricsCollector) Run() { + // 运行MetricsCollector + mc.init() + + const interval int = 5 + + // 定期检查cadvisor是否存活 + go func() { + syncTicker := time.NewTicker(time.Duration(interval) * time.Second) + defer syncTicker.Stop() + defer log.Printf("stop metrics collector") + for { + select { + case <-syncTicker.C: + err := mc.CheckAlive() + if err != nil { + // 尝试启动cadvisor + log.Printf(err.Error()) + mc.tryStartCAdvisor() + } else { + if mc.podStats != nil { + + // 获取容器指标 + curMetrics, err := mc.getMetrics() + if err != nil { + log.Printf("get metrics err: %v", err.Error()) + } + if mdebug { + log.Printf("metrics to be uploaded: %v", curMetrics) + } + // 上传指标 + err = mc.uploadMetrics(curMetrics) + if err != nil { + log.Printf("upload metrics err: %v", err.Error()) + } + } + } + } + } + }() + +} + +func (mc *metricsCollector) init() { + log.Printf("init metrics collector") + err := mc.CheckAlive() + if err != nil { + // 尝试启动cadvisor + mc.tryStartCAdvisor() + } + // 初始化MetricsCollector + URLStr := fmt.Sprintf("http://%s:%d", mc.ip, mc.port) + client, err := CAdvClient.NewClient(URLStr) + mc.cad_client = client + if err != nil { + log.Printf("init cadvisor client err: %v", err.Error()) + } + + mc.kube_cli = kubeclient.NewClient("192.168.1.10") + + mc.conLastTime = make(map[string]time.Time) + mc.podStats = nil + +} + +func (mc *metricsCollector) SetPodInfo(podStats []*runtime.PodStatus) { + // 由kubelet设置pod信息 + // 注意是Pod 到 容器非人的字符串 的映射 + if mdebug { + log.Printf("set pod info: %v", podStats) + } + mc.podStatsLock.Lock() + if mc.podStats == nil { + mc.podStats = make([]*runtime.PodStatus, 0) + } + mc.podStats = podStats + mc.podStatsLock.Unlock() +} + +// containername/containerid,type, timeinterval +func (mc *metricsCollector) getMetrics() ([]*v1.PodRawMetrics, error) { + // 指定字段从cadvisor中获取容器指标 + // Count是获取的 + const NANOCORES float32 = 1e9 + const MEGABYTE float32 = 1024 * 1024 + var interval int = 10 + 1 // 第一个必然是nil + + request := Info.RequestOptions{ + IdType: Info.TypeName, + Count: interval, + Recursive: false, + MaxAge: new(time.Duration), + } + *request.MaxAge = time.Duration(0) + + // 保存所有的pod指标 + var AllPodMetrics []*v1.PodRawMetrics + + mc.podStatsLock.Lock() + // 对于每一个Pod:都得获取指标 + for _, podStat := range mc.podStats { + var podMetrics v1.PodRawMetrics + podId := podStat.ID + podMetrics.UID = podId + podMetrics.ContainerInfo = make(map[string][]v1.PodRawMetricsItem) + + // 对于每一个容器 + for _, container := range podStat.ContainerStatuses { + // 只统计running的容器 + if container.State != runtime.ContainerStateRunning { + continue + } + // 容器的非人的字符串 + var dockerId string = container.ID + // 如果容器对应的map不存在,则创建 + if _, ok := podMetrics.ContainerInfo[dockerId]; !ok { + podMetrics.ContainerInfo[dockerId] = make([]v1.PodRawMetricsItem, 0) + } + var PrefixDockerId string = fmt.Sprintf("/docker/%s", container.ID) + MapPfxIdStats, err := mc.cad_client.Stats(PrefixDockerId, &request) + + if err != nil { + log.Printf("get container info err: %v", err.Error()) + } + // fmt.Printf("container info: %v\n", sInfo) + // Map_PfxId_Stats的key是PrefixDockerId + // 这样就可以获得cpu瞬时的占用率了,total_usage以nano core为单位. 
+ for _, item := range MapPfxIdStats[PrefixDockerId].Stats { + var cMetricItem v1.PodRawMetricsItem + // 第一个item必然是nil + if item.CpuInst == nil || item.Memory == nil { + continue + } + //如果比上次时间戳还小,就不用上传 + if lastTime, ok := mc.conLastTime[dockerId]; ok { + if !item.Timestamp.After(lastTime) { + if mdebug { + fmt.Printf("Timestamp is not later than last time, no update\n") + } + continue + } + } + // 更新时间戳 + if mdebug { + fmt.Printf("update Timestamp\n") + } + mc.conLastTime[dockerId] = item.Timestamp + + cMetricItem.TimeStamp = item.Timestamp + + { + total_usage := item.CpuInst.Usage.Total + + // 需要转化成利用率,有点反直觉,但是不用除以物理核心数 + cpu_usage := (float32(total_usage) / NANOCORES) + cMetricItem.CPUUsage = cpu_usage + // fmt.Printf("container stats:%v\n", cpu_usage) + + } + + { + memory_usage := item.Memory.Usage + // 以mb计量 + mb_usage := float32(memory_usage) / MEGABYTE + cMetricItem.MemoryUsage = mb_usage + // fmt.Printf("container memory stats:%v\n", mb_usage) + } + + // TODO: 增加对于网络占用率的统计 + // 合理:容器的网络占用率[积累量] + // for _, item := range sInfo[dockerId].Stats { + // if item.Network != nil { + // // 以byte为单位 + // rx_bytes := item.Network.Interfaces[0].RxBytes // 接受的byte数量 + // tx_bytes := item.Network.Interfaces[0].TxBytes // 传出去的byte数量 + // fmt.Printf("container network stats: rx:%v, tx:%v\n", rx_bytes, tx_bytes) + // } + // } + + // TODO: 增加对于容器的disk io占用率的统计 + // 待测量 + // for _, item := range sInfo[dockerId].Stats { + // if item.DiskIo != nil { + // // 以byte为单位 + // read_bytes := item.DiskIo.IoServiceBytes[0].Stats + // write_bytes := item.DiskIo.IoServiceBytes[1].Stats + // fmt.Printf("container disk io stats: read:%v, write:%v\n", read_bytes, write_bytes) + // } + // } + podMetrics.ContainerInfo[dockerId] = append(podMetrics.ContainerInfo[dockerId], cMetricItem) + } + + } + // var dockerId string = "/docker/8416a8e063e9a30961be4f88d5bfbad3b8b6911fc4a6c36ec7bd73729ecd5e38" + + AllPodMetrics = append(AllPodMetrics, &podMetrics) + } + mc.podStatsLock.Unlock() + return AllPodMetrics, nil +} + +func (mc *metricsCollector) CheckAlive() error { + // 检查cadvisor是否存活 + cmd := exec.Command("docker", "ps") + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + // cadvisor未启动 + return fmt.Errorf("cadvisor err: %v, stderr: %v", err.Error(), stderr.String()) + } + + // 检查是否有cadvisor 字段 + if !strings.Contains(stdout.String(), "cadvisor") { + return fmt.Errorf("cadvisor not found in docker ps") + } + + // 检查是否能够获取端口信息 + cmd = exec.Command("nc", "-vz", mc.ip, fmt.Sprintf("%d", mc.port)) + stdout.Reset() + stderr.Reset() + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err = cmd.Run() + if err != nil { + return fmt.Errorf("ping cadvisor err: %v, %v , %v", err.Error(), stderr.String(), stdout.String()) + } + return nil +} + +func (mc *metricsCollector) uploadMetrics(metrics []*v1.PodRawMetrics) error { + // 将容器指标上传到apiserver + // 调用kubeclient中的上传函数 + err := mc.kube_cli.UploadPodMetrics(metrics) + if err != nil { + return err + } + return nil +} diff --git a/pkg/kubelet/network/weave_adapt.go b/pkg/kubelet/network/weave_adapt.go new file mode 100644 index 0000000..83ba962 --- /dev/null +++ b/pkg/kubelet/network/weave_adapt.go @@ -0,0 +1,43 @@ +package network + +import ( + "bytes" + "fmt" + "net" + "os/exec" + "strings" +) + +func Attach(containerId string) (string, error) { + cmd := exec.Command("weave", "attach", containerId) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + if err != nil { + 
return "", fmt.Errorf("weave attach err: %v, stderr: %v", err.Error(), stderr.String()) + } + IPString := stdout.String() + IPString = strings.TrimSpace(IPString) + if address := net.ParseIP(IPString); address == nil { + return "", fmt.Errorf("weave returns invalid IP address: %s", IPString) + } + return IPString, nil +} + +func Detach(containerId string) error { + cmd := exec.Command("weave", "detach", containerId) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + if err != nil { + return fmt.Errorf("weave detach err: %v, stderr: %v", err.Error(), stderr.String()) + } + IPString := stdout.String() + IPString = strings.TrimSpace(IPString) + if address := net.ParseIP(IPString); address == nil { + return fmt.Errorf("weave detach failed, expect detached IP, actual: %s", IPString) + } + return nil +} diff --git a/pkg/kubelet/pleg/pleg.go b/pkg/kubelet/pleg/pleg.go new file mode 100644 index 0000000..fdc0686 --- /dev/null +++ b/pkg/kubelet/pleg/pleg.go @@ -0,0 +1,291 @@ +package pleg + +import ( + "context" + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubelet/runtime" + "sync" + "time" +) + +type PodLifeCycleEventType string + +const ( + // ContainerStarted any -> running + ContainerStarted PodLifeCycleEventType = "ContainerStarted" + // ContainerDied any -> exited + ContainerDied PodLifeCycleEventType = "ContainerDied" + // ContainerRemoved exited -> non-existent + ContainerRemoved PodLifeCycleEventType = "ContainerRemoved" + // ContainerChanged any -> unknown + ContainerChanged PodLifeCycleEventType = "ContainerChanged" +) + +const ( + RelistPeriod time.Duration = 5 * time.Second +) + +type PodLifecycleEvent struct { + // Pod ID + PodId v1.UID + // 事件类型 + Type PodLifeCycleEventType +} + +type PodLifecycleEventGenerator interface { + Start() + Watch() chan *PodLifecycleEvent + Stop() +} + +type pleg struct { + runtimeManager runtime.RuntimeManager + eventCh chan *PodLifecycleEvent + podRecords map[v1.UID]*podRecord + cache runtime.Cache + + // 用于同步停止 + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + cacheLock sync.Mutex +} + +type podRecord struct { + old *runtime.Pod + current *runtime.Pod +} + +func NewPLEG(runtimeManager runtime.RuntimeManager, cache runtime.Cache) PodLifecycleEventGenerator { + ctx, cancel := context.WithCancel(context.Background()) + return &pleg{ + runtimeManager: runtimeManager, + eventCh: make(chan *PodLifecycleEvent, 100), + podRecords: make(map[v1.UID]*podRecord), + cache: cache, + ctx: ctx, + cancel: cancel, + } +} + +func (p *pleg) Start() { + log.Printf("Starting PLEG...") + p.wg.Add(1) + go func() { + timer := time.NewTimer(RelistPeriod) + isStopped := false + for !isStopped { + select { + case <-timer.C: + p.Relist() + timer.Reset(RelistPeriod) + case <-p.ctx.Done(): + isStopped = true + } + } + log.Printf("PLEG stopped.") + p.wg.Done() + }() +} + +func (p *pleg) Watch() chan *PodLifecycleEvent { + return p.eventCh +} + +func (p *pleg) Relist() { + log.Printf("PLEG: Relisting...") + // TODO: Relist + ts := time.Now() + + // 获取runtime pods + pods, err := p.runtimeManager.GetAllPods() + if err != nil { + log.Printf("PLEG: Failed to get pods: %v", err) + return + } + //var pods []*runtime.Pod + p.setCurrentPods(pods) + + eventMap := make(map[v1.UID][]*PodLifecycleEvent) + for id := range p.podRecords { + oldPod := p.getOldPodById(id) + currentPod := p.getCurrentPodById(id) + events := computeEvents(oldPod, currentPod) + // 若一个pod没有任何事件,此次relist无需处理 + if len(events) != 0 { + 
eventMap[id] = events + } + } + + for id, events := range eventMap { + // 更新全局缓存 + currentPod := p.getCurrentPodById(id) + if err, _ := p.updateCache(currentPod, id); err != nil { + log.Printf("PLEG: Failed to update cache for pod %v: %v", id, err) + // TODO reinspect in next relist + continue + } + // 把current移入old,准备下一次relist + p.shiftCurrentToOldById(id) + // 发送事件 + for i := range events { + if events[i].Type == ContainerChanged { + continue + } + p.eventCh <- events[i] + } + } + p.cache.UpdateTime(ts) + log.Printf("PLEG: Relisting done.") +} + +func (p *pleg) Stop() { + log.Printf("Stopping PLEG...") + p.cancel() + p.wg.Wait() +} + +func (p *pleg) getOldPodById(id v1.UID) *runtime.Pod { + if record, ok := p.podRecords[id]; ok { + return record.old + } + return nil +} + +func (p *pleg) getCurrentPodById(id v1.UID) *runtime.Pod { + if record, ok := p.podRecords[id]; ok { + return record.current + } + return nil +} + +func (p *pleg) setCurrentPods(pods []*runtime.Pod) { + for id := range p.podRecords { + p.podRecords[id].current = nil + } + for _, pod := range pods { + id := pod.ID + record, ok := p.podRecords[id] + if !ok { + record = &podRecord{} + p.podRecords[id] = record + } + record.current = pod + } +} + +func computeEvents(oldPod, currentPod *runtime.Pod) []*PodLifecycleEvent { + var id v1.UID + if oldPod != nil { + id = oldPod.ID + } else if currentPod != nil { + id = currentPod.ID + } + + // 获取新旧pod所有容器 + cidMap := make(map[string]struct{}) + if oldPod != nil { + for _, c := range oldPod.Containers { + cidMap[c.ID] = struct{}{} + } + } + if currentPod != nil { + for _, c := range currentPod.Containers { + cidMap[c.ID] = struct{}{} + } + } + + events := make([]*PodLifecycleEvent, 0) + for cid := range cidMap { + oldState := getPlegContainerState(oldPod, &cid) + currentState := getPlegContainerState(currentPod, &cid) + if oldState == currentState { + continue + } + switch currentState { + case plegContainerRunning: + events = append(events, &PodLifecycleEvent{PodId: id, Type: ContainerStarted}) + case plegContainerExited: + events = append(events, &PodLifecycleEvent{PodId: id, Type: ContainerDied}) + case plegContainerUnknown: + events = append(events, &PodLifecycleEvent{PodId: id, Type: ContainerChanged}) + case plegContainerNonExistent: + if oldState == plegContainerExited { + events = append(events, &PodLifecycleEvent{PodId: id, Type: ContainerRemoved}) + } else { + events = append(events, + &PodLifecycleEvent{PodId: id, Type: ContainerDied}, + &PodLifecycleEvent{PodId: id, Type: ContainerRemoved}) + } + } + } + + return events +} + +type plegContainerState string + +const ( + plegContainerRunning plegContainerState = "running" + plegContainerExited plegContainerState = "exited" + plegContainerUnknown plegContainerState = "unknown" + plegContainerNonExistent plegContainerState = "non-existent" +) + +func getPlegContainerState(pod *runtime.Pod, cid *string) plegContainerState { + if pod == nil { + return plegContainerNonExistent + } + for _, c := range pod.Containers { + if c.ID == *cid { + switch c.State { + case runtime.ContainerStateCreated: + return plegContainerUnknown + case runtime.ContainerStateRunning: + return plegContainerRunning + case runtime.ContainerStateExited: + return plegContainerExited + case runtime.ContainerStateUnknown: + return plegContainerUnknown + } + } + } + return plegContainerNonExistent +} + +func (p *pleg) updateCache(currentPod *runtime.Pod, pid v1.UID) (error, bool) { + // Relist与UpdateCache接口存在并发调用 + p.cacheLock.Lock() + defer 
p.cacheLock.Unlock() + + // pid对应的pod存在事件,同时current为nil,直接删除 + if currentPod == nil { + p.cache.Delete(pid) + return nil, true + } + ts := time.Now() + status, err := p.runtimeManager.GetPodStatus(currentPod.ID, currentPod.Name, currentPod.Namespace) + //var status *runtime.PodStatus + //var err error + // if err == nil { + // 源码这里为status保留了旧ip地址,暂时不知道为什么,先不管 + // status.IPs = p.getPodIPs(pid, status) + // } + return err, p.cache.Set(currentPod.ID, status, err, ts) +} + +func (p *pleg) shiftCurrentToOldById(id v1.UID) { + record, ok := p.podRecords[id] + if !ok { + return + } + // 若current为nil,shift后就是两个nil,直接删除 + if record.current == nil { + delete(p.podRecords, id) + return + } + record.old = record.current + record.current = nil +} diff --git a/pkg/kubelet/pod/pod_manager.go b/pkg/kubelet/pod/pod_manager.go new file mode 100644 index 0000000..1a8dc32 --- /dev/null +++ b/pkg/kubelet/pod/pod_manager.go @@ -0,0 +1,66 @@ +package pod + +import ( + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubelet/utils" + "sync" +) + +type Manager interface { + GetPodByFullName(podFullName string) (*v1.Pod, bool) + GetPodByUid(podUid v1.UID) (*v1.Pod, bool) + GetPods() []*v1.Pod + UpdatePod(pod *v1.Pod) + DeletePod(pod *v1.Pod) +} + +type podManager struct { + lock sync.RWMutex + podUidMap map[v1.UID]*v1.Pod + podFullNameMap map[string]*v1.Pod +} + +func NewPodManager() Manager { + return &podManager{ + podUidMap: make(map[v1.UID]*v1.Pod), + podFullNameMap: make(map[string]*v1.Pod), + } +} + +func (pm *podManager) GetPodByFullName(podFullName string) (*v1.Pod, bool) { + pm.lock.RLock() + defer pm.lock.RUnlock() + pod, ok := pm.podFullNameMap[podFullName] + return pod, ok +} + +func (pm *podManager) GetPodByUid(podUid v1.UID) (*v1.Pod, bool) { + pm.lock.RLock() + defer pm.lock.RUnlock() + pod, ok := pm.podUidMap[podUid] + return pod, ok +} + +func (pm *podManager) GetPods() []*v1.Pod { + pm.lock.RLock() + defer pm.lock.RUnlock() + pods := make([]*v1.Pod, 0, len(pm.podUidMap)) + for _, pod := range pm.podUidMap { + pods = append(pods, pod) + } + return pods +} + +func (pm *podManager) UpdatePod(pod *v1.Pod) { + pm.lock.Lock() + defer pm.lock.Unlock() + pm.podUidMap[pod.ObjectMeta.UID] = pod + pm.podFullNameMap[utils.GetPodFullName(pod)] = pod +} + +func (pm *podManager) DeletePod(pod *v1.Pod) { + pm.lock.Lock() + defer pm.lock.Unlock() + delete(pm.podUidMap, pod.ObjectMeta.UID) + delete(pm.podFullNameMap, utils.GetPodFullName(pod)) +} diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go new file mode 100644 index 0000000..d34eca1 --- /dev/null +++ b/pkg/kubelet/pod_workers.go @@ -0,0 +1,102 @@ +package kubelet + +import ( + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubelet/runtime" + "minikubernetes/pkg/kubelet/types" + "sync" + "time" +) + +type PodWorkerState string + +const ( + SyncPod PodWorkerState = "SyncPod" + TerminatingPod PodWorkerState = "TerminatingPod" + TerminatedPod PodWorkerState = "TerminatedPod" +) + +type UpdatePodOptions struct { + SyncPodType types.SyncPodType + Pod *v1.Pod +} + +type PodWorkers interface { + UpdatePod(pod *v1.Pod, syncPodType types.SyncPodType) +} + +type PodSyncer interface { + SyncPod(pod *v1.Pod, syncPodType types.SyncPodType, podStatus *runtime.PodStatus) +} + +type podWorkers struct { + lock sync.Mutex + podSyncer PodSyncer + podUpdates map[v1.UID]chan UpdatePodOptions + cache runtime.Cache +} + +func NewPodWorkers(podSyncer PodSyncer, cache runtime.Cache) PodWorkers { + return &podWorkers{ + podSyncer: podSyncer, + 
podUpdates: make(map[v1.UID]chan UpdatePodOptions), + cache: cache, + } +} + +func (pw *podWorkers) UpdatePod(pod *v1.Pod, syncPodType types.SyncPodType) { + pw.lock.Lock() + defer pw.lock.Unlock() + if updateCh, ok := pw.podUpdates[pod.ObjectMeta.UID]; !ok { + if syncPodType != types.SyncPodCreate { + log.Printf("Pod worker goroutine for pod %s does not exist.", pod.ObjectMeta.UID) + return + } + updates := make(chan UpdatePodOptions, 10) + pw.podUpdates[pod.ObjectMeta.UID] = updates + go pw.workerLoop(updates) + updates <- UpdatePodOptions{ + SyncPodType: syncPodType, + Pod: pod, + } + } else { + if syncPodType == types.SyncPodCreate { + log.Printf("Pod worker goroutine for pod %s already exists.", pod.ObjectMeta.UID) + return + } + if syncPodType == types.SyncPodSync || syncPodType == types.SyncPodKill || syncPodType == types.SyncPodRecreate { + updateCh <- UpdatePodOptions{ + SyncPodType: syncPodType, + Pod: pod, + } + } + } +} + +func (pw *podWorkers) workerLoop(updates <-chan UpdatePodOptions) { + log.Println("Pod worker started.") + var lastSyncTime time.Time + for update := range updates { + if update.SyncPodType == types.SyncPodCreate { + pw.podSyncer.SyncPod(update.Pod, update.SyncPodType, nil) + } else if update.SyncPodType == types.SyncPodSync || update.SyncPodType == types.SyncPodRecreate { + status, err := pw.cache.GetNewerThan(update.Pod.ObjectMeta.UID, lastSyncTime) + if err != nil { + log.Printf("Failed to get pod status for pod %s: %v", update.Pod.ObjectMeta.UID, err) + continue + } + pw.podSyncer.SyncPod(update.Pod, update.SyncPodType, status) + } else if update.SyncPodType == types.SyncPodKill { + pw.podSyncer.SyncPod(update.Pod, update.SyncPodType, nil) + pw.lock.Lock() + delete(pw.podUpdates, update.Pod.ObjectMeta.UID) + pw.lock.Unlock() + break + } else { + continue + } + lastSyncTime = time.Now() + } + log.Println("Pod worker stopped.") +} diff --git a/pkg/kubelet/runtime/cache.go b/pkg/kubelet/runtime/cache.go new file mode 100644 index 0000000..6d4e1dd --- /dev/null +++ b/pkg/kubelet/runtime/cache.go @@ -0,0 +1,100 @@ +package runtime + +import ( + "fmt" + v1 "minikubernetes/pkg/api/v1" + "sync" + "time" +) + +type Cache interface { + // 非阻塞获取 + Get(v1.UID) (*PodStatus, error) + // 如果数据比缓存新,则更新缓存 + Set(v1.UID, *PodStatus, error, time.Time) (updated bool) + // 阻塞获取 + GetNewerThan(v1.UID, time.Time) (*PodStatus, error) + Delete(v1.UID) + UpdateTime(time.Time) +} + +type cacheData struct { + // Pod的状态 + status *PodStatus + // 获取Pod状态时遇到的错误 + err error + // 最后一次更新时间 + modified time.Time +} + +type cache struct { + lock sync.RWMutex + pods map[v1.UID]*cacheData + // 所有缓存内容至少比timestamp新。 + timestamp *time.Time +} + +func NewCache() Cache { + return &cache{pods: map[v1.UID]*cacheData{}} +} + +func (c *cache) Get(id v1.UID) (*PodStatus, error) { + c.lock.RLock() + defer c.lock.RUnlock() + data, ok := c.pods[id] + if !ok { + return nil, fmt.Errorf("cache: pod %v not found", id) + } + return data.status, data.err +} + +func (c *cache) GetNewerThan(id v1.UID, minTime time.Time) (*PodStatus, error) { + c.lock.RLock() + maxRetry := 3 + for i := 0; i < maxRetry; i++ { + data, ok := c.pods[id] + isGlobalNewer := c.timestamp != nil && (c.timestamp.After(minTime) || c.timestamp.Equal(minTime)) + if !ok && isGlobalNewer { + // should not be here + c.lock.RUnlock() + return nil, fmt.Errorf("cache: global timestamp is newer but pod %v not exist", id) + } + if ok && (data.modified.After(minTime) || data.modified.Equal(minTime)) { + c.lock.RUnlock() + return data.status, data.err + } + 
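+		// The cached entry is still older than minTime: release the read lock, sleep
+		// briefly and retry (bounded by maxRetry), since the next PLEG relist is
+		// expected to refresh the entry.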
// 可以期待之后会有更新 + c.lock.RUnlock() + if i == maxRetry-1 { + break + } + time.Sleep(1 * time.Second) + c.lock.RLock() + } + // BUGFIX: 一次Lifecycle可能包含多个容器事件,但一个pod只做一次update cache, + // 第一个事件被worker处理后,worker时间戳新于cache时间戳,后续事件处理时到这里全部阻塞 + // 若cache后面不会被更新(例如pod long running),则会永久阻塞 + return nil, fmt.Errorf("cache: pod %v newer than %v not found after %vs", id, minTime, maxRetry-1) +} + +func (c *cache) Set(id v1.UID, status *PodStatus, err error, timestamp time.Time) (updated bool) { + c.lock.Lock() + defer c.lock.Unlock() + if data, ok := c.pods[id]; ok && data.modified.After(timestamp) { + return false + } + c.pods[id] = &cacheData{status: status, err: err, modified: timestamp} + return true +} + +func (c *cache) Delete(id v1.UID) { + c.lock.Lock() + defer c.lock.Unlock() + delete(c.pods, id) +} + +func (c *cache) UpdateTime(timestamp time.Time) { + c.lock.Lock() + defer c.lock.Unlock() + c.timestamp = ×tamp +} diff --git a/pkg/kubelet/runtime/cache_test.go b/pkg/kubelet/runtime/cache_test.go new file mode 100644 index 0000000..f302cdf --- /dev/null +++ b/pkg/kubelet/runtime/cache_test.go @@ -0,0 +1,52 @@ +package runtime + +import ( + "fmt" + v1 "minikubernetes/pkg/api/v1" + "sync" + "testing" + "time" +) + +func TestCache(t *testing.T) { + c := NewCache() + id := "test-id" + ts := time.Now() + + status, err := c.Get(v1.UID(id)) + if status != nil || err == nil { + t.Fatalf("Get() = %v, %v, want nil, non-nil", status, err) + } + + c.Set(v1.UID(id), &PodStatus{}, fmt.Errorf(""), ts) + status, err = c.Get(v1.UID(id)) + if status == nil || err == nil { + t.Fatalf("Get() = %v, %v, want non-nil, non-nil", status, err) + } + + c.UpdateTime(ts) + status, err = c.GetNewerThan(v1.UID(id), ts.Add(-time.Second)) + if status == nil || err == nil { + t.Fatalf("GetNewerThan() = %v, %v, want non-nil, non-nil", status, err) + } + + wg := sync.WaitGroup{} + wg.Add(2) + go func(wg *sync.WaitGroup) { + t.Log("GetNewerThan() start") + c.GetNewerThan(v1.UID(id), ts.Add(2*time.Second)) + t.Log("GetNewerThan() done") + wg.Done() + }(&wg) + go func(wg *sync.WaitGroup) { + t.Log("Set1 start") + c.Set(v1.UID(id), &PodStatus{}, nil, ts.Add(time.Second)) + t.Log("Set1 done") + time.Sleep(time.Second) + t.Log("Set2 start") + c.Set(v1.UID(id), &PodStatus{}, nil, ts.Add(2*time.Second)) + t.Log("Set2 done") + wg.Done() + }(&wg) + wg.Wait() +} diff --git a/pkg/kubelet/runtime/runtime.go b/pkg/kubelet/runtime/runtime.go new file mode 100644 index 0000000..4a93a7e --- /dev/null +++ b/pkg/kubelet/runtime/runtime.go @@ -0,0 +1,107 @@ +package runtime + +import ( + "context" + "fmt" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" + v1 "minikubernetes/pkg/api/v1" + "time" +) + +// 底层pod表示 +type Pod struct { + // pod的UID + ID v1.UID + Name string + Namespace string + + // 纳秒 + CreatedAt uint64 + + Containers []*Container +} + +type Container struct { + // docker为container生成的id + ID string + // 和v1.Container.Name相同 + Name string + + Image string + + State ContainerState +} + +// State represents the state of a container +type ContainerState string + +const ( + ContainerStateCreated ContainerState = "created" + + ContainerStateRunning ContainerState = "running" + + ContainerStateExited ContainerState = "exited" + + ContainerStateUnknown ContainerState = "unknown" +) + +// 底层的pod状态表示 +type PodStatus struct { + // pod的UID + ID v1.UID + Name string + Namespace string + // CNI赋予的ip + IPs []string + // 所有容器状态 + ContainerStatuses []*ContainerStatus + // 
生成该条status记录的时间 + TimeStamp time.Time +} + +type ContainerStatus struct { + // docker为container生成的id + ID string + Name string + State ContainerState + CreatedAt time.Time + StartedAt time.Time + FinishedAt time.Time + ExitCode int + Image string + Resources *ContainerResources +} + +type ContainerResources struct { + CPURequest string + CPULimit string + MemoryRequest string + MemoryLimit string +} + +func GetContainerBridgeIP(containerName string) (string, error) { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return "", err + } + defer cli.Close() + f := filters.NewArgs() + f.Add("name", containerName) + containers, err := cli.ContainerList(context.Background(), container.ListOptions{ + Filters: f, + }) + if err != nil { + return "", err + } + if len(containers) == 0 { + return "", fmt.Errorf("container %s not found", containerName) + } + c := containers[0] + if ep, ok := c.NetworkSettings.Networks["bridge"]; !ok { + return "", fmt.Errorf("container %s does not have bridge network", containerName) + } else { + return ep.IPAddress, nil + } +} diff --git a/pkg/kubelet/runtime/runtime_manager.go b/pkg/kubelet/runtime/runtime_manager.go new file mode 100644 index 0000000..47d0536 --- /dev/null +++ b/pkg/kubelet/runtime/runtime_manager.go @@ -0,0 +1,800 @@ +package runtime + +import ( + "context" + "fmt" + "io" + "minikubernetes/pkg/microservice/envoy" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types/mount" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/client" + "github.com/docker/go-connections/nat" + + v1 "minikubernetes/pkg/api/v1" + nw "minikubernetes/pkg/kubelet/network" +) + +type RuntimeManager interface { + AddPod(pod *v1.Pod) error + GetAllPods() ([]*Pod, error) + GetPodStatus(ID v1.UID, PodName string, PodSpace string) (*PodStatus, error) + DeletePod(ID v1.UID) error + RestartPod(pod *v1.Pod) error +} + +type runtimeManager struct { + lock sync.Mutex + IpMap map[v1.UID]string + nameserverIP string +} + +func (rm *runtimeManager) GetAllPods() ([]*Pod, error) { + rm.lock.Lock() + defer rm.lock.Unlock() + containers, err := rm.getAllContainers() + if err != nil { + // panic(err) + return nil, err + } + var ret []*Pod + + for _, container := range containers { + flag := false + tempContainer := new(Container) + tempContainer.ID = container.ID + tempContainer.Image = container.Image + tempContainer.Name = container.Labels["Name"] + rm.checkContainerState(container, tempContainer) + + podBelonged, ok := container.Labels["PodName"] + if !ok { + continue + } + + for _, podexist := range ret { + if podexist.Name == podBelonged { + //fmt.Println("yes " + podBelonged) + podexist.Containers = append(podexist.Containers, tempContainer) + flag = true + break + } + } + if !flag { + //fmt.Println("no " + podBelonged) + tempPod := new(Pod) + tempPod.Name = podBelonged + tempPod.Namespace = container.Labels["PodNamespace"] + tempPod.Containers = append(tempPod.Containers, tempContainer) + tempPod.ID = v1.UID(container.Labels["PodID"]) + ret = append(ret, tempPod) + } + + } + return ret, nil +} + +func (rm *runtimeManager) GetPodStatus(ID v1.UID, PodName string, PodSpace string) (*PodStatus, error) { + rm.lock.Lock() + defer rm.lock.Unlock() + containers, err := rm.getPodContainers(PodName) + if err != nil { + //panic(err) + return nil, err + } + podStatus := &PodStatus{ + ID: ID, + 
IPs: nil, + Name: PodName, + Namespace: PodSpace, + ContainerStatuses: containers, + TimeStamp: time.Now(), + } + ip, ok := rm.IpMap[ID] + if ok { + podStatus.IPs = append(podStatus.IPs, ip) + } + return podStatus, nil +} + +func (rm *runtimeManager) getPodContainers(PodName string) ([]*ContainerStatus, error) { + containers, err := rm.getAllContainers() + if err != nil { + //panic(err) + return nil, err + } + var ret []*ContainerStatus + for _, container := range containers { + tempContainer := new(ContainerStatus) + if container.Labels["PodName"] == PodName { + tempContainer.ID = container.ID + tempContainer.Image = container.Image + tempContainer.Name = container.Labels["Name"] + tempContainer.CreatedAt = time.Unix(container.Created, 0) + rm.checkContainerStatus(container, tempContainer) + + ret = append(ret, tempContainer) + } + } + return ret, nil +} + +func NewRuntimeManager(nameserverIP string) RuntimeManager { + manager := &runtimeManager{} + manager.IpMap = make(map[v1.UID]string) + manager.nameserverIP = nameserverIP + return manager +} + +func (rm *runtimeManager) AddPod(pod *v1.Pod) error { + rm.lock.Lock() + defer rm.lock.Unlock() + //PauseId, err := rm.CreatePauseContainer(pod.UID, pod.Name, pod.Namespace) + PauseId, err := rm.CreatePauseContainer(pod) + if err != nil { + //panic(err) + return err + } + + for _, c := range pod.Spec.InitContainers { + err = rm.createInitContainer(&c, PauseId) + if err != nil { + return err + } + } + + containerList := pod.Spec.Containers + for _, container := range containerList { + volumes, err := rm.createVolumeDir(pod) + if err != nil { + return err + } + _, err = rm.createContainer(&container, PauseId, pod.UID, pod.Name, pod.Namespace, volumes) + if err != nil { + //panic(err) + return err + } + } + + return nil +} + +// func (rm *runtimeManager) CreatePauseContainer(PodID v1.UID, PodName string, PodNameSpace string) (string, error) { +func (rm *runtimeManager) CreatePauseContainer(pod *v1.Pod) (string, error) { + PodID := pod.UID + PodName := pod.Name + PodNameSpace := pod.Namespace + ctx := context.Background() + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + //panic(err) + return "", err + } + defer cli.Close() + PauseContainerImage := "registry.aliyuncs.com/google_containers/pause:3.6" + exi, err := rm.checkImages(PauseContainerImage) + if err != nil { + //panic(err) + return "", err + } + if !exi { + //fmt.Println("yes") + reader, err := cli.ImagePull(ctx, PauseContainerImage, image.PullOptions{}) + if err != nil { + //panic(err) + return "", err + } + defer reader.Close() + io.Copy(os.Stdout, reader) + } else { + //fmt.Println("already exist") + } + label := make(map[string]string) + label["PodID"] = string(PodID) + label["PodName"] = PodName + label["PauseType"] = "pause" + label["Name"] = PodName + ":PauseContainer" + label["PodNamespace"] = PodNameSpace + resp, err := cli.ContainerCreate(ctx, &container.Config{ + Image: PauseContainerImage, + Tty: false, + Labels: label, + ExposedPorts: rm.getExposedPorts(pod.Spec.Containers), + }, &container.HostConfig{ + PortBindings: make(nat.PortMap), + DNS: []string{rm.nameserverIP}, + }, nil, nil, "") + if err != nil { + //panic(err) + return "", err + } + + if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { + //panic(err) + return "", err + } + + ip, err := nw.Attach(resp.ID) + if err != nil { + //panic(err) + return "", err + } + rm.IpMap[PodID] = ip + + return resp.ID, nil +} + +func (rm 
*runtimeManager) getExposedPorts(containers []v1.Container) nat.PortSet { + exposedPorts := nat.PortSet{} + for _, c := range containers { + ports := c.Ports + portKeys := make(map[string]struct{}) + for _, port := range ports { + var p string + switch port.Protocol { + case v1.ProtocolTCP: + p = "/tcp" + case v1.ProtocolUDP: + p = "/udp" + default: + p = "/tcp" + } + key := fmt.Sprint(port.ContainerPort) + p + portKeys[key] = struct{}{} + } + for key := range portKeys { + exposedPorts[nat.Port(key)] = struct{}{} + } + } + return exposedPorts +} + +func (rm *runtimeManager) getPortBindings(containers []v1.Container) nat.PortMap { + portBindings := make(nat.PortMap) + for _, c := range containers { + ports := c.Ports + for _, num := range ports { + key := nat.Port(fmt.Sprint(num.ContainerPort) + "/tcp") + portBindings[key] = []nat.PortBinding{ + { + HostIP: "0.0.0.0", + HostPort: "", + }, + } + } + } + return portBindings +} + +// 目前init container只是为了支持sidecar,简化了很多逻辑 +func (rm *runtimeManager) createInitContainer(c *v1.Container, pauseID string) error { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return err + } + defer cli.Close() + exist, err := rm.checkImages(c.Image) + if err != nil { + return err + } + if !exist { + readCloser, err := cli.ImagePull(context.Background(), c.Image, image.PullOptions{}) + if err != nil { + return err + } + // 读取pull的输出 + _, _ = io.ReadAll(readCloser) + _ = readCloser.Close() + } + hostConfig := &container.HostConfig{ + NetworkMode: container.NetworkMode("container:" + pauseID), + } + if c.SecurityContext != nil { + if c.SecurityContext.Privileged != nil && *c.SecurityContext.Privileged { + hostConfig.Privileged = *c.SecurityContext.Privileged + } + } + // run container + resp, err := cli.ContainerCreate(context.Background(), &container.Config{ + Image: c.Image, + Tty: true, + }, hostConfig, nil, nil, "") + if err != nil { + return err + } + if err = cli.ContainerStart(context.Background(), resp.ID, container.StartOptions{}); err != nil { + return err + } + // wait for init container to finish + statusCh, errCh := cli.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning) + select { + case err = <-errCh: + if err != nil { + return err + } + case status := <-statusCh: + if status.StatusCode != 0 { + return fmt.Errorf("init container exited with status %d", status.StatusCode) + } + } + // delete init container + if err = cli.ContainerRemove(context.Background(), resp.ID, container.RemoveOptions{}); err != nil { + return err + } + return nil +} + +func (rm *runtimeManager) createContainer(ct *v1.Container, PauseId string, PodID v1.UID, PodName string, PodNameSpace string, volumes map[string]string) (string, error) { + repotag := ct.Image + cmd := ct.Command + ctx := context.Background() + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + //panic(err) + return "", err + } + defer cli.Close() + + exi, err := rm.checkImages(repotag) + if err != nil { + //panic(err) + return "", err + } + if !exi { + //fmt.Println("yes") + reader, err := cli.ImagePull(ctx, repotag, image.PullOptions{}) + if err != nil { + //panic(err) + return "", err + } + defer reader.Close() + io.Copy(os.Stdout, reader) + } else { + //fmt.Println("already exist") + } + // + //ports := ct.Ports + //exposedPortSet := nat.PortSet{} + //portKeys := make(map[string]struct{}) + // + //for _, num := range ports { + // //fmt.Println(num.ContainerPort) + 
// + // key := fmt.Sprint(num.ContainerPort) + "/tcp" + // //fmt.Println(key) + // portKeys[key] = struct{}{} + //} + //for key := range portKeys { + // exposedPortSet[nat.Port(key)] = struct{}{} + //} + + pauseRef := "container:" + PauseId + label := make(map[string]string) + label["PodID"] = string(PodID) + label["PodName"] = PodName + label["Name"] = ct.Name + label["PodNamespace"] = PodNameSpace + + var mounts []mount.Mount + for _, volume := range ct.VolumeMounts { + if dir, ok := volumes[volume.Name]; !ok { + return "", fmt.Errorf("create container: volume %s not declared", volume.Name) + } else { + mounts = append(mounts, mount.Mount{ + Type: mount.TypeBind, + Source: dir, + Target: volume.MountPath, + }) + } + } + + config := &container.Config{ + Image: repotag, + Cmd: cmd, + Tty: true, + Labels: label, + } + + hostConfig := &container.HostConfig{ + NetworkMode: container.NetworkMode(pauseRef), + PidMode: container.PidMode(pauseRef), + Mounts: mounts, + } + + if ct.SecurityContext != nil { + if ct.SecurityContext.Privileged != nil && *ct.SecurityContext.Privileged { + hostConfig.Privileged = *ct.SecurityContext.Privileged + } else if ct.SecurityContext.RunAsUser != nil { + config.User = strconv.FormatInt(*ct.SecurityContext.RunAsUser, 10) + } + } + + resp, err := cli.ContainerCreate(ctx, config, hostConfig, nil, nil, "") + if err != nil { + //panic(err) + return "", err + } + + if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { + //panic(err) + return "", err + } + + // statusCh, errCh := cli.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning) + // select { + // case err := <-errCh: + // if err != nil { + // panic(err) + // } + // case <-statusCh: + // } + + //out, err := cli.ContainerLogs(ctx, resp.ID, container.LogsOptions{ShowStdout: true}) + //if err != nil { + // //panic(err) + // return "", err + //} + // + //stdcopy.StdCopy(os.Stdout, os.Stderr, out) + return resp.ID, nil +} + +func (rm *runtimeManager) checkImages(repotag string) (bool, error) { + ctx := context.Background() + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + //panic(err) + return false, err + } + defer cli.Close() + images, err := cli.ImageList(ctx, image.ListOptions{}) + if err != nil { + //fmt.Println("fail to get images", err) + //panic(err) + return false, err + } + //fmt.Println("Docker Images:") + for _, image := range images { + //fmt.Printf("ID: %s\n", image.ID) + //fmt.Printf("RepoTags: %v\n", image.RepoTags) + //if len(image.RepoTags) == 0 { + // continue + //} + //if image.RepoTags[0] == repotag { + // return true, nil + //} + for _, rt := range image.RepoTags { + if rt == repotag { + return true, nil + } + } + //fmt.Printf("Size: %d\n", image.Size) + //fmt.Println("------------------------") + } + return false, nil +} + +// func (rm *runtimeManager) getAllRunningContainers() ([]types.Container, error) { +// ctx := context.Background() +// cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) +// if err != nil { +// panic(err) +// } +// defer cli.Close() + +// containers, err := cli.ContainerList(ctx, container.ListOptions{}) +// if err != nil { +// panic(err) +// } + +// return containers, nil + +// } + +func (rm *runtimeManager) getAllContainers() ([]types.Container, error) { + ctx := context.Background() + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + //panic(err) + return nil, err + } + defer 
cli.Close() + + containers, err := cli.ContainerList(ctx, container.ListOptions{All: true}) + if err != nil { + //panic(err) + return nil, err + } + var ret []types.Container + for _, container := range containers { + //fmt.Println("container's state" + container.State) + //fmt.Println("container's status" + container.Status) + if _, ok := container.Labels["PauseType"]; !ok { + ret = append(ret, container) + } + } + + return ret, nil +} + +func (rm *runtimeManager) checkContainerState(ct types.Container, ct_todo *Container) { + state := ct.State + //status := ct.Status + if state == "exited" { + ct_todo.State = ContainerStateExited + return + } + if state == "running" { + ct_todo.State = ContainerStateRunning + return + } + if state == "created" { + ct_todo.State = ContainerStateCreated + return + } + ct_todo.State = ContainerStateUnknown +} + +func (rm *runtimeManager) checkContainerStatus(ct types.Container, ct_todo *ContainerStatus) { + state := ct.State + status := ct.Status + if state == "exited" { + ct_todo.State = ContainerStateExited + + left := strings.Index(status, "(") + if left == -1 { + return + } + right := strings.Index(status, ")") + if right == -1 { + return + } + strNumber := status[left+1 : right] + number, _ := strconv.Atoi(strNumber) + + ct_todo.ExitCode = number + return + } + if state == "running" { + ct_todo.State = ContainerStateRunning + return + } + if state == "created" { + ct_todo.State = ContainerStateCreated + return + } + ct_todo.State = ContainerStateUnknown +} + +func (rm *runtimeManager) DeletePod(ID v1.UID) error { + rm.lock.Lock() + defer rm.lock.Unlock() + containers, err := rm.getAllContainersIncludingPause() + if err != nil { + return err + } + for _, container := range containers { + if container.Labels["PodID"] == string(ID) { + if _, ok := container.Labels["PauseType"]; ok { + // log.Printf("Container ID: %s\n", container.ID) + err = nw.Detach(container.ID) + if err != nil { + return err + } + delete(rm.IpMap, v1.UID(container.Labels["PodID"])) + } + err = rm.deleteContainer(container) + if err != nil { + return err + } + } + //if _, ok := container.Labels["IsEnvoy"]; ok { + // if container.Labels["EnvoyPodID"] == string(ID) { + // err = rm.deleteContainer(container) + // if err != nil { + // return err + // } + // } + //} + } + return nil +} + +func (rm *runtimeManager) getAllContainersIncludingPause() ([]types.Container, error) { + ctx := context.Background() + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, err + } + defer cli.Close() + + containers, err := cli.ContainerList(ctx, container.ListOptions{All: true}) + if err != nil { + return nil, err + } + //for _, container := range containers { + // if _, ok := container.Labels["PauseType"]; ok { + // log.Printf("Container ID: %s\n", container.ID) + // err := nw.Detach(container.ID) + // if err != nil { + // panic(err) + // } + // delete(rm.IpMap, v1.UID(container.Labels["PodID"])) + // } + //} + + return containers, nil +} + +func (rm *runtimeManager) deleteContainer(ct types.Container) error { + ctx := context.Background() + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + //panic(err) + return err + } + defer cli.Close() + noWaitTimeout := 0 + if ct.State == "running" { + if err := cli.ContainerStop(ctx, ct.ID, container.StopOptions{Timeout: &noWaitTimeout}); err != nil { + //panic(err) + return err + } + } + if err := cli.ContainerRemove(ctx, ct.ID, 
container.RemoveOptions{}); err != nil { + //panic(err) + return err + } + + return nil +} + +func (rm *runtimeManager) RestartPod(pod *v1.Pod) error { + //err := rm.DeletePod(pod.UID) + //if err != nil { + // return err + //} + //err = rm.AddPod(pod) + //if err != nil { + // return err + //} + //return nil + rm.lock.Lock() + defer rm.lock.Unlock() + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return err + } + defer cli.Close() + containers, err := rm.getAllContainers() + if err != nil { + return err + } + noWaitTimeout := 0 + for _, ct := range containers { + if ct.Labels["PodID"] == string(pod.UID) { + err = cli.ContainerRestart(context.Background(), ct.ID, container.StopOptions{Timeout: &noWaitTimeout}) + if err != nil { + return err + } + } + } + return nil +} + +// volume在主机上的管理由kubelet负责 +func (rm *runtimeManager) createVolumeDir(pod *v1.Pod) (map[string]string, error) { + ret := make(map[string]string) + for _, volume := range pod.Spec.Volumes { + if volume.EmptyDir == nil && volume.HostPath == nil { + return nil, fmt.Errorf("create volume dir: volume %s has no source", volume.Name) + } + if volume.HostPath != nil && volume.EmptyDir != nil { + return nil, fmt.Errorf("create volume dir: volume %s cannot have both hostPath and emptyDir", volume.Name) + } + if volume.EmptyDir != nil { + dir := fmt.Sprintf("/tmp/minikubernetes/volumes/%s/%s", string(pod.UID), volume.Name) + // MkdirAll会创建所有父目录,若目标目录已存在也不会报错 + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return nil, fmt.Errorf("create volume dir: failed to create volume dir %s: %v", dir, err) + } + ret[volume.Name] = dir + } else { + dir := volume.HostPath.Path + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return nil, fmt.Errorf("create volume dir: failed to create volume dir %s: %v", dir, err) + } + ret[volume.Name] = dir + } + } + return ret, nil +} + +func (rm *runtimeManager) injectSideCar(pauseID string, tag string, pod *v1.Pod) error { + if tag == "" { + tag = "latest" + } + envoyInitImage := "sjtuzc/envoy-init:" + tag + envoyImage := "sjtuzc/envoy:" + tag + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + defer cli.Close() + + exist, err := rm.checkImages(envoyInitImage) + if err != nil { + return err + } + if !exist { + _, err = cli.ImagePull(context.Background(), envoyInitImage, image.PullOptions{}) + if err != nil { + return err + } + } + exist, err = rm.checkImages(envoyImage) + if err != nil { + return err + } + if !exist { + _, err = cli.ImagePull(context.Background(), envoyImage, image.PullOptions{}) + if err != nil { + return err + } + } + + // docker run --rm --net container:pauseID --privileged envoy-init + resp, err := cli.ContainerCreate(context.Background(), &container.Config{ + Image: envoyInitImage, + Tty: true, + }, &container.HostConfig{ + NetworkMode: container.NetworkMode("container:" + pauseID), + Privileged: true, + }, nil, nil, "") + if err != nil { + return err + } + if err = cli.ContainerStart(context.Background(), resp.ID, container.StartOptions{}); err != nil { + return err + } + // wait for envoy-init to finish + statusCh, errCh := cli.ContainerWait(context.Background(), resp.ID, container.WaitConditionNotRunning) + select { + case err = <-errCh: + if err != nil { + return err + } + case status := <-statusCh: + if status.StatusCode != 0 { + return fmt.Errorf("envoy-init exited with status %d", status.StatusCode) + } + } + + // docker run --net container:pauseID -u 1337 envoy 
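+	// The long-running envoy sidecar joins the pause container's network namespace and
+	// runs as envoy.UID (1337, matching the envoy Dockerfile), presumably so that the
+	// iptables rules installed by envoy-init can exempt the proxy's own outbound
+	// traffic from redirection, in the style of Istio sidecars.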
+ labels := make(map[string]string) + labels["IsEnvoy"] = "true" + labels["EnvoyPodID"] = string(pod.UID) + resp, err = cli.ContainerCreate(context.Background(), &container.Config{ + Image: envoyImage, + Tty: true, + User: envoy.UID, + Labels: labels, + }, &container.HostConfig{ + NetworkMode: container.NetworkMode("container:" + pauseID), + }, nil, nil, "") + if err != nil { + return err + } + + if err = cli.ContainerStart(context.Background(), resp.ID, container.StartOptions{}); err != nil { + return err + } + return nil +} diff --git a/pkg/kubelet/types/pod_update.go b/pkg/kubelet/types/pod_update.go new file mode 100644 index 0000000..638dada --- /dev/null +++ b/pkg/kubelet/types/pod_update.go @@ -0,0 +1,26 @@ +package types + +import "minikubernetes/pkg/api/v1" + +type PodOperation string + +const ( + ADD PodOperation = "ADD" + DELETE PodOperation = "DELETE" + UPDATE PodOperation = "UPDATE" +) + +type PodUpdate struct { + Pods []*v1.Pod + Op PodOperation +} + +type SyncPodType string + +const ( + SyncPodCreate SyncPodType = "SyncPodCreate" + SyncPodUpdate SyncPodType = "SyncPodUpdate" + SyncPodKill SyncPodType = "SyncPodKill" + SyncPodSync SyncPodType = "SyncPodSync" + SyncPodRecreate SyncPodType = "SyncPodRecreate" +) diff --git a/pkg/kubelet/utils/host_ip.go b/pkg/kubelet/utils/host_ip.go new file mode 100644 index 0000000..855c8e7 --- /dev/null +++ b/pkg/kubelet/utils/host_ip.go @@ -0,0 +1,21 @@ +package utils + +import ( + "fmt" + "github.com/vishvananda/netlink" +) + +func GetHostIP() (string, error) { + link, err := netlink.LinkByName("ens3") + if err != nil { + return "", err + } + addrList, err := netlink.AddrList(link, netlink.FAMILY_V4) + if err != nil { + return "", err + } + if len(addrList) == 0 { + return "", fmt.Errorf("no ip address found") + } + return addrList[0].IP.String(), nil +} diff --git a/pkg/kubelet/utils/naming.go b/pkg/kubelet/utils/naming.go new file mode 100644 index 0000000..47653d1 --- /dev/null +++ b/pkg/kubelet/utils/naming.go @@ -0,0 +1,7 @@ +package utils + +import v1 "minikubernetes/pkg/api/v1" + +func GetPodFullName(pod *v1.Pod) string { + return pod.ObjectMeta.Name + "_" + pod.ObjectMeta.Namespace +} diff --git a/pkg/kubelet/utils/sort.go b/pkg/kubelet/utils/sort.go new file mode 100644 index 0000000..a4f691d --- /dev/null +++ b/pkg/kubelet/utils/sort.go @@ -0,0 +1,12 @@ +package utils + +import ( + v1 "minikubernetes/pkg/api/v1" + "sort" +) + +func SortPodsByCreationTime(pods []*v1.Pod) { + sort.Slice(pods, func(i, j int) bool { + return pods[i].CreationTimestamp.Before(pods[j].CreationTimestamp) + }) +} diff --git a/pkg/kubeproxy/app/server.go b/pkg/kubeproxy/app/server.go new file mode 100644 index 0000000..b11e4ea --- /dev/null +++ b/pkg/kubeproxy/app/server.go @@ -0,0 +1,427 @@ +package app + +import ( + "context" + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeclient" + "minikubernetes/pkg/kubeproxy" + "minikubernetes/pkg/kubeproxy/types" + "os" + "os/signal" + "sort" + "sync" + "time" +) + +type ProxyServer struct { + client kubeclient.Client + serviceUpdates chan *types.ServiceUpdate + dnsUpdates chan *types.DNSUpdate + latest map[v1.UID]*types.ServiceAndEndpoints + latestDNS map[v1.UID]*v1.DNS +} + +func NewProxyServer(apiServerIP string) (*ProxyServer, error) { + ps := &ProxyServer{} + ps.client = kubeclient.NewClient(apiServerIP) + ps.serviceUpdates = make(chan *types.ServiceUpdate, 1) + ps.dnsUpdates = make(chan *types.DNSUpdate, 1) + ps.latest = make(map[v1.UID]*types.ServiceAndEndpoints) + return ps, nil +} + 
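The proxy server's watch loop below (updateService/updateDNS) polls the API server and turns each poll into incremental updates by diffing the new snapshot against the previous one, keyed by object UID. A minimal, self-contained sketch of that snapshot-diff pattern follows; the Item type and diffByUID helper are illustrative only and not part of this repository:

```go
package main

import "fmt"

// Item stands in for a v1.Service or v1.DNS object; only the UID matters for diffing.
type Item struct {
	UID  string
	Name string
}

// diffByUID compares the previous snapshot with the current one, keyed by UID,
// and reports what appeared and what disappeared.
func diffByUID(prev, curr map[string]Item) (additions, deletions []Item) {
	for uid, it := range prev {
		if _, ok := curr[uid]; !ok {
			deletions = append(deletions, it)
		}
	}
	for uid, it := range curr {
		if _, ok := prev[uid]; !ok {
			additions = append(additions, it)
		}
	}
	return additions, deletions
}

func main() {
	prev := map[string]Item{"a": {UID: "a", Name: "svc-a"}, "b": {UID: "b", Name: "svc-b"}}
	curr := map[string]Item{"b": {UID: "b", Name: "svc-b"}, "c": {UID: "c", Name: "svc-c"}}
	add, del := diffByUID(prev, curr)
	fmt.Println("added:", add, "deleted:", del)
	// added: [{c svc-c}]  deleted: [{a svc-a}]
}
```

updateService goes one step further and also compares the endpoint set recorded per service UID (see calculateDiff), so a service whose matching pods are unchanged produces no message on the serviceUpdates channel.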
+func (ps *ProxyServer) Run() { + ctx, cancel := context.WithCancel(context.Background()) + var wg sync.WaitGroup + wg.Add(2) + + signalCh := make(chan os.Signal, 10) + signal.Notify(signalCh, os.Interrupt) + + ps.RunProxy(ctx, &wg) + go ps.watchApiServer(ctx, &wg) + + <-signalCh + log.Println("Received interrupt signal, shutting down...") + cancel() + wg.Wait() +} + +func (ps *ProxyServer) RunProxy(ctx context.Context, wg *sync.WaitGroup) { + proxy, err := kubeproxy.NewKubeProxy() + if err != nil { + log.Printf("Failed to create proxy: %v", err) + return + } + go proxy.Run(ctx, wg, ps.serviceUpdates, ps.dnsUpdates) +} + +func (ps *ProxyServer) watchApiServer(ctx context.Context, wg *sync.WaitGroup) { + defer wg.Done() + SyncPeriod := 4 * time.Second + timer := time.NewTimer(SyncPeriod) + for { + select { + case <-timer.C: + ps.updateService() + ps.updateDNS() + timer.Reset(SyncPeriod) + case <-ctx.Done(): + log.Println("Shutting down api server watcher") + return + } + } +} + +func (ps *ProxyServer) updateDNS() { + dnsSlice, err := ps.client.GetAllDNS() + if err != nil { + log.Printf("Failed to get dns: %v", err) + return + } + //dnsSlice := getMockDNS() + newMap := make(map[v1.UID]*v1.DNS) + for _, dns := range dnsSlice { + newMap[dns.ObjectMeta.UID] = dns + } + defer func() { + ps.latestDNS = newMap + }() + + var dnsAdditions []*v1.DNS + var dnsDeletions []*v1.DNS + for uid, dns := range ps.latestDNS { + if _, ok := newMap[uid]; !ok { + // delete + dnsDeletions = append(dnsDeletions, dns) + } + } + for uid, dns := range newMap { + if _, ok := ps.latestDNS[uid]; !ok { + // add + dnsAdditions = append(dnsAdditions, dns) + } + } + if len(dnsAdditions) > 0 { + ps.dnsUpdates <- &types.DNSUpdate{ + DNS: dnsAdditions, + Op: types.DNSAdd, + } + } + if len(dnsDeletions) > 0 { + ps.dnsUpdates <- &types.DNSUpdate{ + DNS: dnsDeletions, + Op: types.DNSDelete, + } + } +} + +func (ps *ProxyServer) updateService() { + pods, err := ps.client.GetAllPods() + if err != nil { + log.Printf("Failed to get pods: %v", err) + return + } + services, err := ps.client.GetAllServices() + if err != nil { + log.Printf("Failed to get services: %v", err) + return + } + //pods := getMockPods() + //services := getMockServices() + + newMap := make(map[v1.UID]*types.ServiceAndEndpoints) + for _, service := range services { + var endpoints []*types.Endpoint + for _, pod := range pods { + if !isSelectorMatched(pod, service) { + continue + } + if pod.Status.Phase != v1.PodRunning { + continue + } + if pod.Status.PodIP == "" { + continue + } + + var ports []types.EndpointPort + for _, port := range service.Spec.Ports { + SearchLoop: + for _, container := range pod.Spec.Containers { + for _, containerPort := range container.Ports { + if containerPort.ContainerPort == port.TargetPort && containerPort.Protocol == port.Protocol { + ports = append(ports, types.EndpointPort{ + Port: port.TargetPort, + Protocol: port.Protocol, + }) + break SearchLoop + } + } + } + } + + endpoints = append(endpoints, &types.Endpoint{ + IP: pod.Status.PodIP, + Ports: ports, + }) + } + newMap[service.ObjectMeta.UID] = &types.ServiceAndEndpoints{ + Service: service, + Endpoints: endpoints, + } + } + + defer func() { + ps.latest = newMap + }() + + var additions []*types.ServiceUpdateSingle + var deletions []*types.ServiceUpdateSingle + var endpointUpdates []*types.ServiceUpdateSingle + + for uid, oldItem := range ps.latest { + if _, ok := newMap[uid]; !ok { + deletions = append(deletions, &types.ServiceUpdateSingle{ + Service: oldItem.Service, + }) + } + } 
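+	// Remaining cases: a UID that is new in this snapshot becomes a ServiceAdd, an existing
+	// UID whose endpoints changed becomes a ServiceEndpointsUpdate (calculateDiff decides
+	// which), and UIDs that vanished were already queued as deletions above.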
+ for uid, newItem := range newMap { + oldItem := ps.latest[uid] + if diff, update, op := calculateDiff(oldItem, newItem); diff { + if op == types.ServiceAdd { + additions = append(additions, update) + } else { + endpointUpdates = append(endpointUpdates, update) + } + } + } + + if len(additions) > 0 { + ps.serviceUpdates <- &types.ServiceUpdate{ + Updates: additions, + Op: types.ServiceAdd, + } + } + if len(deletions) > 0 { + ps.serviceUpdates <- &types.ServiceUpdate{ + Updates: deletions, + Op: types.ServiceDelete, + } + } + if len(endpointUpdates) > 0 { + ps.serviceUpdates <- &types.ServiceUpdate{ + Updates: endpointUpdates, + Op: types.ServiceEndpointsUpdate, + } + } +} + +func isSelectorMatched(pod *v1.Pod, svc *v1.Service) bool { + for key, value := range svc.Spec.Selector { + if label, ok := pod.Labels[key]; !ok || label != value { + return false + } + } + return true +} + +func calculateDiff(oldItem, newItem *types.ServiceAndEndpoints) (bool, *types.ServiceUpdateSingle, types.ServiceOperation) { + var op types.ServiceOperation + var diff bool + if oldItem == nil { + op = types.ServiceAdd + oldItem = &types.ServiceAndEndpoints{} + diff = true + } else { + op = types.ServiceEndpointsUpdate + diff = false + } + update := &types.ServiceUpdateSingle{ + Service: newItem.Service, + } + oldIPMap := make(map[string][]types.EndpointPort) + newIPMap := make(map[string][]types.EndpointPort) + for _, ep := range oldItem.Endpoints { + oldIPMap[ep.IP] = ep.Ports + } + for _, ep := range newItem.Endpoints { + newIPMap[ep.IP] = ep.Ports + } + for ip, oldPorts := range oldIPMap { + if newPorts, ok := newIPMap[ip]; !ok || !isEndpointPortsEqual(oldPorts, newPorts) { + diff = true + update.EndpointDeletions = append(update.EndpointDeletions, &types.Endpoint{ + IP: ip, + Ports: oldPorts, + }) + if ok { + update.EndpointAdditions = append(update.EndpointAdditions, &types.Endpoint{ + IP: ip, + Ports: newPorts, + }) + } + } + } + for ip, newPorts := range newIPMap { + if _, ok := oldIPMap[ip]; !ok { + diff = true + update.EndpointAdditions = append(update.EndpointAdditions, &types.Endpoint{ + IP: ip, + Ports: newPorts, + }) + } + } + return diff, update, op +} + +// 以下为fake数据 +var cnt int = 0 + +func getMockPods() []*v1.Pod { + cnt++ + if cnt == 1 { + return []*v1.Pod{} + } + if cnt == 2 { + return []*v1.Pod{ + { + ObjectMeta: v1.ObjectMeta{ + UID: "abcde", + Labels: map[string]string{"app": "nginx"}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Ports: []v1.ContainerPort{ + { + ContainerPort: 8080, + Protocol: v1.ProtocolTCP, + }, + { + ContainerPort: 9090, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.32.0.1", + }, + }, + } + } + return []*v1.Pod{ + { + ObjectMeta: v1.ObjectMeta{ + UID: "abcde", + Labels: map[string]string{"app": "nginx"}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Ports: []v1.ContainerPort{ + { + ContainerPort: 8080, + Protocol: v1.ProtocolTCP, + }, + { + ContainerPort: 9091, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + PodIP: "10.32.0.1", + }, + }, + } +} + +func getMockServices() []*v1.Service { + //if cnt > 2 { + // return []*v1.Service{} + //} + return []*v1.Service{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "nginx", + UID: "bdafsdfjlasdfas", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + Selector: map[string]string{"app": "nginx"}, + ClusterIP: "100.1.1.0", + Ports: []v1.ServicePort{ + { + Port: 80, + TargetPort: 8080, 
+ Protocol: v1.ProtocolTCP, + }, + { + Port: 90, + TargetPort: 9090, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + } +} + +func getMockDNS() []*v1.DNS { + if cnt >= 2 && cnt <= 6 { + return []*v1.DNS{ + { + ObjectMeta: v1.ObjectMeta{ + Name: "nginx", + UID: "bdafsdfjlasdfas", + Namespace: "default", + }, + Spec: v1.DNSSpec{ + Rules: []v1.DNSRule{ + { + Host: "myservice.test.com", + Paths: []v1.DNSPath{ + { + Path: "/service1", + Backend: v1.DNSBackend{ + Service: v1.DNSServiceBackend{ + Name: "nginx", + Port: 80, + }, + }, + }, + }, + }, + }, + }, + }, + } + } + return []*v1.DNS{} +} + +func isEndpointPortsEqual(a, b []types.EndpointPort) bool { + if len(a) != len(b) { + return false + } + sort.Slice(a, func(i, j int) bool { + return a[i].Port < a[j].Port + }) + sort.Slice(b, func(i, j int) bool { + return b[i].Port < b[j].Port + }) + for i := range a { + if a[i].Port != b[i].Port || a[i].Protocol != b[i].Protocol { + return false + } + } + return true +} diff --git a/pkg/kubeproxy/proxy.go b/pkg/kubeproxy/proxy.go new file mode 100644 index 0000000..b157ec1 --- /dev/null +++ b/pkg/kubeproxy/proxy.go @@ -0,0 +1,511 @@ +package kubeproxy + +import ( + "bufio" + "context" + "fmt" + "github.com/vishvananda/netlink" + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubelet/runtime" + "minikubernetes/pkg/kubeproxy/route" + "minikubernetes/pkg/kubeproxy/types" + "minikubernetes/pkg/utils" + "os" + "os/exec" + "strings" + "sync" +) + +type Proxy struct { + ipvs route.IPVS + hostIP string + nameserverIP string + // 缓存service full name到ClusterIP的映射 + serviceCache map[string]string +} + +func NewKubeProxy() (*Proxy, error) { + p := &Proxy{} + ipvs, err := route.NewIPVS() + if err != nil { + return nil, err + } + p.ipvs = ipvs + p.serviceCache = make(map[string]string) + return p, nil +} + +func (p *Proxy) Run(ctx context.Context, wg *sync.WaitGroup, + serviceUpdates <-chan *types.ServiceUpdate, dnsUpdates <-chan *types.DNSUpdate) { + log.Printf("KubeProxy running...") + err := p.ipvs.Init() + if err != nil { + log.Printf("Failed to init ipvs: %v", err) + return + } + log.Printf("IPVS initialized.") + // get host ip + hostIP, err := getHostIP() + if err != nil { + log.Printf("Failed to get host ip: %v", err) + return + } + p.hostIP = hostIP + nameserverIP, err := runtime.GetContainerBridgeIP("coredns") + if err != nil { + log.Printf("Failed to get nameserver ip: %v", err) + return + } + p.nameserverIP = nameserverIP + // 写入/etc/resolv.conf + err = writeResolvConf(nameserverIP) + if err != nil { + log.Printf("Failed to write resolv.conf: %v", err) + return + } + p.syncLoop(ctx, wg, serviceUpdates, dnsUpdates) +} + +func writeResolvConf(nameserverIP string) error { + filename := "/etc/resolv.conf" + defaultNameserver := "127.0.0.53" + content := fmt.Sprintf("nameserver %s\nnameserver %s\n", nameserverIP, defaultNameserver) + err := os.WriteFile(filename, []byte(content), 0644) + return err +} + +func getHostIP() (string, error) { + link, err := netlink.LinkByName("ens3") + if err != nil { + return "", err + } + addrList, err := netlink.AddrList(link, netlink.FAMILY_V4) + if err != nil { + return "", err + } + if len(addrList) == 0 { + return "", fmt.Errorf("no ip address found") + } + return addrList[0].IP.String(), nil +} + +func (p *Proxy) syncLoop(ctx context.Context, wg *sync.WaitGroup, + serviceUpdates <-chan *types.ServiceUpdate, dnsUpdates <-chan *types.DNSUpdate) { + defer wg.Done() + log.Printf("Sync loop started.") + for { + if !p.syncLoopIteration(ctx, serviceUpdates, 
dnsUpdates) { + break + } + } + log.Printf("Sync loop ended.") + p.DoCleanUp() +} + +func (p *Proxy) syncLoopIteration(ctx context.Context, + updateCh <-chan *types.ServiceUpdate, dnsCh <-chan *types.DNSUpdate) bool { + select { + case update, ok := <-updateCh: + if !ok { + log.Printf("Service update channel closed.") + return false + } + switch update.Op { + case types.ServiceAdd: + p.HandleServiceAdditions(update.Updates) + case types.ServiceDelete: + p.HandleServiceDeletions(update.Updates) + case types.ServiceEndpointsUpdate: + p.HandleServiceEndpointsUpdate(update.Updates) + } + case update, ok := <-dnsCh: + if !ok { + log.Printf("DNS update channel closed.") + return false + } + switch update.Op { + case types.DNSAdd: + p.HandleDNSAdditions(update.DNS) + case types.DNSDelete: + p.HandleDNSDeletions(update.DNS) + } + case <-ctx.Done(): + return false + } + return true +} + +func (p *Proxy) HandleDNSAdditions(dns []*v1.DNS) { + log.Println("Handling dns additions...") + for _, d := range dns { + log.Printf("Adding dns %s.", d.Name) + // step 1: 新建nginx配置文件 /etc/nginx/conf.d/.conf + err := p.addNginxConfig(d) + if err != nil { + log.Printf("Failed to write nginx config: %v", err) + continue + } + // step 2: nginx -s reload + err = exec.Command("nginx", "-s", "reload").Run() + if err != nil { + log.Printf("Failed to reload nginx: %v", err) + continue + } + // step 3: 更新coredns配置文件 /etc/coredns/hosts + err = p.addCorednsHosts(d) + if err != nil { + log.Printf("Failed to write coredns hosts: %v", err) + continue + } + log.Printf("DNS %s added.", d.Name) + } +} + +func (p *Proxy) addCorednsHosts(dns *v1.DNS) error { + filename := "/etc/coredns/hosts" + file, err := os.Open(filename) + if err != nil { + return err + } + scanner := bufio.NewScanner(file) + // 检查重复 + hosts := make(map[string]struct{}) + content := "" + for scanner.Scan() { + line := scanner.Text() + host := strings.Fields(line)[1] + hosts[host] = struct{}{} + content += line + "\n" + } + err = file.Close() + if err != nil { + return err + } + for _, rule := range dns.Spec.Rules { + if _, ok := hosts[rule.Host]; ok { + log.Printf("Host %s already exists in coredns hosts.", rule.Host) + } else { + content += fmt.Sprintf("%s %s\n", p.hostIP, rule.Host) + } + } + err = os.WriteFile(filename, []byte(content), 0644) + return err +} + +func (p *Proxy) addNginxConfig(dns *v1.DNS) error { + confFmt := "server{listen 80;server_name %v;%v}" + locationFmt := "location %v {proxy_pass http://%v:%v/;}" + filenameFmt := "/etc/nginx/conf.d/%v.conf" + for _, rule := range dns.Spec.Rules { + filename := fmt.Sprintf(filenameFmt, rule.Host) + // 检查是否存在重名host + if _, err := os.Stat(filename); err == nil || !os.IsNotExist(err) { + return fmt.Errorf("nginx config file %s already exists", filename) + } + location := "" + for _, path := range rule.Paths { + svcFullName := dns.Namespace + "_" + path.Backend.Service.Name + if clusterIP, ok := p.serviceCache[svcFullName]; ok { + singleLocation := fmt.Sprintf(locationFmt, path.Path, clusterIP, path.Backend.Service.Port) + location += singleLocation + } else { + return fmt.Errorf("service %s not found in cache", svcFullName) + } + } + if location == "" { + return fmt.Errorf("no service found for dns %v", dns.Name) + } + conf := fmt.Sprintf(confFmt, rule.Host, location) + err := os.WriteFile(filename, []byte(conf), 0644) + if err != nil { + return fmt.Errorf("failed to write nginx config: %v", err) + } + } + return nil +} + +func (p *Proxy) HandleDNSDeletions(dns []*v1.DNS) { + log.Println("Handling dns 
deletions...") + for _, d := range dns { + log.Printf("Deleting dns %s.", d.Name) + // step 1: 更新coredns配置文件 /etc/coredns/hosts + err := p.deleteCorednsHosts(d) + if err != nil { + log.Printf("Failed to delete coredns hosts: %v", err) + continue + } + // step 2: 删除nginx配置文件 /etc/nginx/conf.d/.conf + err = p.deleteNginxConfig(d) + if err != nil { + log.Printf("Failed to delete nginx config: %v", err) + continue + } + // step 3: nginx -s reload + err = exec.Command("nginx", "-s", "reload").Run() + if err != nil { + log.Printf("Failed to reload nginx: %v", err) + continue + } + log.Printf("DNS %s deleted.", d.Name) + } +} + +func (p *Proxy) deleteCorednsHosts(dns *v1.DNS) error { + filename := "/etc/coredns/hosts" + file, err := os.Open(filename) + if err != nil { + return err + } + scanner := bufio.NewScanner(file) + hostsToDelete := make(map[string]struct{}) + for _, rule := range dns.Spec.Rules { + hostsToDelete[rule.Host] = struct{}{} + } + content := "" + for scanner.Scan() { + line := scanner.Text() + host := strings.Fields(line)[1] + if _, ok := hostsToDelete[host]; !ok { + content += line + "\n" + } + } + err = file.Close() + if err != nil { + return err + } + err = os.WriteFile(filename, []byte(content), 0644) + return err +} + +func (p *Proxy) addServiceNameDNS(service *v1.Service) error { + filename := "/etc/coredns/hosts" + file, err := os.Open(filename) + if err != nil { + return err + } + scanner := bufio.NewScanner(file) + // 检查重复 + hosts := make(map[string]struct{}) + content := "" + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + if len(fields) != 2 { + return fmt.Errorf("invalid coredns hosts file: %s", line) + } + host := fields[1] + hosts[host] = struct{}{} + content += line + "\n" + } + err = file.Close() + if err != nil { + return err + } + if _, ok := hosts[service.Name]; ok { + return fmt.Errorf("service name %s already exists in coredns hosts", service.Name) + } else if service.Spec.ClusterIP == "" { + return fmt.Errorf("service %s has no cluster ip", service.Name) + } else { + content += fmt.Sprintf("%s %s\n", service.Spec.ClusterIP, service.Name) + } + err = os.WriteFile(filename, []byte(content), 0644) + return err +} + +func (p *Proxy) deleteServiceNameDNS(service *v1.Service) error { + filename := "/etc/coredns/hosts" + file, err := os.Open(filename) + if err != nil { + return err + } + scanner := bufio.NewScanner(file) + content := "" + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + if len(fields) != 2 { + return fmt.Errorf("invalid coredns hosts file: %s", line) + } + host := fields[1] + if host != service.Name { + content += line + "\n" + } else if fields[0] != service.Spec.ClusterIP { + return fmt.Errorf("cluster ip mismatch for service %s", service.Name) + } + } + err = file.Close() + if err != nil { + return err + } + err = os.WriteFile(filename, []byte(content), 0644) + return err +} + +func (p *Proxy) deleteNginxConfig(dns *v1.DNS) error { + filenameFmt := "/etc/nginx/conf.d/%v.conf" + for _, rule := range dns.Spec.Rules { + filename := fmt.Sprintf(filenameFmt, rule.Host) + err := os.Remove(filename) + if err != nil { + return err + } + } + return nil +} + +func (p *Proxy) HandleServiceAdditions(updates []*types.ServiceUpdateSingle) { + log.Println("Handling service additions...") + for _, update := range updates { + log.Printf("Adding service %s.", update.Service.Name) + svc := update.Service + vip := svc.Spec.ClusterIP + // 反向映射:targetPort -> port / nodePort + reverseSvcPortMap := 
make(map[int32]int32) + reverseNodePortMap := make(map[int32]int32) + isNodePort := svc.Spec.Type == v1.ServiceTypeNodePort + for _, svcPort := range svc.Spec.Ports { + err := p.ipvs.AddVirtual(vip, uint16(svcPort.Port), svcPort.Protocol, true) + if err != nil { + log.Printf("Failed to add virtual server: %v", err) + continue + } + if isNodePort { + err = p.ipvs.AddVirtual(p.hostIP, uint16(svcPort.NodePort), svcPort.Protocol, false) + if err != nil { + log.Printf("Failed to add virtual server: %v", err) + continue + } + reverseNodePortMap[svcPort.TargetPort] = svcPort.NodePort + } + reverseSvcPortMap[svcPort.TargetPort] = svcPort.Port + } + for _, endpoint := range update.EndpointAdditions { + for _, endpointPort := range endpoint.Ports { + if svcPort, ok := reverseSvcPortMap[endpointPort.Port]; ok { + err := p.ipvs.AddRoute(vip, uint16(svcPort), endpoint.IP, uint16(endpointPort.Port), endpointPort.Protocol) + if err != nil { + log.Printf("Failed to add route: %v", err) + } + } + if nodePort, ok := reverseNodePortMap[endpointPort.Port]; ok { + err := p.ipvs.AddRoute(p.hostIP, uint16(nodePort), endpoint.IP, uint16(endpointPort.Port), endpointPort.Protocol) + if err != nil { + log.Printf("Failed to add route: %v", err) + } + } + } + } + p.serviceCache[utils.GetObjectFullName(&svc.ObjectMeta)] = vip + err := p.addServiceNameDNS(svc) + if err != nil { + log.Printf("Failed to add service name dns: %v", err) + } + log.Printf("Service %s added.", svc.Name) + } +} + +func (p *Proxy) HandleServiceDeletions(updates []*types.ServiceUpdateSingle) { + log.Println("Handling service deletions...") + for _, update := range updates { + log.Printf("Deleting service %s.", update.Service.Name) + svc := update.Service + vip := svc.Spec.ClusterIP + isNodePort := svc.Spec.Type == v1.ServiceTypeNodePort + for _, svcPort := range svc.Spec.Ports { + err := p.ipvs.DeleteVirtual(vip, uint16(svcPort.Port), svcPort.Protocol, true) + if err != nil { + log.Printf("Failed to delete virtual server: %v", err) + } + if isNodePort { + err = p.ipvs.DeleteVirtual(p.hostIP, uint16(svcPort.NodePort), svcPort.Protocol, false) + if err != nil { + log.Printf("Failed to delete virtual server: %v", err) + } + } + } + delete(p.serviceCache, utils.GetObjectFullName(&svc.ObjectMeta)) + err := p.deleteServiceNameDNS(svc) + if err != nil { + log.Printf("Failed to delete service name dns: %v", err) + } + log.Printf("Service %s deleted.", svc.Name) + } +} + +func (p *Proxy) HandleServiceEndpointsUpdate(updates []*types.ServiceUpdateSingle) { + log.Println("Handling service endpoints update...") + for _, update := range updates { + log.Printf("Updating service %s.", update.Service.Name) + svc := update.Service + vip := svc.Spec.ClusterIP + reverseSvcPortMap := make(map[int32]int32) + reverseNodePortMap := make(map[int32]int32) + isNodePort := svc.Spec.Type == v1.ServiceTypeNodePort + for _, svcPort := range svc.Spec.Ports { + reverseSvcPortMap[svcPort.TargetPort] = svcPort.Port + if isNodePort { + reverseNodePortMap[svcPort.TargetPort] = svcPort.NodePort + } + } + for _, endpoint := range update.EndpointDeletions { + for _, endpointPort := range endpoint.Ports { + if svcPort, ok := reverseSvcPortMap[endpointPort.Port]; ok { + err := p.ipvs.DeleteRoute(vip, uint16(svcPort), endpoint.IP, uint16(endpointPort.Port), endpointPort.Protocol) + if err != nil { + log.Printf("Failed to delete route: %v", err) + } + } + if nodePort, ok := reverseNodePortMap[endpointPort.Port]; ok { + err := p.ipvs.DeleteRoute(p.hostIP, uint16(nodePort), endpoint.IP, 
uint16(endpointPort.Port), endpointPort.Protocol) + if err != nil { + log.Printf("Failed to delete route: %v", err) + } + } + } + } + for _, endpoint := range update.EndpointAdditions { + for _, endpointPort := range endpoint.Ports { + if svcPort, ok := reverseSvcPortMap[endpointPort.Port]; ok { + err := p.ipvs.AddRoute(vip, uint16(svcPort), endpoint.IP, uint16(endpointPort.Port), endpointPort.Protocol) + if err != nil { + log.Printf("Failed to add route: %v", err) + } + } + if nodePort, ok := reverseNodePortMap[endpointPort.Port]; ok { + err := p.ipvs.AddRoute(p.hostIP, uint16(nodePort), endpoint.IP, uint16(endpointPort.Port), endpointPort.Protocol) + if err != nil { + log.Printf("Failed to add route: %v", err) + } + } + } + } + log.Printf("Service %s updated.", svc.Name) + } +} + +func cleanConfigFiles() { + nginxFiles, err := os.ReadDir("/etc/nginx/conf.d") + if err != nil { + log.Printf("Failed to read nginx conf.d: %v", err) + } + for _, file := range nginxFiles { + err = os.Remove("/etc/nginx/conf.d/" + file.Name()) + if err != nil { + log.Printf("Failed to remove nginx config file: %v", err) + } + } + err = os.Truncate("/etc/coredns/hosts", 0) + if err != nil { + log.Printf("Failed to truncate coredns hosts: %v", err) + } +} + +func (p *Proxy) DoCleanUp() { + err := p.ipvs.Clear() + if err != nil { + log.Printf("Failed to clear ipvs: %v", err) + } + cleanConfigFiles() + log.Printf("Proxy cleanup done.") +} diff --git a/pkg/kubeproxy/route/ipvs.go b/pkg/kubeproxy/route/ipvs.go new file mode 100644 index 0000000..424c5b1 --- /dev/null +++ b/pkg/kubeproxy/route/ipvs.go @@ -0,0 +1,245 @@ +package route + +import ( + "fmt" + "github.com/moby/ipvs" + "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" + "log" + v1 "minikubernetes/pkg/api/v1" + "net" + "os/exec" +) + +type IPVS interface { + Init() error + AddVirtual(vip string, port uint16, protocol v1.Protocol, isVirtualDummy bool) error + AddRoute(vip string, vport uint16, rip string, rport uint16, protocol v1.Protocol) error + DeleteVirtual(vip string, port uint16, protocol v1.Protocol, isVirtualDummy bool) error + DeleteRoute(vip string, vport uint16, rip string, rport uint16, protocol v1.Protocol) error + Clear() error +} + +type basicIPVS struct { + handle *ipvs.Handle +} + +func NewIPVS() (IPVS, error) { + ret := &basicIPVS{} + handle, err := ipvs.New("") + if err != nil { + return nil, err + } + ret.handle = handle + return ret, nil +} + +func createDummy() error { + dummy := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: "minik8s-dummy", + }, + } + err := netlink.LinkAdd(dummy) + return err +} + +func deleteDummy() error { + link, err := netlink.LinkByName("minik8s-dummy") + if err != nil { + return err + } + err = netlink.LinkDel(link) + return err +} + +func addAddrToDummy(cidr string) error { + link, err := netlink.LinkByName("minik8s-dummy") + if err != nil { + return err + } + ip, ipNet, err := net.ParseCIDR(cidr) + if err != nil { + return err + } + ipNet.IP = ip + addr := &netlink.Addr{ + IPNet: ipNet, + } + err = netlink.AddrAdd(link, addr) + return err +} + +func delAddrFromDummy(cidr string) error { + link, err := netlink.LinkByName("minik8s-dummy") + if err != nil { + return err + } + ip, ipNet, err := net.ParseCIDR(cidr) + if err != nil { + return err + } + ipNet.IP = ip + addr := &netlink.Addr{ + IPNet: ipNet, + } + err = netlink.AddrDel(link, addr) + return err +} + +func (b *basicIPVS) Init() error { + // 为了能使用ipvs的完整功能,使用jcloud镜像时,至少执行以下命令: + // modprobe br_netfilter + // ip link add dev 
minik8s-dummy type dummy + // sysctl --write net.bridge.bridge-nf-call-iptables=1 + // sysctl --write net.ipv4.ip_forward=1 + // sysctl --write net.ipv4.vs.conntrack=1 + err := exec.Command("modprobe", "br_netfilter").Run() + if err != nil { + return fmt.Errorf("modprobe br_netfilter failed: %v", err) + } + err = createDummy() + if err != nil { + if err.Error() == "file exists" { + log.Println("dummy already exists") + } else { + return fmt.Errorf("create dummy failed: %v", err) + } + } + err = exec.Command("sysctl", "--write", "net.bridge.bridge-nf-call-iptables=1").Run() + if err != nil { + return fmt.Errorf("sysctl --write net.bridge.bridge-nf-call-iptables=1 failed: %v", err) + } + err = exec.Command("sysctl", "--write", "net.ipv4.ip_forward=1").Run() + if err != nil { + return fmt.Errorf("sysctl --write net.ipv4.ip_forward=1 failed: %v", err) + } + err = exec.Command("sysctl", "--write", "net.ipv4.vs.conntrack=1").Run() + if err != nil { + return fmt.Errorf("sysctl --write net.ipv4.vs.conntrack=1 failed: %v", err) + } + return nil +} + +func (b *basicIPVS) AddVirtual(vip string, port uint16, protocol v1.Protocol, isVirtualDummy bool) error { + addr := net.ParseIP(vip) + if addr == nil { + return fmt.Errorf("invalid ip address: %s", vip) + } + svc := &ipvs.Service{ + Address: addr, + Protocol: protocol2Unix(protocol), + Port: port, + AddressFamily: unix.AF_INET, + SchedName: ipvs.RoundRobin, + } + err := b.handle.NewService(svc) + if err != nil { + return err + } + // ip addr add + if !isVirtualDummy { + return nil + } + err = addAddrToDummy(vip + "/32") + if err != nil { + if err.Error() == "file exists" { + log.Println("ip addr already exists") + } else { + return err + } + } + return nil +} + +func (b *basicIPVS) AddRoute(vip string, vport uint16, rip string, rport uint16, protocol v1.Protocol) error { + vaddr := net.ParseIP(vip) + if vaddr == nil { + return fmt.Errorf("invalid ip address: %s", vip) + } + raddr := net.ParseIP(rip) + if raddr == nil { + return fmt.Errorf("invalid ip address: %s", rip) + } + svc := &ipvs.Service{ + Address: vaddr, + Protocol: protocol2Unix(protocol), + Port: vport, + AddressFamily: unix.AF_INET, + } + dest := &ipvs.Destination{ + Address: raddr, + Port: rport, + AddressFamily: unix.AF_INET, + Weight: 1, + ConnectionFlags: ipvs.ConnectionFlagMasq, + } + err := b.handle.NewDestination(svc, dest) + return err +} + +func (b *basicIPVS) DeleteVirtual(vip string, port uint16, protocol v1.Protocol, isVirtualDummy bool) error { + addr := net.ParseIP(vip) + if addr == nil { + return fmt.Errorf("invalid ip address: %s", vip) + } + svc := &ipvs.Service{ + Address: addr, + Protocol: protocol2Unix(protocol), + Port: port, + AddressFamily: unix.AF_INET, + } + err := b.handle.DelService(svc) + if err != nil { + return err + } + // ip addr del + if !isVirtualDummy { + return nil + } + err = delAddrFromDummy(vip + "/32") + return err +} + +func (b *basicIPVS) DeleteRoute(vip string, vport uint16, rip string, rport uint16, protocol v1.Protocol) error { + vaddr := net.ParseIP(vip) + if vaddr == nil { + return fmt.Errorf("invalid ip address: %s", vip) + } + raddr := net.ParseIP(rip) + if raddr == nil { + return fmt.Errorf("invalid ip address: %s", rip) + } + svc := &ipvs.Service{ + Address: vaddr, + Protocol: protocol2Unix(protocol), + Port: vport, + AddressFamily: unix.AF_INET, + } + dest := &ipvs.Destination{ + Address: raddr, + Port: rport, + AddressFamily: unix.AF_INET, + } + err := b.handle.DelDestination(svc, dest) + return err +} + +func (b *basicIPVS) Clear() 
error { + err := b.handle.Flush() + if err != nil { + return err + } + return deleteDummy() +} + +func protocol2Unix(protocol v1.Protocol) uint16 { + switch protocol { + case v1.ProtocolTCP: + return unix.IPPROTO_TCP + case v1.ProtocolUDP: + return unix.IPPROTO_UDP + default: + return unix.IPPROTO_TCP + } +} diff --git a/pkg/kubeproxy/types/dns_update.go b/pkg/kubeproxy/types/dns_update.go new file mode 100644 index 0000000..4229081 --- /dev/null +++ b/pkg/kubeproxy/types/dns_update.go @@ -0,0 +1,15 @@ +package types + +import v1 "minikubernetes/pkg/api/v1" + +type DNSOperation string + +const ( + DNSAdd DNSOperation = "DNSAdd" + DNSDelete DNSOperation = "DNSDelete" +) + +type DNSUpdate struct { + DNS []*v1.DNS + Op DNSOperation +} diff --git a/pkg/kubeproxy/types/endpoint.go b/pkg/kubeproxy/types/endpoint.go new file mode 100644 index 0000000..80beab8 --- /dev/null +++ b/pkg/kubeproxy/types/endpoint.go @@ -0,0 +1,20 @@ +package types + +import v1 "minikubernetes/pkg/api/v1" + + +// 一个Endpoint指向一个Pod +type Endpoint struct { + IP string + Ports []EndpointPort +} + +type EndpointPort struct { + Port int32 + Protocol v1.Protocol +} + +type ServiceAndEndpoints struct { + Service *v1.Service + Endpoints []*Endpoint +} diff --git a/pkg/kubeproxy/types/service_update.go b/pkg/kubeproxy/types/service_update.go new file mode 100644 index 0000000..7ce637b --- /dev/null +++ b/pkg/kubeproxy/types/service_update.go @@ -0,0 +1,26 @@ +package types + +import v1 "minikubernetes/pkg/api/v1" + +type ServiceOperation string + +const ( + ServiceAdd ServiceOperation = "ServiceAdd" + ServiceDelete ServiceOperation = "ServiceDelete" + // delete和add放在同一个事件中,防止冲突 + ServiceEndpointsUpdate ServiceOperation = "ServiceEndpointsUpdate" +) + +type ServiceUpdate struct { + //Service *v1.Service + //EndpointAdditions []*Endpoint + //EndpointDeletions []*Endpoint + Updates []*ServiceUpdateSingle + Op ServiceOperation +} + +type ServiceUpdateSingle struct { + Service *v1.Service + EndpointAdditions []*Endpoint + EndpointDeletions []*Endpoint +} diff --git a/pkg/microservice/envoy/envoy.go b/pkg/microservice/envoy/envoy.go new file mode 100644 index 0000000..ab4b789 --- /dev/null +++ b/pkg/microservice/envoy/envoy.go @@ -0,0 +1,345 @@ +package envoy + +import ( + "fmt" + "github.com/gin-gonic/gin" + "log" + "math/rand" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeclient" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "os/signal" + "regexp" + "time" +) + +const ( + InboundPort = "15006" + OutboundPort = "15001" + UID = "1337" + GID = "1337" +) + +type Envoy struct { + inboundRouter *gin.Engine + outboundRouter *gin.Engine + mapping v1.SidecarMapping + kubeClient kubeclient.Client + nameMapping v1.SidecarServiceNameMapping +} + +func NewEnvoy(apiServerIP string) (*Envoy, error) { + envoy := &Envoy{} + envoy.inboundRouter = gin.Default() + envoy.outboundRouter = gin.Default() + envoy.mapping = make(v1.SidecarMapping) + envoy.nameMapping = make(v1.SidecarServiceNameMapping) + envoy.kubeClient = kubeclient.NewClient(apiServerIP) + return envoy, nil +} + +func (e *Envoy) inboundProxy(c *gin.Context) { + log.Printf("Inbound proxy receive request: %v %v %v\n", c.Request.Method, c.Request.URL, c.Request.Host) + host := c.Request.Host + ip, port, err := net.SplitHostPort(host) + if err != nil { + if net.ParseIP(host) != nil { + // host仅有ip,使用默认端口 + ip = host + port = "80" + } else { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Domain name not supported in inbound proxy"}) + return + } + } + + 
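+	// Look up the original "host:port" in the sidecar mapping and, when an entry exists, rewrite the
+	// port to the first endpoint's TargetPort before proxying to 127.0.0.1. For example, with the
+	// values from getMockMapping below, an inbound request addressed to 100.0.0.0:80 would be
+	// forwarded to the local application container at 127.0.0.1:801.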
// 查找端口映射 + key := fmt.Sprintf("%s:%s", ip, port) + if endpoints, ok := e.mapping[key]; ok { + if len(endpoints) != 0 && len(endpoints[0].Endpoints) != 0 { + port = fmt.Sprintf("%v", endpoints[0].Endpoints[0].TargetPort) + } + } + + ip = "127.0.0.1" + target := c.Request.URL + if target.Scheme == "" { + target.Scheme = "http" + } + target.Host = fmt.Sprintf("%s:%s", ip, port) + log.Printf("Target: %v\n", target) + reverseProxy := httputil.NewSingleHostReverseProxy(target) + reverseProxy.Director = func(req *http.Request) { + //req.URL = target + req.Host = target.Host + //req.Method = c.Request.Method + //req.Header = c.Request.Header + } + reverseProxy.ServeHTTP(c.Writer, c.Request) +} + +func (e *Envoy) outboundProxy(c *gin.Context) { + log.Printf("Outbound proxy receive request: %v %v %v\n", c.Request.Method, c.Request.URL, c.Request.Host) + host := c.Request.Host + ip, port, err := net.SplitHostPort(host) + useOriginal := false + if err != nil { + if net.ParseIP(host) != nil { + // host仅有ip,使用默认端口 + ip = host + port = "80" + } else if svcIP, ok := e.nameMapping[ip]; !ok { + // host为域名 + useOriginal = true + } else { + // host为服务名 + ip = svcIP + port = "80" + } + } else { + if net.ParseIP(ip) != nil { + // do nothing + } else if svcIP, ok := e.nameMapping[ip]; !ok { + // host为域名 + useOriginal = true + } else { + // host为服务名 + ip = svcIP + } + } + if !useOriginal { + if _, ok := e.mapping[fmt.Sprintf("%s:%s", ip, port)]; !ok { + useOriginal = true + } + } + var target *url.URL + if useOriginal { + // 不进行转发 + target = c.Request.URL + if target.Scheme == "" { + target.Scheme = "http" + } + if target.Host == "" { + target.Host = c.Request.Host + } + } else { + endpoints := e.mapping[fmt.Sprintf("%s:%s", ip, port)] + if len(endpoints) == 0 { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "No available endpoints"}) + return + } + useWeight := endpoints[0].Weight != nil + var dest string + if useWeight { + dest, err = getDestinationWithWeight(endpoints) + if err != nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": err.Error()}) + return + } + } else { + dest, err = getDestinationWithURL(endpoints, c.Param("path")) + if err != nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": err.Error()}) + return + } + } + target = c.Request.URL + if target.Scheme == "" { + target.Scheme = "http" + } + target.Host = dest + } + log.Printf("Target: %v\n", target) + //c.JSON(http.StatusOK, "OK") + reverseProxy := httputil.NewSingleHostReverseProxy(target) + reverseProxy.Director = func(req *http.Request) { + //req.URL = target + req.Host = target.Host + //req.Method = c.Request.Method + //req.Header = c.Request.Header + } + reverseProxy.ServeHTTP(c.Writer, c.Request) +} + +func getDestinationWithWeight(endpoints []v1.SidecarEndpoints) (string, error) { + var totalWeight int32 = 0 + for _, endpoint := range endpoints { + if endpoint.Weight == nil { + return "", fmt.Errorf("weight not set") + } + totalWeight += *endpoint.Weight * int32(len(endpoint.Endpoints)) + } + randWeight := int32(rand.Intn(int(totalWeight))) + for _, endpoint := range endpoints { + for _, ep := range endpoint.Endpoints { + if randWeight < *endpoint.Weight { + return fmt.Sprintf("%v:%v", ep.IP, ep.TargetPort), nil + } + randWeight -= *endpoint.Weight + } + } + return "", fmt.Errorf("no available endpoint") +} + +func getDestinationWithURL(endpoints []v1.SidecarEndpoints, requestURL string) (string, error) { + for _, endpoint := range endpoints { + if endpoint.URL == nil { + return "", fmt.Errorf("url not set") + 
} + match, err := regexp.MatchString(*endpoint.URL, requestURL) + if err != nil { + return "", err + } + if match { + if len(endpoint.Endpoints) == 0 { + return "", fmt.Errorf("no available endpoint") + } + randIdx := rand.Intn(len(endpoint.Endpoints)) + return fmt.Sprintf("%v:%v", endpoint.Endpoints[randIdx].IP, endpoint.Endpoints[randIdx].TargetPort), nil + } + } + return "", fmt.Errorf("no available endpoint") +} + +func getMockMapping() v1.SidecarMapping { + return v1.SidecarMapping{ + "100.0.0.0:80": []v1.SidecarEndpoints{ + { + Weight: &[]int32{1}[0], + Endpoints: []v1.SingleEndpoint{ + { + IP: "127.0.0.1", + TargetPort: 801, + }, + { + IP: "127.0.0.1", + TargetPort: 802, + }, + }, + }, + { + Weight: &[]int32{2}[0], + Endpoints: []v1.SingleEndpoint{ + { + IP: "127.0.0.1", + TargetPort: 803, + }, + }, + }, + }, + "100.0.0.0:90": []v1.SidecarEndpoints{ + { + Weight: &[]int32{0}[0], + Endpoints: []v1.SingleEndpoint{ + { + IP: "127.0.0.1", + TargetPort: 801, + }, + { + IP: "127.0.0.1", + TargetPort: 802, + }, + }, + }, + { + Weight: &[]int32{1}[0], + Endpoints: []v1.SingleEndpoint{ + { + IP: "127.0.0.1", + TargetPort: 803, + }, + }, + }, + }, + "100.0.0.1:80": []v1.SidecarEndpoints{ + { + URL: &[]string{"^/api/v1/.*$"}[0], + Endpoints: []v1.SingleEndpoint{ + { + IP: "127.0.0.1", + TargetPort: 801, + }, + { + IP: "127.0.0.1", + TargetPort: 802, + }, + }, + }, + { + URL: &[]string{"^/api/v2/.*$"}[0], + Endpoints: []v1.SingleEndpoint{ + { + IP: "127.0.0.1", + TargetPort: 803, + }, + }, + }, + }, + "100.0.0.1:90": []v1.SidecarEndpoints{ + { + URL: &[]string{"^/api/v1/.*$"}[0], + Endpoints: []v1.SingleEndpoint{ + { + IP: "127.0.0.1", + TargetPort: 801, + }, + { + IP: "127.0.0.1", + TargetPort: 802, + }, + }, + }, + { + URL: &[]string{"^/api/v2/.*$"}[0], + Endpoints: []v1.SingleEndpoint{ + { + IP: "127.0.0.1", + TargetPort: 803, + }, + }, + }, + }, + } +} + +func (e *Envoy) watchMapping() { + for { + mapping, err := e.kubeClient.GetSidecarMapping() + if err != nil { + log.Printf("Get sidecar mapping failed: %v\n", err) + } else { + e.mapping = mapping + } + nameMapping, err := e.kubeClient.GetSidecarServiceNameMapping() + if err != nil { + log.Printf("Get sidecar service name mapping failed: %v\n", err) + } else { + e.nameMapping = nameMapping + } + // mock + //mapping := getMockMapping() + time.Sleep(3560 * time.Millisecond) + } +} + +func (e *Envoy) Run() { + e.inboundRouter.Any("/*path", e.inboundProxy) + e.outboundRouter.Any("/*path", e.outboundProxy) + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt) + go func() { + log.Printf("Listening inbound port %s\n", InboundPort) + log.Fatal(e.inboundRouter.Run(fmt.Sprintf(":%s", InboundPort))) + }() + go func() { + log.Printf("Listening outbound port %s\n", OutboundPort) + log.Fatal(e.outboundRouter.Run(fmt.Sprintf(":%s", OutboundPort))) + }() + go e.watchMapping() + <-signalChan + log.Printf("Envoy shut down.") +} diff --git a/pkg/microservice/envoy/init/envoy_init.go b/pkg/microservice/envoy/init/envoy_init.go new file mode 100644 index 0000000..b949bb4 --- /dev/null +++ b/pkg/microservice/envoy/init/envoy_init.go @@ -0,0 +1,132 @@ +package init + +import ( + "github.com/coreos/go-iptables/iptables" + "minikubernetes/pkg/microservice/envoy" +) + +type EnvoyInit struct { + ipt *iptables.IPTables +} + +func NewEnvoyInit() (*EnvoyInit, error) { + e := &EnvoyInit{} + ipt, err := iptables.New(iptables.IPFamily(iptables.ProtocolIPv4)) + if err != nil { + return nil, err + } + e.ipt = ipt + return e, nil +} + +// 
Configure iptables rules in the container's network namespace so that all inbound and outbound traffic is redirected to the Envoy proxy
+func (e *EnvoyInit) Init() error {
+	// Create the MISTIO_REDIRECT chain
+	err := e.ipt.NewChain("nat", "MISTIO_REDIRECT")
+	if err != nil {
+		return err
+	}
+	// Append a REDIRECT rule to MISTIO_REDIRECT that sends traffic to Envoy's outbound port
+	err = e.ipt.Append("nat", "MISTIO_REDIRECT", "-p", "tcp", "-j", "REDIRECT", "--to-port", envoy.OutboundPort)
+	if err != nil {
+		return err
+	}
+	// Create the MISTIO_IN_REDIRECT chain
+	err = e.ipt.NewChain("nat", "MISTIO_IN_REDIRECT")
+	if err != nil {
+		return err
+	}
+	// Append a REDIRECT rule to MISTIO_IN_REDIRECT that sends traffic to Envoy's inbound port
+	err = e.ipt.Append("nat", "MISTIO_IN_REDIRECT", "-p", "tcp", "-j", "REDIRECT", "--to-port", envoy.InboundPort)
+	if err != nil {
+		return err
+	}
+	// Create the MISTIO_INBOUND chain
+	err = e.ipt.NewChain("nat", "MISTIO_INBOUND")
+	if err != nil {
+		return err
+	}
+	// Append a rule to PREROUTING that diverts all inbound traffic to the MISTIO_INBOUND chain
+	err = e.ipt.Append("nat", "PREROUTING", "-p", "tcp", "-j", "MISTIO_INBOUND")
+	if err != nil {
+		return err
+	}
+	// Not handled: ssh port 22
+	// Append a rule to MISTIO_INBOUND that diverts all inbound traffic to the MISTIO_IN_REDIRECT chain
+	err = e.ipt.Append("nat", "MISTIO_INBOUND", "-p", "tcp", "-j", "MISTIO_IN_REDIRECT")
+	if err != nil {
+		return err
+	}
+	// Create the MISTIO_OUTPUT chain
+	err = e.ipt.NewChain("nat", "MISTIO_OUTPUT")
+	if err != nil {
+		return err
+	}
+	// Append a rule to OUTPUT that diverts all outbound traffic to the MISTIO_OUTPUT chain
+	err = e.ipt.Append("nat", "OUTPUT", "-p", "tcp", "-j", "MISTIO_OUTPUT")
+	if err != nil {
+		return err
+	}
+
+	// Not handled: packets with source ip 127.0.0.6
+
+	// Redirect app calls back to itself via Envoy when using the service VIP
+	// e.g. appN => Envoy (client) => Envoy (server) => appN.
+	err = e.ipt.Append("nat", "MISTIO_OUTPUT",
+		"-o", "lo",
+		"!", "-d", "127.0.0.1/32",
+		"-m", "owner", "--uid-owner", envoy.UID,
+		"-j", "MISTIO_IN_REDIRECT")
+	if err != nil {
+		return err
+	}
+	// Do not redirect app calls back to itself via Envoy when using the endpoint address
+	// e.g. appN => appN via lo
+	err = e.ipt.Append("nat", "MISTIO_OUTPUT",
+		"-o", "lo",
+		"-m", "owner", "!", "--uid-owner", envoy.UID,
+		"-j", "RETURN")
+	if err != nil {
+		return err
+	}
+	// Avoid infinite loops. Don't redirect Envoy traffic directly back to
+	// Envoy for non-loopback traffic.
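+	// Envoy's own egress (running as uid 1337) would otherwise hit the catch-all jump to
+	// MISTIO_REDIRECT at the end of this chain and be looped back into the outbound listener.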
+ err = e.ipt.Append("nat", "MISTIO_OUTPUT", + "-m", "owner", "--uid-owner", envoy.UID, + "-j", "RETURN") + if err != nil { + return err + } + + // 对于gid进行相同操作 + err = e.ipt.Append("nat", "MISTIO_OUTPUT", + "-o", "lo", + "!", "-d", "127.0.0.1/32", + "-m", "owner", "--gid-owner", envoy.GID, + "-j", "MISTIO_IN_REDIRECT") + if err != nil { + return err + } + err = e.ipt.Append("nat", "MISTIO_OUTPUT", + "-o", "lo", + "-m", "owner", "!", "--gid-owner", envoy.GID, + "-j", "RETURN") + if err != nil { + return err + } + err = e.ipt.Append("nat", "MISTIO_OUTPUT", + "-m", "owner", "--gid-owner", envoy.GID, + "-j", "RETURN") + if err != nil { + return err + } + + // 对于目标地址为127.0.0.1的数据包,不进行任何处理并返回上一级链。 + err = e.ipt.Append("nat", "MISTIO_OUTPUT", "-d", "127.0.0.1/32", "-j", "RETURN") + if err != nil { + return err + } + // 剩余的数据包重定向到MISTIO_REDIRECT链 + err = e.ipt.Append("nat", "MISTIO_OUTPUT", "-j", "MISTIO_REDIRECT") + return nil +} diff --git a/pkg/microservice/pilot/pilot.go b/pkg/microservice/pilot/pilot.go new file mode 100644 index 0000000..6d45a96 --- /dev/null +++ b/pkg/microservice/pilot/pilot.go @@ -0,0 +1,554 @@ +package pilot + +import ( + "fmt" + "log" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeclient" + "minikubernetes/pkg/utils" + "strconv" + "strings" + "time" +) + +type Pilot interface { + Start() error + SyncLoop() error +} + +type pilot struct { + client kubeclient.Client +} + +func NewPilot(apiServerIP string) Pilot { + manager := &pilot{} + manager.client = kubeclient.NewClient(apiServerIP) + return manager +} + +func (p *pilot) Start() error { + err := p.SyncLoop() + if err != nil { + return err + } + return nil +} + +func (p *pilot) SyncLoop() error { + for { + err := p.syncLoopIteration() + if err != nil { + fmt.Println(err) + } + time.Sleep(5 * time.Second) + } + +} + +func (p *pilot) doRollingUpdate(rollingUpdates []*v1.RollingUpdate, serviceMap map[string]*v1.Service, pods []*v1.Pod) { + for _, ru := range rollingUpdates { + if ru.Status.Phase != v1.RollingUpdatePending { + continue + } + serviceFullName := ru.Namespace + "_" + ru.Spec.ServiceRef + if service, ok := serviceMap[serviceFullName]; ok { + var servicePods []*v1.Pod + for _, pod := range pods { + if !p.isSelectorMatched(pod, service) { + continue + } + if pod.Status.Phase != v1.PodRunning { + continue + } + if pod.Status.PodIP == "" { + continue + } + servicePods = append(servicePods, pod) + } + if len(servicePods) != 0 { + go p.rollingUpdateWorkerLoop(ru, service, servicePods) + } + } + } +} + +func (p *pilot) rollingUpdateWorkerLoop(rollingUpdate *v1.RollingUpdate, service *v1.Service, pods []*v1.Pod) { + updateNum := len(pods) - int(rollingUpdate.Spec.MinimumAlive) + if updateNum <= 0 { + log.Printf("cannot update now") + return + } + rollingUpdate.Status.Phase = v1.RollingUpdateRunning + err := p.client.UpdateRollingUpdateStatus(rollingUpdate.Name, rollingUpdate.Namespace, &rollingUpdate.Status) + if err != nil { + log.Printf("update rolling update status failed: %v", err) + return + } + subsetBlocked := &v1.Subset{ + TypeMeta: v1.TypeMeta{ + Kind: "Subset", + APIVersion: "v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: service.Name + "-blocked", + Namespace: rollingUpdate.Namespace, + }, + Spec: v1.SubsetSpec{ + Pods: nil, + }, + } + subsetAvailable := &v1.Subset{ + TypeMeta: v1.TypeMeta{ + Kind: "Subset", + APIVersion: "v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: service.Name + "-available", + Namespace: rollingUpdate.Namespace, + }, + Spec: v1.SubsetSpec{ + Pods: nil, + }, + } + err = 
p.client.AddSubset(subsetBlocked) + if err != nil { + log.Printf("add blocked subset failed: %v", err) + return + } + err = p.client.AddSubset(subsetAvailable) + if err != nil { + log.Printf("add available subset failed: %v", err) + return + } + vs := &v1.VirtualService{ + TypeMeta: v1.TypeMeta{ + Kind: "VirtualService", + APIVersion: "v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: service.Name + "-rolling-update", + Namespace: rollingUpdate.Namespace, + }, + Spec: v1.VirtualServiceSpec{ + ServiceRef: rollingUpdate.Spec.ServiceRef, + Port: rollingUpdate.Spec.Port, + Subsets: []v1.VirtualServiceSubset{ + { + Name: service.Name + "-available", + Weight: &[]int32{1}[0], + }, + { + Name: service.Name + "-blocked", + Weight: &[]int32{0}[0], + }, + }, + }, + } + err = p.client.AddVirtualService(vs) + if err != nil { + log.Printf("add virtual service failed: %v", err) + return + } + for i := 0; i < len(pods); i += updateNum { + log.Printf("rolling update: %d-%d", i, i+updateNum) + j := i + updateNum + if j > len(pods) { + j = len(pods) + } + blockedPodNames := make([]string, 0) + availablePodNames := make([]string, 0) + for k := 0; k < len(pods); k++ { + if k >= i && k < j { + log.Printf("blocked pod: %s", pods[k].Name) + blockedPodNames = append(blockedPodNames, pods[k].Name) + } else { + log.Printf("available pod: %s", pods[k].Name) + availablePodNames = append(availablePodNames, pods[k].Name) + } + } + subsetBlocked.Spec.Pods = blockedPodNames + subsetAvailable.Spec.Pods = availablePodNames + err = p.client.AddSubset(subsetBlocked) + if err != nil { + log.Printf("update blocked subset failed: %v", err) + } + err = p.client.AddSubset(subsetAvailable) + if err != nil { + log.Printf("update available subset failed: %v", err) + } + for _, blockedPodName := range blockedPodNames { + err = p.client.DeletePod(blockedPodName, rollingUpdate.Namespace) + if err != nil { + log.Printf("delete pod failed: %v", err) + } + } + time.Sleep(time.Duration(rollingUpdate.Spec.Interval) * time.Second / 2) + blockedPods := pods[i:j] + for _, blockedPod := range blockedPods { + newPod := v1.Pod{ + TypeMeta: blockedPod.TypeMeta, + ObjectMeta: blockedPod.ObjectMeta, + Spec: rollingUpdate.Spec.NewPodSpec, + } + err = p.client.AddPod(newPod) + if err != nil { + log.Printf("create pod failed: %v", err) + } + } + time.Sleep(time.Duration(rollingUpdate.Spec.Interval) * time.Second / 2) + } + rollingUpdate.Status.Phase = v1.RollingUpdateFinished + err = p.client.UpdateRollingUpdateStatus(rollingUpdate.Name, rollingUpdate.Namespace, &rollingUpdate.Status) + if err != nil { + log.Printf("update rolling update status failed: %v", err) + } + err = p.client.DeleteVirtualService(vs) + if err != nil { + log.Printf("delete virtual service failed: %v", err) + } + err = p.client.DeleteSubset(subsetBlocked) + if err != nil { + log.Printf("delete blocked subset failed: %v", err) + } + err = p.client.DeleteSubset(subsetAvailable) + if err != nil { + log.Printf("delete available subset failed: %v", err) + } +} + +func (p *pilot) syncLoopIteration() error { + var sideCarMap v1.SidecarMapping = make(v1.SidecarMapping) + pods, err := p.client.GetAllPods() + if err != nil { + return err + } + services, err := p.client.GetAllServices() + if err != nil { + return err + } + virtualServices, err := p.client.GetAllVirtualServices() + if err != nil { + return err + } + rollingUpdates, err := p.client.GetAllRollingUpdates() + if err != nil { + return err + } + + servicesMap := p.makeFullMap(services) + endpointMap := p.getEndpoints(pods, services) + 
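+	// markedMap starts with every "serviceUID_port" pair set to false; pairs covered by a
+	// VirtualService are marked true below, and the remaining pairs later receive default
+	// equal-weight sidecar endpoints.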
markedMap := p.makeMarkedMap(services) + + p.doRollingUpdate(rollingUpdates, servicesMap, pods) + + for _, vs := range virtualServices { + serviceNamespace := vs.Namespace + serviceName := vs.Spec.ServiceRef + serviceFullName := serviceNamespace + "_" + serviceName + serviceUID := servicesMap[serviceFullName].UID + serviceAndEndpointIncludingPodName := endpointMap[serviceUID] + + markedMap[p.getFullPort(serviceUID, vs.Spec.Port)] = true + var sidecarEndpoints []v1.SidecarEndpoints + var targetPort int32 + for _, port := range serviceAndEndpointIncludingPodName.Service.Spec.Ports { + if port.Port == vs.Spec.Port { + targetPort = port.TargetPort + break + } + } + if vs.Spec.Subsets[0].URL != nil { + + for _, sbs := range vs.Spec.Subsets { + + subset, err := p.client.GetSubsetByName(sbs.Name, vs.Namespace) + if err != nil { + return err + } + + url := sbs.URL + for _, podName := range subset.Spec.Pods { + var singleEndPoints []v1.SingleEndpoint + endpoints := serviceAndEndpointIncludingPodName.EndpointsMapWithPodName[podName] + for _, endpoint := range endpoints.Ports { + if endpoint.Port != targetPort { + continue + } + singleEndPoints = append(singleEndPoints, v1.SingleEndpoint{ + IP: endpoints.IP, + TargetPort: endpoint.Port, + }) + } + sidecarEndpoints = append(sidecarEndpoints, v1.SidecarEndpoints{ + URL: url, + Weight: nil, + Endpoints: singleEndPoints, + }) + } + + } + + } else { + var VsToSbsWeight []int32 + var SbsToPodNum []int32 + for _, sbs := range vs.Spec.Subsets { + + subset, err := p.client.GetSubsetByName(sbs.Name, vs.Namespace) + if err != nil { + return err + } + + VsToSbsWeight = append(VsToSbsWeight, *sbs.Weight) + SbsToPodNum = append(SbsToPodNum, int32(len(subset.Spec.Pods))) + } + + RealWeight := p.calculate(VsToSbsWeight, SbsToPodNum) + + for i, sbs := range vs.Spec.Subsets { + + subset, err := p.client.GetSubsetByName(sbs.Name, vs.Namespace) + if err != nil { + return err + } + + for _, podName := range subset.Spec.Pods { + endpoints := serviceAndEndpointIncludingPodName.EndpointsMapWithPodName[podName] + var singleEndPoints []v1.SingleEndpoint + for _, endpoint := range endpoints.Ports { + if endpoint.Port != targetPort { + continue + } + singleEndPoints = append(singleEndPoints, v1.SingleEndpoint{ + IP: endpoints.IP, + TargetPort: endpoint.Port, + }) + } + sidecarEndpoints = append(sidecarEndpoints, v1.SidecarEndpoints{ + URL: nil, + Weight: &(RealWeight[i]), + Endpoints: singleEndPoints, + }) + } + + } + + } + + stringIP := serviceAndEndpointIncludingPodName.Service.Spec.ClusterIP + fmt.Sprintf(":%d", vs.Spec.Port) + sideCarMap[stringIP] = sidecarEndpoints + + } + + var waitedServiceUidAndPorts map[v1.UID][]int32 = make(map[v1.UID][]int32) + for serviceUIDAndPort, isDone := range markedMap { + if !isDone { + //println(serviceUIDAndPort) + uid, port := p.splitFullPort(serviceUIDAndPort) + waitedServiceUidAndPorts[uid] = append(waitedServiceUidAndPorts[uid], port) + //println(uid) + //println(fmt.Sprintf(":%d", port)) + } + } + newMap := make(map[string]*v1.ServiceAndEndpoints) + for uid, portSs := range waitedServiceUidAndPorts { + service := endpointMap[uid].Service + for _, port_i := range portSs { + endpointsWithPodName := make(map[string]v1.Endpoint) + var port v1.ServicePort + for _, portt := range service.Spec.Ports { + if port_i == portt.Port { + port = portt + } + } + + for _, pod := range pods { + if !p.isSelectorMatched(pod, service) { + continue + } + if pod.Status.Phase != v1.PodRunning { + continue + } + if pod.Status.PodIP == "" { + continue + } + + 
var ports []v1.EndpointPort + + SearchLoop: + for _, container := range pod.Spec.Containers { + for _, containerPort := range container.Ports { + if containerPort.ContainerPort == port.TargetPort && containerPort.Protocol == port.Protocol { + ports = append(ports, v1.EndpointPort{ + Port: port.TargetPort, + Protocol: port.Protocol, + }) + break SearchLoop + } + } + } + + endpointsWithPodName[pod.Name] = v1.Endpoint{ + IP: pod.Status.PodIP, + Ports: ports, + } + } + newMap[p.getFullPort(service.UID, port_i)] = &v1.ServiceAndEndpoints{ + Service: service, + EndpointsMapWithPodName: endpointsWithPodName, + } + } + } + + defaultWeight := int32(1) + + for port, serviceAndEndpoints := range newMap { + _, port_i := p.splitFullPort(port) + stringIP := serviceAndEndpoints.Service.Spec.ClusterIP + fmt.Sprintf(":%d", port_i) + var sidecarEndpoints []v1.SidecarEndpoints + for _, endpoint := range serviceAndEndpoints.EndpointsMapWithPodName { + var singleEndPoints []v1.SingleEndpoint + for _, endPointPort := range endpoint.Ports { + singleEndPoints = append(singleEndPoints, v1.SingleEndpoint{ + IP: endpoint.IP, + TargetPort: endPointPort.Port, + }) + } + if singleEndPoints == nil { + continue + } + sidecarEndpoints = append(sidecarEndpoints, v1.SidecarEndpoints{ + Weight: &defaultWeight, + URL: nil, + Endpoints: singleEndPoints, + }) + } + sideCarMap[stringIP] = sidecarEndpoints + + } + err = p.client.AddSidecarMapping(sideCarMap) + if err != nil { + return err + } + + //for stringIP, sidecarEndpoints := range sideCarMap { + // fmt.Println("IP IS :" + stringIP) + // for i, sidecarEndpoint := range sidecarEndpoints { + // fmt.Println(i) + // fmt.Println(*sidecarEndpoint.Weight) + // for _, endpoint := range sidecarEndpoint.Endpoints { + // fmt.Println(endpoint.IP + ": " + fmt.Sprintf("%d", endpoint.TargetPort)) + // } + // } + // fmt.Println(" ") + //} + //return nil + + return nil +} + +func (p *pilot) getEndpoints(pods []*v1.Pod, services []*v1.Service) map[v1.UID]*v1.ServiceAndEndpoints { + newMap := make(map[v1.UID]*v1.ServiceAndEndpoints) + for _, service := range services { + endpointsWithPodName := make(map[string]v1.Endpoint) + for _, pod := range pods { + if !p.isSelectorMatched(pod, service) { + continue + } + if pod.Status.Phase != v1.PodRunning { + continue + } + if pod.Status.PodIP == "" { + continue + } + + var ports []v1.EndpointPort + for _, port := range service.Spec.Ports { + SearchLoop: + for _, container := range pod.Spec.Containers { + for _, containerPort := range container.Ports { + if containerPort.ContainerPort == port.TargetPort && containerPort.Protocol == port.Protocol { + ports = append(ports, v1.EndpointPort{ + Port: port.TargetPort, + Protocol: port.Protocol, + }) + break SearchLoop + } + } + } + } + + endpointsWithPodName[pod.Name] = v1.Endpoint{ + IP: pod.Status.PodIP, + Ports: ports, + } + } + newMap[service.ObjectMeta.UID] = &v1.ServiceAndEndpoints{ + Service: service, + EndpointsMapWithPodName: endpointsWithPodName, + } + } + return newMap +} + +func (p *pilot) isSelectorMatched(pod *v1.Pod, svc *v1.Service) bool { + for key, value := range svc.Spec.Selector { + if label, ok := pod.Labels[key]; !ok || label != value { + return false + } + } + return true +} + +func (p *pilot) makeFullMap(services []*v1.Service) map[string]*v1.Service { + maps := make(map[string]*v1.Service) + + for _, service := range services { + maps[utils.GetObjectFullName(&service.ObjectMeta)] = service + } + + return maps +} + +func (p *pilot) makeUIDMap(services []*v1.Service) 
map[v1.UID]*v1.Service { + uidMap := make(map[v1.UID]*v1.Service) + for _, service := range services { + uidMap[service.ObjectMeta.UID] = service + } + return uidMap +} + +func (p *pilot) makeMarkedMap(services []*v1.Service) map[string]bool { + maps := make(map[string]bool) + for _, service := range services { + for _, port := range service.Spec.Ports { + maps[p.getFullPort(service.UID, port.Port)] = false + } + } + return maps +} + +func (p *pilot) calculate(VsToSbsWeight []int32, SbsToPodNum []int32) []int32 { + var result []int32 + for i, VTSW := range VsToSbsWeight { + temp := VTSW + for j, STPW := range SbsToPodNum { + if i != j && STPW != 0 { + temp = temp * STPW + } + } + result = append(result, temp) + } + return result +} + +func (p *pilot) getFullPort(uid v1.UID, port int32) string { + return string(uid) + "_" + fmt.Sprintf("%d", port) +} + +func (p *pilot) splitFullPort(fp string) (uid v1.UID, port int32) { + parts := strings.Split(fp, "_") + uid = v1.UID(parts[0]) + portInt, _ := strconv.Atoi(parts[1]) + port = int32(portInt) + return uid, port +} diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go new file mode 100644 index 0000000..61d03c0 --- /dev/null +++ b/pkg/scheduler/scheduler.go @@ -0,0 +1,203 @@ +package scheduler + +import ( + "log" + "math/rand" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeclient" + "time" +) + +const ( + Round_Policy = "Round_Policy" + Random_Policy = "Random_Policy" + NodeAffinity_Policy = "NodeAffinity_Policy" +) + +type Scheduler interface { + Run() +} + +type scheduler struct { + client kubeclient.Client + roundRobinCount int + policy string +} + +func NewScheduler(apiServerIP string, policy string) Scheduler { + manager := &scheduler{} + manager.client = kubeclient.NewClient(apiServerIP) + manager.roundRobinCount = 0 + manager.policy = policy + return manager +} + +func (sc *scheduler) Run() { + for { + err := sc.syncLoop() + if err != nil { + log.Println("sync loop err:", err) + break + } + time.Sleep(1000 * time.Millisecond) + } + +} + +func (sc *scheduler) syncLoop() error { + pods, err := sc.informPods() + if err != nil { + return err + } + for _, pod := range pods { + policy := sc.policy + + nodes, err := sc.informNodes() + if err != nil { + return err + } + reqs, limit, err := sc.getResources(pod.Spec.Containers) + if err != nil { + return err + } + //filteredNodes, err := sc.filterNodes(reqs, limit, nodes) + //if err != nil { + // return err + //} + switch policy { + case Round_Policy: + SelectedNodes, err := sc.nodesInRoundPolicy(reqs, limit, nodes) + + err = sc.addPodToNode(SelectedNodes, pod) + if err != nil { + return err + } + break + case Random_Policy: + SelectedNodes, err := sc.nodesInRandomPolicy(reqs, limit, nodes) + if err != nil { + return err + } + err = sc.addPodToNode(SelectedNodes, pod) + if err != nil { + return err + } + break + case NodeAffinity_Policy: + SelectedNodes, err := sc.nodesInNodeAffinityPolicy(reqs, limit, nodes, pod) + if err != nil { + return err + } + err = sc.addPodToNode(SelectedNodes, pod) + if err != nil { + return err + } + break + } + + } + return nil +} + +func (sc *scheduler) nodesInNodeAffinityPolicy(rqs []v1.ResourceList, lim []v1.ResourceList, nodes []*v1.Node, pod *v1.Pod) (*v1.Node, error) { + if pod.Labels == nil { + return sc.nodesInRandomPolicy(rqs, lim, nodes) + } + var selectedNode *v1.Node + for _, node := range nodes { + if node.Labels == nil { + continue + } + allLabelsMatch := true + for key, value := range pod.Labels { + if v, ok := 
node.Labels[key]; !ok || v != value { + allLabelsMatch = false + break + } + } + if allLabelsMatch { + selectedNode = node + break + } + } + if selectedNode == nil { + return sc.nodesInRandomPolicy(rqs, lim, nodes) + } + return selectedNode, nil +} + +func (sc *scheduler) informPods() ([]*v1.Pod, error) { + pods, err := sc.client.GetAllUnscheduledPods() + if err != nil { + return nil, err + } + //var res []*v1.Pod + //for _, pod := range pods { + // if pod.Status.Phase == v1.PodPending { + // res = append(res, pod) + // } + //} + return pods, nil +} + +func (sc *scheduler) informNodes() ([]*v1.Node, error) { + + nodes, err := sc.client.GetAllNodes() + if err != nil { + return nil, err + } + return nodes, nil +} + +func (sc *scheduler) getResources(cts []v1.Container) ([]v1.ResourceList, []v1.ResourceList, error) { + var reqs []v1.ResourceList + var limit []v1.ResourceList + for _, ct := range cts { + reqs = append(reqs, ct.Resources.Requests) + limit = append(limit, ct.Resources.Limits) + } + return reqs, limit, nil +} + +func (sc *scheduler) filterNodes(rqs []v1.ResourceList, lim []v1.ResourceList, nodes []*v1.Node) ([]*v1.Node, error) { + var res []*v1.Node + for _, node := range nodes { + //TODO:进行过滤 + res = append(res, node) + } + return res, nil +} + +func (sc *scheduler) nodesInRoundPolicy(rqs []v1.ResourceList, lim []v1.ResourceList, nodes []*v1.Node) (*v1.Node, error) { + if nodes == nil { + return nil, nil + } + lens := len(nodes) + if lens == 0 { + return nil, nil + } + num := sc.roundRobinCount % lens + sc.roundRobinCount++ + return nodes[num], nil +} + +func (sc *scheduler) nodesInRandomPolicy(rqs []v1.ResourceList, lim []v1.ResourceList, nodes []*v1.Node) (*v1.Node, error) { + if nodes == nil { + return nil, nil + } + lens := len(nodes) + rand.Seed(time.Now().UnixNano()) + num := rand.Intn(lens) + return nodes[num], nil +} + +func (sc *scheduler) addPodToNode(node *v1.Node, pod *v1.Pod) error { + if node == nil { + return nil + } + err := sc.client.AddPodToNode(*pod, *node) + if err != nil { + return err + } + return nil +} diff --git a/pkg/utils/naming.go b/pkg/utils/naming.go new file mode 100644 index 0000000..57bf4f4 --- /dev/null +++ b/pkg/utils/naming.go @@ -0,0 +1,11 @@ +package utils + +import v1 "minikubernetes/pkg/api/v1" + +func GetObjectFullName(meta *v1.ObjectMeta) string { + namespace := meta.Namespace + if namespace == "" { + namespace = "default" + } + return namespace + "_" + meta.Name +} diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100644 index 0000000..f1c2444 --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +go build ./cmd/kube-apiserver/apiserver.go +go build ./cmd/kubelet/kubelet.go +go build ./cmd/kubeproxy/kubeproxy.go +go build ./cmd/scheduler/scheduler.go +go build ./cmd/kube-controller-manager/controllerManager.go +go build ./cmd/pilot/pilot.go +go build ./cmd/kubectl/kubectl.go diff --git a/scripts/clean.sh b/scripts/clean.sh new file mode 100644 index 0000000..baaaf14 --- /dev/null +++ b/scripts/clean.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +./kubectl delete pod ratings-v1 +./kubectl delete pod reviews-v1 +./kubectl delete pod reviews-v2 +./kubectl delete pod reviews-v3 +./kubectl delete pod details-v1 +./kubectl delete pod productpage + +./kubectl delete service ratings +./kubectl delete service reviews +./kubectl delete service details +./kubectl delete service productpage diff --git a/scripts/deploy.sh b/scripts/deploy.sh new file mode 100644 index 0000000..f810411 --- /dev/null +++ b/scripts/deploy.sh @@ -0,0 
+1,13 @@ +#!/bin/bash + +./kubectl apply -f ./test/kubectl/microservice/details.yaml +./kubectl apply -f ./test/kubectl/microservice/productpage.yaml +./kubectl apply -f ./test/kubectl/microservice/ratings.yaml +./kubectl apply -f ./test/kubectl/microservice/reviews-v1.yaml +./kubectl apply -f ./test/kubectl/microservice/reviews-v2.yaml +./kubectl apply -f ./test/kubectl/microservice/reviews-v3.yaml + +./kubectl apply -f ./test/kubectl/microservice/details-svc.yaml +./kubectl apply -f ./test/kubectl/microservice/productpage-svc.yaml +./kubectl apply -f ./test/kubectl/microservice/ratings-svc.yaml +./kubectl apply -f ./test/kubectl/microservice/reviews-svc.yaml diff --git a/test/controllermanager_test/main.go b/test/controllermanager_test/main.go new file mode 100644 index 0000000..dc3eebd --- /dev/null +++ b/test/controllermanager_test/main.go @@ -0,0 +1,11 @@ +// Simple test of the controller manager +package main + +import ( + "minikubernetes/pkg/controller" +) + +func main() { + cm := controller.NewControllerManager("192.168.1.10") + cm.Run() +} diff --git a/test/ipvs_test/main.go b/test/ipvs_test/main.go new file mode 100644 index 0000000..edcfab9 --- /dev/null +++ b/test/ipvs_test/main.go @@ -0,0 +1,33 @@ +package main + +import ( + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubeproxy/route" +) + +func main() { + ipvs, err := route.NewIPVS() + if err != nil { + panic(err) + } + err = ipvs.Init() + if err != nil { + panic(err) + } + err = ipvs.AddVirtual("100.100.100.10", 30080, v1.ProtocolTCP, true) + if err != nil { + panic(err) + } + err = ipvs.AddRoute("100.100.100.10", 30080, "10.32.0.1", 80, v1.ProtocolTCP) + if err != nil { + panic(err) + } + err = ipvs.DeleteRoute("100.100.100.10", 30080, "10.32.0.1", 80, v1.ProtocolTCP) + if err != nil { + panic(err) + } + err = ipvs.DeleteVirtual("100.100.100.10", 30080, v1.ProtocolTCP, true) + if err != nil { + panic(err) + } +} diff --git a/test/kubectl/dns/dns.yaml b/test/kubectl/dns/dns.yaml new file mode 100644 index 0000000..8383bd5 --- /dev/null +++ b/test/kubectl/dns/dns.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: DNS +metadata: + name: my-dns +spec: + rules: + - host: myservice.com + paths: + - path: /nginx + backend: + service: + name: nginx-service + port: 800 + - path: /python + backend: + service: + name: python-service + port: 900 diff --git a/test/kubectl/dns/pod1.yaml b/test/kubectl/dns/pod1.yaml new file mode 100644 index 0000000..7422ff7 --- /dev/null +++ b/test/kubectl/dns/pod1.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: python-pod + namespace: default + labels: + app: python +spec: + containers: + - name: c1 + image: python:latest + command: ["python", "-m", "http.server", "1024"] + ports: + - containerPort: 1024 + protocol: tcp diff --git a/test/kubectl/dns/pod2.yaml b/test/kubectl/dns/pod2.yaml new file mode 100644 index 0000000..a3ceefb --- /dev/null +++ b/test/kubectl/dns/pod2.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx-pod + namespace: default + labels: + app: nginx +spec: + containers: + - name: c1 + image: nginx:latest + ports: + - containerPort: 80 + protocol: tcp diff --git a/test/kubectl/dns/service1.yaml b/test/kubectl/dns/service1.yaml new file mode 100644 index 0000000..5369fb1 --- /dev/null +++ b/test/kubectl/dns/service1.yaml @@ -0,0 +1,12 @@ +kind: Service +apiVersion: v1 +metadata: + name: nginx-service +spec: + type: NodePort + ports: + - port: 800 + targetPort: 80 + nodePort: 30080 + selector: + app: nginx diff --git a/test/kubectl/dns/service2.yaml 
b/test/kubectl/dns/service2.yaml new file mode 100644 index 0000000..f9c770b --- /dev/null +++ b/test/kubectl/dns/service2.yaml @@ -0,0 +1,12 @@ +kind: Service +apiVersion: v1 +metadata: + name: python-service +spec: + type: NodePort + ports: + - port: 900 + targetPort: 1024 + nodePort: 30090 + selector: + app: python diff --git a/test/kubectl/microservice/details-svc.yaml b/test/kubectl/microservice/details-svc.yaml new file mode 100644 index 0000000..bba6e61 --- /dev/null +++ b/test/kubectl/microservice/details-svc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: details + namespace: default +spec: + ports: + - port: 9080 + targetPort: 9080 + selector: + app: details diff --git a/test/kubectl/microservice/details.yaml b/test/kubectl/microservice/details.yaml new file mode 100644 index 0000000..04eb1b2 --- /dev/null +++ b/test/kubectl/microservice/details.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: details-v1 + namespace: default + labels: + app: details +spec: + containers: + - name: productpage + image: istio/examples-bookinfo-details-v1:1.19.1 + ports: + - containerPort: 9080 + protocol: tcp + - name: envoy-proxy + image: sjtuzc/envoy:1.2 + securityContext: + runAsUser: 1337 + initContainers: + - name: envoy-init + image: sjtuzc/envoy-init:latest + securityContext: + privileged: true diff --git a/test/kubectl/microservice/productpage-svc.yaml b/test/kubectl/microservice/productpage-svc.yaml new file mode 100644 index 0000000..e284eab --- /dev/null +++ b/test/kubectl/microservice/productpage-svc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: productpage + namespace: default +spec: + ports: + - port: 9080 + targetPort: 9080 + selector: + app: productpage diff --git a/test/kubectl/microservice/productpage.yaml b/test/kubectl/microservice/productpage.yaml new file mode 100644 index 0000000..4cd2c12 --- /dev/null +++ b/test/kubectl/microservice/productpage.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: productpage + namespace: default + labels: + app: productpage +spec: + containers: + - name: productpage + image: istio/examples-bookinfo-productpage-v1:1.19.1 + ports: + - containerPort: 9080 + protocol: tcp + - name: envoy-proxy + image: sjtuzc/envoy:1.2 + securityContext: + runAsUser: 1337 + initContainers: + - name: envoy-init + image: sjtuzc/envoy-init:latest + securityContext: + privileged: true diff --git a/test/kubectl/microservice/ratings-svc.yaml b/test/kubectl/microservice/ratings-svc.yaml new file mode 100644 index 0000000..2aa090b --- /dev/null +++ b/test/kubectl/microservice/ratings-svc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: ratings + namespace: default +spec: + ports: + - port: 9080 + targetPort: 9080 + selector: + app: ratings diff --git a/test/kubectl/microservice/ratings.yaml b/test/kubectl/microservice/ratings.yaml new file mode 100644 index 0000000..e00668a --- /dev/null +++ b/test/kubectl/microservice/ratings.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: ratings-v1 + namespace: default + labels: + app: ratings +spec: + containers: + - name: productpage + image: istio/examples-bookinfo-ratings-v1:1.19.1 + ports: + - containerPort: 9080 + protocol: tcp + - name: envoy-proxy + image: sjtuzc/envoy:1.2 + securityContext: + runAsUser: 1337 + initContainers: + - name: envoy-init + image: sjtuzc/envoy-init:latest + securityContext: + privileged: true diff --git a/test/kubectl/microservice/reviews-svc.yaml 
b/test/kubectl/microservice/reviews-svc.yaml new file mode 100644 index 0000000..ae82acf --- /dev/null +++ b/test/kubectl/microservice/reviews-svc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: reviews + namespace: default +spec: + ports: + - port: 9080 + targetPort: 9080 + selector: + app: reviews diff --git a/test/kubectl/microservice/reviews-v1.yaml b/test/kubectl/microservice/reviews-v1.yaml new file mode 100644 index 0000000..300cb10 --- /dev/null +++ b/test/kubectl/microservice/reviews-v1.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: reviews-v1 + namespace: default + labels: + app: reviews +spec: + containers: + - name: productpage + image: istio/examples-bookinfo-reviews-v1:1.19.1 + ports: + - containerPort: 9080 + protocol: tcp + - name: envoy-proxy + image: sjtuzc/envoy:1.2 + securityContext: + runAsUser: 1337 + initContainers: + - name: envoy-init + image: sjtuzc/envoy-init:latest + securityContext: + privileged: true diff --git a/test/kubectl/microservice/reviews-v2.yaml b/test/kubectl/microservice/reviews-v2.yaml new file mode 100644 index 0000000..b8b5201 --- /dev/null +++ b/test/kubectl/microservice/reviews-v2.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: reviews-v2 + namespace: default + labels: + app: reviews +spec: + containers: + - name: productpage + image: istio/examples-bookinfo-reviews-v2:1.19.1 + ports: + - containerPort: 9080 + protocol: tcp + - name: envoy-proxy + image: sjtuzc/envoy:1.2 + securityContext: + runAsUser: 1337 + initContainers: + - name: envoy-init + image: sjtuzc/envoy-init:latest + securityContext: + privileged: true diff --git a/test/kubectl/microservice/reviews-v3.yaml b/test/kubectl/microservice/reviews-v3.yaml new file mode 100644 index 0000000..b43414d --- /dev/null +++ b/test/kubectl/microservice/reviews-v3.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: reviews-v3 + namespace: default + labels: + app: reviews +spec: + containers: + - name: productpage + image: istio/examples-bookinfo-reviews-v3:1.19.1 + ports: + - containerPort: 9080 + protocol: tcp + - name: envoy-proxy + image: sjtuzc/envoy:1.2 + securityContext: + runAsUser: 1337 + initContainers: + - name: envoy-init + image: sjtuzc/envoy-init:latest + securityContext: + privileged: true diff --git a/test/kubectl/microservice/rollingupdate.yaml b/test/kubectl/microservice/rollingupdate.yaml new file mode 100644 index 0000000..b0b8207 --- /dev/null +++ b/test/kubectl/microservice/rollingupdate.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: RollingUpdate +metadata: + name: my-ru +spec: + serviceRef: reviews + port: 9080 + minimumAlive: 1 + interval: 15 + newPodSpec: + containers: + - name: reviews + image: istio/examples-bookinfo-reviews-v3:1.19.1 + ports: + - containerPort: 9080 + protocol: tcp + - name: envoy-proxy + image: sjtuzc/envoy:1.2 + securityContext: + runAsUser: 1337 + initContainers: + - name: proxy-init + image: sjtuzc/envoy-init:latest + securityContext: + privileged: true diff --git a/test/kubectl/microservice/subset-v1.yaml b/test/kubectl/microservice/subset-v1.yaml new file mode 100644 index 0000000..fb38aaa --- /dev/null +++ b/test/kubectl/microservice/subset-v1.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Subset +metadata: + name: subset-v1 + namespace: default +spec: + pods: ["reviews-v1"] diff --git a/test/kubectl/microservice/subset-v2.yaml b/test/kubectl/microservice/subset-v2.yaml new file mode 100644 index 0000000..5df5c96 --- /dev/null +++ 
b/test/kubectl/microservice/subset-v2.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Subset +metadata: + name: subset-v2 + namespace: default +spec: + pods: ["reviews-v2", "reviews-v3"] diff --git a/test/kubectl/microservice/vs_med.yaml b/test/kubectl/microservice/vs_med.yaml new file mode 100644 index 0000000..7b4e194 --- /dev/null +++ b/test/kubectl/microservice/vs_med.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: VirtualService +metadata: + name: my-vs + namespace: default +spec: + serviceRef: reviews + port: 9080 + subsets: + - name: subset-v1 + weight: 1 + - name: subset-v2 + weight: 1 diff --git a/test/kubectl/microservice/vs_new.yaml b/test/kubectl/microservice/vs_new.yaml new file mode 100644 index 0000000..a1dcfa2 --- /dev/null +++ b/test/kubectl/microservice/vs_new.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: VirtualService +metadata: + name: my-vs + namespace: default +spec: + serviceRef: reviews + port: 9080 + subsets: + - name: subset-v1 + weight: 0 + - name: subset-v2 + weight: 1 diff --git a/test/kubectl/microservice/vs_old.yaml b/test/kubectl/microservice/vs_old.yaml new file mode 100644 index 0000000..c4613a7 --- /dev/null +++ b/test/kubectl/microservice/vs_old.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: VirtualService +metadata: + name: my-vs + namespace: default +spec: + serviceRef: reviews + port: 9080 + subsets: + - name: subset-v1 + weight: 1 + - name: subset-v2 + weight: 0 diff --git a/test/kubectl/microservice/vs_url.yaml b/test/kubectl/microservice/vs_url.yaml new file mode 100644 index 0000000..e1b1e3d --- /dev/null +++ b/test/kubectl/microservice/vs_url.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: VirtualService +metadata: + name: my-vs + namespace: default +spec: + serviceRef: reviews + port: 9080 + subsets: + - name: subset-v1 + url: ^/reviews/1$ + - name: subset-v2 + url: ^/reviews/[2-3]$ diff --git a/test/kubectl/pod/1.yaml b/test/kubectl/pod/1.yaml new file mode 100644 index 0000000..c3d8a5a --- /dev/null +++ b/test/kubectl/pod/1.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pod + namespace: default +spec: + containers: + - name: c1 + image: python:latest + command: ["python", "-m", "http.server", "8000"] + ports: + - containerPort: 8000 + protocol: tcp + volumeMounts: + - name: volume1 + mountPath: /mnt/v1 + - name: c2 + image: python:latest + command: [ "python", "-m", "http.server", "8001" ] + volumeMounts: + - name: volume1 + mountPath: /mnt/v2 + ports: + - containerPort: 8001 + protocol: tcp + volumes: + - name: volume1 + emptyDir: {} diff --git a/test/kubectl/pod/2.yaml b/test/kubectl/pod/2.yaml new file mode 100644 index 0000000..1b62c94 --- /dev/null +++ b/test/kubectl/pod/2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pod-1 + namespace: default + labels: + app: nginx +spec: + containers: + - name: c1 + image: alpine:latest diff --git a/test/kubectl/pod/3.yaml b/test/kubectl/pod/3.yaml new file mode 100644 index 0000000..d1b93f4 --- /dev/null +++ b/test/kubectl/pod/3.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pod-2 + namespace: default + labels: + app: nginx +spec: + containers: + - name: c1 + image: alpine:latest diff --git a/test/kubectl/replicaset/1.yaml b/test/kubectl/replicaset/1.yaml new file mode 100644 index 0000000..59f728d --- /dev/null +++ b/test/kubectl/replicaset/1.yaml @@ -0,0 +1,24 @@ +kind: ReplicaSet +apiVersion: v1 +metadata: + name: nginx-replicaset + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + 
metadata: + name: nginx-pod + namespace: default + labels: + app: nginx + spec: + containers: + - name: container + image: python:latest + command: ["python", "-m", "http.server", "1024"] + ports: + - containerPort: 1024 + protocol: tcp diff --git a/test/kubectl/service/pod1.yaml b/test/kubectl/service/pod1.yaml new file mode 100644 index 0000000..c5f147c --- /dev/null +++ b/test/kubectl/service/pod1.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx-pod-1 + namespace: default + labels: + app: nginx +spec: + containers: + - name: c1 + image: python:latest + command: ["python", "-m", "http.server", "1024"] + ports: + - containerPort: 1024 + protocol: tcp diff --git a/test/kubectl/service/pod2.yaml b/test/kubectl/service/pod2.yaml new file mode 100644 index 0000000..d5624db --- /dev/null +++ b/test/kubectl/service/pod2.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx-pod-2 + namespace: default + labels: + app: nginx +spec: + containers: + - name: c1 + image: python:latest + command: ["python", "-m", "http.server", "1024"] + ports: + - containerPort: 1024 + protocol: tcp diff --git a/test/kubectl/service/service1.yaml b/test/kubectl/service/service1.yaml new file mode 100644 index 0000000..921fec1 --- /dev/null +++ b/test/kubectl/service/service1.yaml @@ -0,0 +1,12 @@ +kind: Service +apiVersion: v1 +metadata: + name: nginx-service +spec: + type: NodePort + ports: + - port: 800 + targetPort: 1024 + nodePort: 30080 + selector: + app: nginx diff --git a/test/kubectl/test_pod.yaml b/test/kubectl/test_pod.yaml new file mode 100644 index 0000000..d077863 --- /dev/null +++ b/test/kubectl/test_pod.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pod + namespace: default +spec: + containers: + - name: c1 + image: alpine:latest + volumes: + - name: v1 + emptyDir: {} diff --git a/test/network_test/main.go b/test/network_test/main.go new file mode 100644 index 0000000..4802520 --- /dev/null +++ b/test/network_test/main.go @@ -0,0 +1,26 @@ +package main + +import ( + "fmt" + "minikubernetes/pkg/kubelet/network" +) + +func main() { + IP1, err := network.Attach("9adb28a544e9") + if err != nil { + panic(err) + } + IP2, err := network.Attach("ed970ca4c658") + if err != nil { + panic(err) + } + fmt.Printf("IP1: %s, IP2: %s\n", IP1, IP2) + err = network.Detach("9adb28a544e9") + if err != nil { + panic(err) + } + err = network.Detach("ed970ca4c658") + if err != nil { + panic(err) + } +} diff --git a/test/node_config/node0.yaml b/test/node_config/node0.yaml new file mode 100644 index 0000000..700864b --- /dev/null +++ b/test/node_config/node0.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Node +metadata: + name: node-0 + labels: + app: nginx diff --git a/test/node_config/node1.yaml b/test/node_config/node1.yaml new file mode 100644 index 0000000..1b031a2 --- /dev/null +++ b/test/node_config/node1.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Node +metadata: + name: node-1 diff --git a/test/pilot_test/main.go b/test/pilot_test/main.go new file mode 100644 index 0000000..5633bb7 --- /dev/null +++ b/test/pilot_test/main.go @@ -0,0 +1,166 @@ +package main + +import ( + "fmt" + "minikubernetes/pkg/microservice/pilot" +) + +func main() { + rsm := pilot.NewPilot("192.168.1.10") + err := rsm.Start() + if err != nil { + fmt.Println(err) + } +} + +//weight1 := int32(1) +//weight2 := int32(2) +////url1 := "/a" +////url2 := "/b" +//var services []*v1.Service +//var pods []*v1.Pod +//var virtualServices []*v1.VirtualService +//var subsets []*v1.Subset +//pods 
= append(pods, &v1.Pod{ +//ObjectMeta: v1.ObjectMeta{ +//Name: "pod-test0", +//Labels: map[string]string{ +//"app": "tz", +//}, +//}, +//Spec: v1.PodSpec{ +//Containers: []v1.Container{ +//{ +//Ports: []v1.ContainerPort{ +//{ +//ContainerPort: 8080, +//Protocol: "TCP", +//}, +//}, +//}, +//}, +//}, +//Status: v1.PodStatus{ +//Phase: "Running", +//PodIP: "111.11.11.11", +//}, +//}) +//pods = append(pods, &v1.Pod{ +//ObjectMeta: v1.ObjectMeta{ +//Name: "pod-test1", +//Labels: map[string]string{ +//"app": "tz", +//}, +//}, +//Spec: v1.PodSpec{ +//Containers: []v1.Container{ +//{ +//Ports: []v1.ContainerPort{ +//{ +//ContainerPort: 8090, +//Protocol: "TCP", +//}, +//}, +//}, +//}, +//}, +//Status: v1.PodStatus{ +//Phase: "Running", +//PodIP: "111.11.11.12", +//}, +//}) +//pods = append(pods, &v1.Pod{ +//ObjectMeta: v1.ObjectMeta{ +//Name: "pod-test2", +//Labels: map[string]string{ +//"app": "tz", +//}, +//}, +//Spec: v1.PodSpec{ +//Containers: []v1.Container{ +//{ +//Ports: []v1.ContainerPort{ +//{ +//ContainerPort: 8080, +//Protocol: "TCP", +//}, +//}, +//}, +//}, +//}, +//Status: v1.PodStatus{ +//Phase: "Running", +//PodIP: "111.11.11.13", +//}, +//}) +//services = append(services, &v1.Service{ +//ObjectMeta: v1.ObjectMeta{ +//UID: v1.UID("1234"), +//Name: "service-test0", +//Namespace: "default", +//}, +//Spec: v1.ServiceSpec{ +//Ports: []v1.ServicePort{{ +//Name: "port0", +//Protocol: "TCP", +//Port: 80, +//TargetPort: 8080, +//}, { +//Name: "port1", +//Protocol: "TCP", +//Port: 90, +//TargetPort: 8090, +//}}, +//Selector: map[string]string{ +//"app": "tz", +//}, +//ClusterIP: "100.2.2.3", +//}, +//}) +//pods = append(pods, &v1.Pod{}) +//virtualServices = append(virtualServices, &v1.VirtualService{ +//TypeMeta: v1.TypeMeta{}, +//ObjectMeta: v1.ObjectMeta{ +//Namespace: "default", +//}, +//Spec: v1.VirtualServiceSpec{ +//ServiceRef: "service-test0", +//Port: 80, +//Subsets: []v1.VirtualServiceSubset{ +//v1.VirtualServiceSubset{ +//URL: nil, +//Weight: &weight1, +//Name: "subset-test0", +//}, +//v1.VirtualServiceSubset{ +//URL: nil, +//Weight: &weight2, +//Name: "subset-test1", +//}, +//}, +//}, +//}) +//subsets = append(subsets, &v1.Subset{ +//TypeMeta: v1.TypeMeta{}, +//ObjectMeta: v1.ObjectMeta{ +//Namespace: "default", +//Name: "subset-test0", +//}, +//Spec: v1.SubsetSpec{ +//Pods: []string{ +//"pod-test0", +//}, +//}, +//}) +//subsets = append(subsets, &v1.Subset{ +//TypeMeta: v1.TypeMeta{}, +//ObjectMeta: v1.ObjectMeta{ +//Namespace: "default", +//Name: "subset-test1", +//}, +//Spec: v1.SubsetSpec{ +//Pods: []string{ +//"pod-test2", +//}, +//}, +//}) diff --git a/test/replicaset_test/main.go b/test/replicaset_test/main.go new file mode 100644 index 0000000..17198fc --- /dev/null +++ b/test/replicaset_test/main.go @@ -0,0 +1,14 @@ +package main + +import ( + "fmt" + "minikubernetes/pkg/controller/replicaset" +) + +func main() { + rsm := replicaset.NewReplicasetManager("192.168.1.10") + err := rsm.RunRSC() + if err != nil { + fmt.Println(err) + } +} diff --git a/test/runtime_test/main.go b/test/runtime_test/main.go new file mode 100644 index 0000000..27f6415 --- /dev/null +++ b/test/runtime_test/main.go @@ -0,0 +1,29 @@ +package main + +import ( + v1 "minikubernetes/pkg/api/v1" + rt "minikubernetes/pkg/kubelet/runtime" +) + +func main() { + conport := &v1.ContainerPort{ContainerPort: 8080} + conport2 := &v1.ContainerPort{ContainerPort: 8090} + contain := &v1.Container{ + Image: "alpine:latest", + Command: []string{}, + Ports: []v1.ContainerPort{*conport}, + } + contain2 := &v1.Container{ 
+ Image: "alpine:latest", + Command: []string{}, + Ports: []v1.ContainerPort{*conport2}, + } + podSpec := &v1.PodSpec{ + Containers: []v1.Container{*contain, *contain2}, + } + pod := &v1.Pod{ + Spec: *podSpec, + } + rm := rt.NewRuntimeManager("1.1.1.1") + rt.RuntimeManager.AddPod(rm, pod) +} diff --git a/test/volume_test/volume.go b/test/volume_test/volume.go new file mode 100644 index 0000000..6115656 --- /dev/null +++ b/test/volume_test/volume.go @@ -0,0 +1,87 @@ +package main + +import ( + "github.com/google/uuid" + v1 "minikubernetes/pkg/api/v1" + "minikubernetes/pkg/kubelet/runtime" +) + +func main() { + pod1 := &v1.Pod{ + TypeMeta: v1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + UID: v1.UID(uuid.New().String()), + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "volume1", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "container1", + Image: "alpine:latest", + VolumeMounts: []v1.VolumeMount{ + { + Name: "volume1", + MountPath: "/tmp", + }, + }, + }, + }, + }, + } + pod2 := &v1.Pod{ + TypeMeta: v1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "pod2", + Namespace: "default", + UID: v1.UID(uuid.New().String()), + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "volume2", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/root/zc/mini_k8s", + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "container1", + Image: "alpine:latest", + VolumeMounts: []v1.VolumeMount{ + { + Name: "volume2", + MountPath: "/tmp", + }, + }, + }, + }, + }, + } + runtimeManager := runtime.NewRuntimeManager("1.1.1.1") + err := runtimeManager.AddPod(pod1) + if err != nil { + panic(err) + } + err = runtimeManager.AddPod(pod2) + if err != nil { + panic(err) + } +} diff --git a/tools/timestamp/timestamp.go b/tools/timestamp/timestamp.go new file mode 100644 index 0000000..2a3f1bb --- /dev/null +++ b/tools/timestamp/timestamp.go @@ -0,0 +1,13 @@ +package timestamp + +import ( + "time" +) + +func NewTimestamp() time.Time { + return time.Now() +} + +func FormatUTCZ(t time.Time) string { + return t.Format("2006-01-02T15:04:05.99999999Z") +} diff --git a/tools/uuid/uuid.go b/tools/uuid/uuid.go new file mode 100644 index 0000000..030b03b --- /dev/null +++ b/tools/uuid/uuid.go @@ -0,0 +1,13 @@ +package uuid + +import ( + "fmt" + + uuid "github.com/google/uuid" +) + +func NewUUID() string { + id := uuid.New() + str := fmt.Sprint(id) + return str +} diff --git a/vendor/README.md b/vendor/README.md deleted file mode 100644 index 8eaf64f..0000000 --- a/vendor/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# `/vendor` - -Application dependencies (managed manually or by your favorite dependency management tool or the built-in [`modules`](https://github.com/golang/go/wiki/Modules) feature). - -Don't commit your application dependencies if you are building a library. - -Note that since [`1.13`](https://golang.org/doc/go1.13#modules) Go also enabled the module proxy feature (using `https://proxy.golang.org` as their module proxy server by default). Read more about it [`here`](https://blog.golang.org/module-mirror-launch) to see if it fits all of your requirements and constraints. If it does, then you won't need the 'vendor' directory at all.
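scripts/build.sh compiles a cmd/scheduler/scheduler.go entrypoint that is not included in this excerpt. For orientation, here is a minimal sketch of how such an entrypoint could wire up pkg/scheduler; the flag names and the default address are illustrative assumptions, not code from the repository:

```go
package main

import (
	"flag"

	"minikubernetes/pkg/scheduler"
)

func main() {
	// Hypothetical flags; the real cmd/scheduler/scheduler.go may be wired differently.
	apiServer := flag.String("apiserver", "192.168.1.10", "kube-apiserver address")
	policy := flag.String("policy", scheduler.Round_Policy, "Round_Policy, Random_Policy or NodeAffinity_Policy")
	flag.Parse()

	// Run blocks: syncLoop polls unscheduled pods once per second and binds each
	// of them to a node chosen by the configured policy.
	scheduler.NewScheduler(*apiServer, *policy).Run()
}
```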
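The pilot's calculate helper (pkg/microservice/pilot) expands per-subset VirtualService weights into per-endpoint weights by multiplying each subset's weight with the pod counts of every other non-empty subset, so the subset-level traffic ratio survives once the weight is attached to each pod individually. A standalone re-statement of that arithmetic with a worked example (illustrative only, not the pilot's API):

```go
package main

import "fmt"

// expand mirrors the pilot's weight expansion: the per-pod weight of subset i
// is its VirtualService weight multiplied by the pod counts of all other
// non-empty subsets.
func expand(weights, podCounts []int32) []int32 {
	result := make([]int32, 0, len(weights))
	for i, w := range weights {
		perPod := w
		for j, n := range podCounts {
			if i != j && n != 0 {
				perPod *= n
			}
		}
		result = append(result, perPod)
	}
	return result
}

func main() {
	// Two subsets with equal weight 1, holding 1 and 2 pods respectively
	// (the subset-v1/subset-v2 split used with vs_med.yaml above).
	fmt.Println(expand([]int32{1, 1}, []int32{1, 2}))
	// Output: [2 1]: one pod at weight 2 vs. two pods at weight 1, so the
	// intended 1:1 split between the subsets is preserved.
}
```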
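The two helper packages added under tools/ are self-contained. A short usage sketch, assuming the import paths minikubernetes/tools/uuid and minikubernetes/tools/timestamp implied by the module layout:

```go
package main

import (
	"fmt"

	"minikubernetes/tools/timestamp"
	"minikubernetes/tools/uuid"
)

func main() {
	id := uuid.NewUUID()            // random UUID rendered as a string
	now := timestamp.NewTimestamp() // plain time.Time for "now"
	// FormatUTCZ formats with up to 8 fractional digits and a literal trailing Z.
	fmt.Println(id, timestamp.FormatUTCZ(now))
}
```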