diff --git a/cmd/controller/controller.go b/cmd/controller/controller.go index d3190ad744b2..a062a4bd4543 100644 --- a/cmd/controller/controller.go +++ b/cmd/controller/controller.go @@ -263,11 +263,13 @@ func (c *command) start(ctx context.Context) error { } nodeComponents.Add(ctx, leaderElector) - nodeComponents.Add(ctx, &applier.Manager{ - K0sVars: c.K0sVars, - KubeClientFactory: adminClientFactory, - LeaderElector: leaderElector, - }) + if !slices.Contains(c.DisableComponents, constant.ApplierManagerComponentName) { + nodeComponents.Add(ctx, &applier.Manager{ + K0sVars: c.K0sVars, + KubeClientFactory: adminClientFactory, + LeaderElector: leaderElector, + }) + } if !c.SingleNode && !slices.Contains(c.DisableComponents, constant.ControlAPIComponentName) { nodeComponents.Add(ctx, &controller.K0SControlAPI{ diff --git a/docs/README.md b/docs/README.md index 0f218615ed83..dcabe1276746 100644 --- a/docs/README.md +++ b/docs/README.md @@ -31,7 +31,7 @@ Before that mishap we had 4776 stargazers, making k0s one of the most popular Ku - Scalable from a single node to large, [high-available](high-availability.md) clusters - Supports custom [Container Network Interface (CNI)](networking.md) plugins (Kube-Router is the default, Calico is offered as a preconfigured alternative) - Supports custom [Container Runtime Interface (CRI)](runtime.md) plugins (containerd is the default) -- Supports all Kubernetes storage options with [Container Storage Interface (CSI)](storage.md), includes [OpenEBS host-local storage provider](storage.md#bundled-openebs-storage) +- Supports all Kubernetes storage options with [Container Storage Interface (CSI)](storage.md), includes [OpenEBS host-local storage provider](examples/openebs.md) - Supports a variety of [datastore backends](configuration.md#specstorage): etcd (default for multi-node clusters), SQLite (default for single node clusters), MySQL, and PostgreSQL - Supports x86-64, ARM64 and ARMv7 - Includes [Konnectivity service](networking.md#controller-worker-communication), CoreDNS and Metrics Server diff --git a/docs/configuration.md b/docs/configuration.md index cdde10b77bbf..459103fad74a 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -486,7 +486,7 @@ In the runtime the image names are calculated as `my.own.repo/calico/kube-contro `spec.extensions.storage` controls bundled storage provider. The default value `external` makes no storage deployed. -To enable [embedded host-local storage provider](storage.md#bundled-openebs-storage) use the following configuration: +To enable [embedded host-local storage provider](examples/openebs.md) use the following configuration: ```yaml spec: @@ -522,7 +522,7 @@ they need to fulfill their need for the control plane. 
Disabling the system components happens through a command line flag for the controller process:

```text
---disable-components strings                     disable components (valid items: autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)
+--disable-components strings                     disable components (valid items: applier-manager,autopilot,control-api,coredns,csr-approver,endpoint-reconciler,helm,konnectivity-server,kube-controller-manager,kube-proxy,kube-scheduler,metrics-server,network-provider,node-role,system-rbac,worker-config)
```

**Note:** As of k0s 1.26, the kubelet-config component has been replaced by the
diff --git a/docs/examples/openebs.md b/docs/examples/openebs.md
new file mode 100644
index 000000000000..e71513d0f8e6
--- /dev/null
+++ b/docs/examples/openebs.md
@@ -0,0 +1,184 @@
+# OpenEBS
+
+This tutorial covers the installation of OpenEBS as a Helm extension, both from
+scratch and how to migrate it from a storage extension.
+
+## Installing OpenEBS from scratch
+
+**WARNING**: Do not configure OpenEBS as both a storage extension and a Helm
+extension. This is considered an invalid configuration, and k0s will ignore it
+entirely to prevent accidental upgrades or downgrades. The chart objects
+defined in the API will still behave normally.
+
+OpenEBS can be installed as a helm chart by adding it as an extension to your configuration:
+
+```yaml
+  extensions:
+    helm:
+      repositories:
+      - name: openebs-internal
+        url: https://openebs.github.io/charts
+      charts:
+      - name: openebs
+        chartname: openebs-internal/openebs
+        version: "3.9.0"
+        namespace: openebs
+        order: 1
+        values: |
+          localprovisioner:
+            hostpathClass:
+              enabled: true
+              isDefaultClass: false
+```
+
+If you want OpenEBS to be your default storage class, set `isDefaultClass` to `true`.
+
+## Migrating bundled OpenEBS to helm extension
+
+The bundled OpenEBS extension is already a helm extension installed as a
+`chart.helm.k0sproject.io`. For this reason, all we have to do is remove the
+manifests and clean up the object. However, this must be done in a specific
+order to prevent data loss.
+
+**WARNING**: Not following these steps in the exact order presented here may
+cause data loss.
+
+The first step of the migration is to disable the `applier-manager` component
+on all controllers. Restart each controller with the flag
+`--disable-components=applier-manager`. If you already use this flag, append
+`applier-manager` to the existing list, for example
+`--disable-components=<existing components>,applier-manager` (sketches for
+both steps follow below).
+
+Once the `applier-manager` is disabled on every running controller, modify the
+configuration to use `external_storage` instead of `openebs_local_storage`.
+
+If you are using [dynamic configuration](../dynamic-configuration.md), you can
+change it with this command:
+
+```shell
+kubectl patch clusterconfig -n kube-system k0s --patch '{"spec":{"extensions":{"storage":{"type":"external_storage"}}}}' --type=merge
+```
+
+If you are using a static configuration file, change `spec.extensions.storage.type`
+from `openebs_local_storage` to `external_storage` on all control plane nodes and
+restart them one by one.
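+
+Both steps can be scripted. As a rough sketch, assuming controllers that were
+set up as services with `k0s install controller` (adapt the invocation to your
+own installation, init system, and any flags you already use), disabling the
+`applier-manager` could look like this:
+
+```shell
+# Sketch: re-register the controller service with the extra flag, then
+# restart it. Merge applier-manager into any components you already disable.
+k0s stop
+k0s install controller --force --disable-components=applier-manager
+k0s start
+```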
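+
+And for the static configuration edit, a minimal sketch assuming the default
+configuration path `/etc/k0s/k0s.yaml` (verify the path and the surrounding
+configuration before running anything like this):
+
+```shell
+# Sketch: switch the storage extension type in the static config, then
+# restart this controller before moving on to the next one.
+sed -i 's/type: openebs_local_storage/type: external_storage/' /etc/k0s/k0s.yaml
+k0s stop && k0s start
+```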
+
+When the configuration is set to `external_storage` and the servers are
+restarted, you must manage it as a chart object in the API:
+
+```shell
+kubectl get chart -n kube-system k0s-addon-chart-openebs -o yaml
+```
+
+First, remove the labels and annotations related to the stack applier:
+
+```shell
+k0s kc annotate -n kube-system chart k0s-addon-chart-openebs k0s.k0sproject.io/stack-checksum-
+k0s kc label -n kube-system chart k0s-addon-chart-openebs k0s.k0sproject.io/stack-
+```
+
+After the annotations and labels are removed, remove the manifest file **on each
+controller**. This file is located in
+`<k0s-data-dir>/manifests/helm/0_helm_extension_openebs.yaml`, which in
+most installations defaults to
+`/var/lib/k0s/manifests/helm/0_helm_extension_openebs.yaml`.
+
+**WARNING**: Not removing the old manifest file from all controllers may cause
+the manifest to be reapplied, reverting your changes and potentially causing
+data loss.
+
+Finally, we want to re-enable the `applier-manager` and restart all controllers
+without the `--disable-components=applier-manager` flag.
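+
+A sketch mirroring the earlier restart, again assuming service-managed
+controllers (restore any other `--disable-components` values you normally
+pass):
+
+```shell
+# Sketch: re-register the controller without applier-manager in
+# --disable-components, then restart it.
+k0s stop
+k0s install controller --force
+k0s start
+```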
+
+Once the migration is complete, you'll be able to update the OpenEBS chart.
+Let's take v3.9.0 as an example:
+
+```shell
+kubectl patch chart -n kube-system k0s-addon-chart-openebs --patch '{"spec":{"version":"3.9.0"}}' --type=merge
+```
+
+## Usage
+
+Once installed, the cluster will have two storage classes available for you to use:
+
+```shell
+k0s kubectl get storageclass
+```
+
+```shell
+NAME               PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+openebs-device     openebs.io/local   Delete          WaitForFirstConsumer   false                  24s
+openebs-hostpath   openebs.io/local   Delete          WaitForFirstConsumer   false                  24s
+```
+
+The `openebs-hostpath` is the storage class that maps to `/var/openebs/local`.
+
+The `openebs-device` is not configured and can be configured with the [manifest deployer](../manifests.md) according to the [OpenEBS documentation](https://docs.openebs.io/).
+
+### Example
+
+Use the following manifests as an example of a pod with a mounted volume:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: nginx-pvc
+  namespace: default
+spec:
+  accessModes:
+    - ReadWriteOnce
+  storageClassName: openebs-hostpath
+  resources:
+    requests:
+      storage: 5Gi
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx
+  namespace: default
+  labels:
+    app: nginx
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - image: nginx
+        name: nginx
+        volumeMounts:
+        - name: persistent-storage
+          mountPath: /var/lib/nginx
+      volumes:
+      - name: persistent-storage
+        persistentVolumeClaim:
+          claimName: nginx-pvc
+```
+
+```shell
+k0s kubectl apply -f nginx.yaml
+```
+
+```shell
+persistentvolumeclaim/nginx-pvc created
+deployment.apps/nginx created
+```
+
+```shell
+k0s kc get pods
+```
+
+```shell
+NAME                    READY   STATUS    RESTARTS   AGE
+nginx-d95bcb7db-gzsdt   1/1     Running   0          30s
+```
+
+```shell
+k0s kubectl get pv
+```
+
+```shell
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS       REASON   AGE
+pvc-9a7fae2d-eb03-42c3-aaa9-1a807d5df12f   5Gi        RWO            Delete           Bound    default/nginx-pvc   openebs-hostpath            30s
+```
diff --git a/docs/storage.md b/docs/storage.md
index 907f8e2ed83a..16a445098aef 100644
--- a/docs/storage.md
+++ b/docs/storage.md
@@ -1,14 +1,46 @@
 # Storage
 
-k0s supports any volume provider that implements the [CSI specification](https://github.com/container-storage-interface/spec). For convenience, k0s comes bundled in with support for [OpenEBS local path provisioner](https://openebs.io/docs/concepts/localpv).
+## CSI
+
+k0s supports a wide range of different storage options by utilizing Container Storage Interface (CSI). All Kubernetes storage solutions are supported and users can easily select the storage that fits best for their needs.
+
+When the storage solution implements CSI, Kubernetes can communicate with the storage to create and configure persistent volumes. This makes it easy to dynamically provision the requested volumes. It also expands the set of supported storage solutions beyond the previous generation of in-tree volume plugins. More information about the CSI concept is described on the [Kubernetes Blog](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/).
+
+![k0s storage](img/k0s_storage.png)
+
+### Installing 3rd party storage solutions
+
+Follow your storage driver's installation instructions. Note that by default the Kubelet installed by k0s uses a slightly different path for its working directory (`/var/lib/k0s/kubelet` instead of `/var/lib/kubelet`). Consult the CSI driver's configuration documentation on how to customize this path. The actual path differs if you set the `--data-dir` flag.
+
+## Example storage solutions
+
+Different Kubernetes storage solutions are explained in the [official Kubernetes storage documentation](https://kubernetes.io/docs/concepts/storage/volumes/). All of them can be used with k0s. Here are some popular ones:
+
+- Rook-Ceph (Open Source)
+- MinIO (Open Source)
+- Gluster (Open Source)
+- Longhorn (Open Source)
+- Amazon EBS
+- Google Persistent Disk
+- Azure Disk
+- Portworx
+
+If you are looking for fault-tolerant storage with data replication, you can find a k0s tutorial for configuring Ceph storage with Rook [here](examples/rook-ceph.md).
+
+## Bundled OpenEBS storage (deprecated)
+
+Bundled OpenEBS was deprecated in favor of running it [as a helm extension](./examples/openebs.md);
+this documentation is maintained as a reference for existing installations.
 
-The choise of which CSI provider to use depends heavily on the use case and infrastructure you're running on and the use case you have.
+This was done for three reasons:
 
-## Bundled OpenEBS storage
+1. By installing it as a helm extension, users have more control and flexibility without adding complexity.
+2. It allows users to choose the OpenEBS version independent of their k0s version.
+3. It makes the k0s configuration more consistent.
 
-K0s comes out with bundled OpenEBS installation which can be enabled by using [configuration file](./configuration.md)
+For new installations or to migrate existing installations, please refer to the [OpenEBS extension page](./examples/openebs.md).
 
-Use following configuration as an example:
+The OpenEBS extension is enabled by setting [`spec.extensions.storage.type`](configuration.md#specextensionsstorage) to `openebs_local_storage`:
 
 ```yaml
 spec:
@@ -101,30 +133,3 @@ k0s kubectl get pv
 NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS       REASON   AGE
 pvc-9a7fae2d-eb03-42c3-aaa9-1a807d5df12f   5Gi        RWO            Delete           Bound    default/nginx-pvc   openebs-hostpath            30s
 ```
-
-## CSI
-
-k0s supports a wide range of different storage options by utilizing Container Storage Interface (CSI). All Kubernetes storage solutions are supported and users can easily select the storage that fits best for their needs.
-
-When the storage solution implements Container Storage Interface (CSI), containers can communicate with the storage for creation and configuration of persistent volumes. This makes it easy to dynamically provision the requested volumes. It also expands the supported storage solutions from the previous generation, in-tree volume plugins. More information about the CSI concept is described on the [Kubernetes Blog](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/).
-
-![k0s storage](img/k0s_storage.png)
-
-### Installing 3rd party storage solutions
-
-Follow your storage driver's installation instructions. Note that the Kubelet installed by k0s uses a slightly different path for its working directory (`/varlib/k0s/kubelet` instead of `/var/lib/kubelet`). Consult the CSI driver's configuration documentation on how to customize this path.
-
-## Example storage solutions
-
-Different Kubernetes storage solutions are explained in the [official Kubernetes storage documentation](https://kubernetes.io/docs/concepts/storage/volumes/). All of them can be used with k0s. Here are some popular ones:
-
-- Rook-Ceph (Open Source)
-- MinIO (Open Source)
-- Gluster (Open Source)
-- Longhorn (Open Source)
-- Amazon EBS
-- Google Persistent Disk
-- Azure Disk
-- Portworx
-
-If you are looking for a fault-tolerant storage with data replication, you can find a k0s tutorial for configuring Ceph storage with Rook [in here](examples/rook-ceph.md).
diff --git a/inttest/Makefile b/inttest/Makefile
index 721dd835af90..84876aba777b 100644
--- a/inttest/Makefile
+++ b/inttest/Makefile
@@ -111,6 +111,8 @@ check-network-conformance-calico: TEST_PACKAGE=network-conformance
 
 check-nllb: TIMEOUT=15m
 
+check-openebs: TIMEOUT=7m
+
 .PHONY: $(smoketests)
 
 include Makefile.variables
diff --git a/inttest/Makefile.variables b/inttest/Makefile.variables
index ac5c579d84b1..5764c17a033c 100644
--- a/inttest/Makefile.variables
+++ b/inttest/Makefile.variables
@@ -52,6 +52,7 @@ smoketests := \
 	check-noderole \
 	check-noderole-no-taints \
 	check-noderole-single \
+	check-openebs \
 	check-psp \
 	check-singlenode \
 	check-statussocket \
diff --git a/inttest/calico/calico_test.go b/inttest/calico/calico_test.go
index 503edff899c8..1112cc6565ce 100644
--- a/inttest/calico/calico_test.go
+++ b/inttest/calico/calico_test.go
@@ -67,7 +67,7 @@ func (s *CalicoSuite) TestK0sGetsUp() {
 	s.AssertSomeKubeSystemPods(kc)
 
 	s.T().Log("waiting to see calico pods ready")
-	s.NoError(common.WaitForDaemonSet(s.Context(), kc, "calico-node"), "calico did not start")
+	s.NoError(common.WaitForDaemonSet(s.Context(), kc, "calico-node", "kube-system"), "calico did not start")
 	s.NoError(common.WaitForPodLogs(s.Context(), kc, "kube-system"))
 
 	createdTargetPod, err := kc.CoreV1().Pods("default").Create(s.Context(), &corev1.Pod{
diff --git a/inttest/cli/cli_test.go b/inttest/cli/cli_test.go
index ed90a607a2b4..7ff368443658 100644
--- a/inttest/cli/cli_test.go
+++ b/inttest/cli/cli_test.go
@@ -108,7 +108,7 @@ func (s *CliSuite) TestK0sCliKubectlAndResetCommand() {
 	s.AssertSomeKubeSystemPods(kc)
 
 	// Wait till we see all pods running, otherwise we get into weird timing issues and high probability of leaked containerd shim processes
-	require.NoError(common.WaitForDaemonSet(s.Context(), kc, "kube-proxy"))
+	require.NoError(common.WaitForDaemonSet(s.Context(), kc, "kube-proxy", "kube-system"))
 	require.NoError(common.WaitForKubeRouterReady(s.Context(), kc))
 	require.NoError(common.WaitForDeployment(s.Context(), kc, "coredns", "kube-system"))
 
diff --git 
a/inttest/common/util.go b/inttest/common/util.go
index 26a17b1b2f29..39ff3a84a4bc 100644
--- a/inttest/common/util.go
+++ b/inttest/common/util.go
@@ -60,7 +60,7 @@ func Poll(ctx context.Context, condition wait.ConditionWithContextFunc) error {
 // WaitForKubeRouterReady waits to see all kube-router pods healthy as long as
 // the context isn't canceled.
 func WaitForKubeRouterReady(ctx context.Context, kc *kubernetes.Clientset) error {
-	return WaitForDaemonSet(ctx, kc, "kube-router")
+	return WaitForDaemonSet(ctx, kc, "kube-router", "kube-system")
 }
 
 // WaitForCoreDNSReady waits to see all coredns pods healthy as long as the context isn't canceled.
@@ -146,10 +146,10 @@ func WaitForNodeReadyStatus(ctx context.Context, clients kubernetes.Interface, n
 	})
 }
 
-// WaitForDaemonset waits for the DaemonlSet with the given name to have
+// WaitForDaemonSet waits for the DaemonSet with the given name in the given namespace to have
 // as many ready replicas as defined in the spec.
-func WaitForDaemonSet(ctx context.Context, kc *kubernetes.Clientset, name string) error {
-	return watch.DaemonSets(kc.AppsV1().DaemonSets("kube-system")).
+func WaitForDaemonSet(ctx context.Context, kc *kubernetes.Clientset, name string, namespace string) error {
+	return watch.DaemonSets(kc.AppsV1().DaemonSets(namespace)).
 		WithObjectName(name).
 		WithErrorCallback(RetryWatchErrors(logfFrom(ctx))).
 		Until(ctx, func(ds *appsv1.DaemonSet) (bool, error) {
diff --git a/inttest/customports/customports_test.go b/inttest/customports/customports_test.go
index 60cc476de28f..4493a573d73d 100644
--- a/inttest/customports/customports_test.go
+++ b/inttest/customports/customports_test.go
@@ -131,7 +131,7 @@ func (s *customPortsSuite) TestControllerJoinsWithCustomPort() {
 	s.T().Log("waiting to see CNI pods ready")
 	s.Require().NoError(common.WaitForKubeRouterReady(s.Context(), kc), "calico did not start")
 	s.T().Log("waiting to see konnectivity-agent pods ready")
-	s.Require().NoError(common.WaitForDaemonSet(s.Context(), kc, "konnectivity-agent"), "konnectivity-agent did not start")
+	s.Require().NoError(common.WaitForDaemonSet(s.Context(), kc, "konnectivity-agent", "kube-system"), "konnectivity-agent did not start")
 	s.T().Log("waiting to get logs from pods")
 	s.Require().NoError(common.WaitForPodLogs(s.Context(), kc, "kube-system"))
 
diff --git a/inttest/network-conformance/network_test.go b/inttest/network-conformance/network_test.go
index 04f38b81a8ed..9a08e2538de6 100644
--- a/inttest/network-conformance/network_test.go
+++ b/inttest/network-conformance/network_test.go
@@ -70,7 +70,7 @@ func (s *networkSuite) TestK0sGetsUp() {
 		daemonSetName = "kube-router"
 	}
 	s.T().Log("waiting to see CNI pods ready")
-	s.NoError(common.WaitForDaemonSet(s.Context(), kc, daemonSetName), fmt.Sprintf("%s did not start", daemonSetName))
+	s.NoError(common.WaitForDaemonSet(s.Context(), kc, daemonSetName, "kube-system"), fmt.Sprintf("%s did not start", daemonSetName))
 
 	restConfig, err := s.GetKubeConfig("controller0")
 	s.Require().NoError(err)
diff --git a/inttest/nllb/nllb_test.go b/inttest/nllb/nllb_test.go
index 536792c29628..1c1a8db5bd91 100644
--- a/inttest/nllb/nllb_test.go
+++ b/inttest/nllb/nllb_test.go
@@ -195,7 +195,7 @@ func (s *suite) TestNodeLocalLoadBalancing() {
 		_, err = clients.AppsV1().DaemonSets("kube-system").Create(ctx, &dummyDaemons, metav1.CreateOptions{})
 		s.Require().NoError(err)
 
-		s.NoError(common.WaitForDaemonSet(s.Context(), clients, name))
+		s.NoError(common.WaitForDaemonSet(s.Context(), clients, name, "kube-system"))
 		s.T().Logf("Dummy DaemonSet %s is ready",
name) }) @@ -303,7 +303,7 @@ func (s *suite) checkClusterReadiness(ctx context.Context, clients *kubernetes.C for _, daemonSet := range []string{"kube-proxy", "konnectivity-agent"} { daemonSet := daemonSet eg.Go(func() error { - if err := common.WaitForDaemonSet(ctx, clients, daemonSet); err != nil { + if err := common.WaitForDaemonSet(ctx, clients, daemonSet, "kube-system"); err != nil { return fmt.Errorf("%s is not ready: %w", daemonSet, err) } s.T().Log(daemonSet, "is ready") diff --git a/inttest/openebs/openebs_test.go b/inttest/openebs/openebs_test.go new file mode 100644 index 000000000000..cc1333c6a989 --- /dev/null +++ b/inttest/openebs/openebs_test.go @@ -0,0 +1,211 @@ +/* +Copyright 2024 k0s authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openebs + +import ( + "context" + "testing" + "time" + + "github.com/k0sproject/bootloose/pkg/config" + "github.com/k0sproject/k0s/inttest/common" + "github.com/k0sproject/k0s/pkg/kubernetes/watch" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/kubernetes" + + helmv1beta1 "github.com/k0sproject/k0s/pkg/apis/helm/v1beta1" + helmclient "github.com/k0sproject/k0s/pkg/client/clientset/typed/helm/v1beta1" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type OpenEBSSuite struct { + common.BootlooseSuite +} + +func (s *OpenEBSSuite) TestK0sGetsUp() { + ctx := s.Context() + + s.T().Log("Start k0s with both storage and helm extensions enabled") + s.PutFile(s.ControllerNode(0), "/tmp/k0s.yaml", k0sConfigWithBoth) + s.Require().NoError(s.InitController(0, "--config=/tmp/k0s.yaml", "--disable-components=konnectivity-server,metrics-server")) + s.Require().NoError(s.RunWorkers()) + + kc, err := s.KubeClient(s.ControllerNode(0), "") + s.Require().NoError(err) + + s.Require().NoError(s.WaitForNodeReady(s.WorkerNode(0), kc)) + + // When both storage and helm are enabled, there should be no action. 
+	// Unfortunately, to check that, the best we can do is verify that it's not
+	// created after a grace period.
+	s.T().Log("Waiting 30 additional seconds of grace period to see if charts are created")
+	s.sleep(ctx, 30*time.Second)
+
+	s.T().Log("Checking that the chart isn't created")
+	hc, err := s.HelmClient(s.ControllerNode(0), "")
+	s.Require().NoError(err)
+	_, err = hc.Charts("kube-system").Get(ctx, openEBSChart, metav1.GetOptions{})
+	s.Require().True(errors.IsNotFound(err), "Chart was created when it shouldn't have been")
+
+	// Test OpenEBS as a storage extension
+	s.T().Log("Restarting k0s with only the storage extension enabled")
+	s.Require().NoError(s.StopController(s.ControllerNode(0)))
+	s.PutFile(s.ControllerNode(0), "/tmp/k0s.yaml", k0sConfigWithStorage)
+	s.Require().NoError(s.StartController(s.ControllerNode(0)))
+	s.Require().NoError(s.WaitForNodeReady(s.WorkerNode(0), kc))
+
+	s.T().Log("Checking that the chart is created and ready")
+	s.Require().NoError(s.waitForChartUpdated(ctx, "3.3.0"))
+	s.waitForOpenEBSReady(ctx, kc)
+
+	// Migrate to helm chart
+	s.T().Log("Restarting k0s without applier-manager and without extension")
+	s.Require().NoError(s.StopController(s.ControllerNode(0)))
+	s.PutFile(s.ControllerNode(0), "/tmp/k0s.yaml", k0sConfigNoExtension)
+	s.Require().NoError(s.InitController(0, "--config=/tmp/k0s.yaml", "--disable-components=konnectivity-server,metrics-server,applier-manager"))
+	s.Require().NoError(s.WaitForNodeReady(s.WorkerNode(0), kc))
+
+	s.T().Log("Removing label and annotation")
+	c, err := hc.Charts("kube-system").Get(ctx, openEBSChart, metav1.GetOptions{})
+	s.Require().NoError(err, "Error getting OpenEBS chart after removing the storage extension")
+	delete(c.Annotations, "k0s.k0sproject.io/stack-checksum")
+	delete(c.Labels, "k0s.k0sproject.io/stack")
+	_, err = hc.Charts("kube-system").Update(ctx, c, metav1.UpdateOptions{})
+	s.Require().NoError(err, "Error removing stack applier information in OpenEBS chart")
+
+	s.T().Log("Removing the manifest")
+	ssh, err := s.SSH(s.Context(), s.ControllerNode(0))
+	s.Require().NoError(err)
+	defer ssh.Disconnect()
+	s.Require().NoError(ssh.Exec(ctx, "rm -f /var/lib/k0s/manifests/helm/0_helm_extension_openebs.yaml", common.SSHStreams{}))
+
+	s.T().Log("Upgrading to 3.9.0")
+	c, err = hc.Charts("kube-system").Get(ctx, openEBSChart, metav1.GetOptions{})
+	s.Require().NoError(err, "Error getting OpenEBS chart after removing the storage extension")
+	c.Spec.Version = "3.9.0"
+	_, err = hc.Charts("kube-system").Update(ctx, c, metav1.UpdateOptions{})
+	s.Require().NoError(err, "Error upgrading OpenEBS chart")
+
+	s.T().Log("Checking that the chart is upgraded to 3.9.0 and becomes ready")
+	s.Require().NoError(s.waitForChartUpdated(ctx, "3.9.0"))
+	s.waitForOpenEBSReady(ctx, kc)
+
+	// Test that the applier doesn't revert or delete the chart
+	s.T().Log("Restarting the controller with manifest applier")
+	s.Require().NoError(s.InitController(0, "--config=/tmp/k0s.yaml", "--disable-components=konnectivity-server,metrics-server"))
+	s.Require().NoError(s.WaitForNodeReady(s.WorkerNode(0), kc))
+
+	s.T().Log("Waiting 30 additional seconds of grace period to see if the chart is deleted")
+	s.sleep(ctx, 30*time.Second)
+
+	s.T().Log("Checking that the chart is still at 3.9.0 and ready")
+	s.Require().NoError(s.waitForChartUpdated(ctx, "3.9.0"))
+	s.waitForOpenEBSReady(ctx, kc)
+}
+
+func TestOpenEBSSuite(t *testing.T) {
+	s := OpenEBSSuite{
+		common.BootlooseSuite{
+			ControllerCount: 1,
+			WorkerCount:     1,
+			ExtraVolumes:
[]config.Volume{{
+				Type:        "bind",
+				Source:      "/run/udev",
+				Destination: "/run/udev",
+				ReadOnly:    false,
+			}},
+		},
+	}
+	suite.Run(t, &s)
+}
+
+func (s *OpenEBSSuite) waitForChartUpdated(ctx context.Context, version string) error {
+	hc, err := s.HelmClient(s.ControllerNode(0), "")
+	s.Require().NoError(err)
+
+	return watch.Charts(hc.Charts("kube-system")).
+		WithObjectName(openEBSChart).
+		WithErrorCallback(common.RetryWatchErrors(s.T().Logf)).
+		Until(ctx, func(chart *helmv1beta1.Chart) (done bool, err error) {
+			// We don't need to actually deploy helm in this test;
+			// we're just validating that the spec is correct.
+			return chart.Spec.Version == version &&
+				chart.Status.AppVersion == version &&
+				chart.Status.Version == version, nil
+		})
+}
+
+func (s *OpenEBSSuite) waitForOpenEBSReady(ctx context.Context, kc *kubernetes.Clientset) {
+	s.T().Log("Waiting for OpenEBS to be ready")
+	s.Require().NoError(common.WaitForDeployment(ctx, kc, "openebs-localpv-provisioner", "openebs"))
+	s.Require().NoError(common.WaitForDeployment(ctx, kc, "openebs-ndm-operator", "openebs"))
+	s.Require().NoError(common.WaitForDaemonSet(ctx, kc, "openebs-ndm", "openebs"))
+}
+
+// HelmClient returns a HelmV1beta1Client by loading the admin access config from the given node
+func (s *OpenEBSSuite) HelmClient(node string, k0sKubeconfigArgs ...string) (*helmclient.HelmV1beta1Client, error) {
+	cfg, err := s.GetKubeConfig(node, k0sKubeconfigArgs...)
+	if err != nil {
+		return nil, err
+	}
+	return helmclient.NewForConfig(cfg)
+}
+
+func (s *OpenEBSSuite) sleep(ctx context.Context, d time.Duration) {
+	select {
+	case <-ctx.Done():
+		s.Require().NoError(ctx.Err())
+	case <-time.After(d):
+	}
+}
+
+const k0sConfigWithBoth = `
+spec:
+  extensions:
+    storage:
+      type: openebs_local_storage
+    helm:
+      repositories:
+      - name: openebs-internal
+        url: https://openebs.github.io/charts
+      charts:
+      - name: openebs
+        chartname: openebs-internal/openebs
+        version: "3.9.0"
+        namespace: openebs
+        order: 1
+        values: |
+          localprovisioner:
+            hostpathClass:
+              enabled: true
+              isDefaultClass: false
+`
+
+const k0sConfigWithStorage = `
+spec:
+  extensions:
+    storage:
+      type: openebs_local_storage
+`
+
+const k0sConfigNoExtension = `
+spec:
+  extensions: {}
+`
+const openEBSChart = "k0s-addon-chart-openebs"
diff --git a/mkdocs.yml b/mkdocs.yml
index b9a64756f9a6..29ec1f8be191 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -62,6 +62,7 @@ nav:
       - Ambassador API Gateway: examples/ambassador-ingress.md
       - Ceph Storage with Rook: examples/rook-ceph.md
       - GitOps with Flux: examples/gitops-flux.md
+      - OpenEBS storage: examples/openebs.md
   - Troubleshooting:
       - FAQ: FAQ.md
       - Common Pitfalls: troubleshooting.md
diff --git a/pkg/apis/k0s/v1beta1/extensions.go b/pkg/apis/k0s/v1beta1/extensions.go
index 5212d9f420ff..0ef163466fe4 100644
--- a/pkg/apis/k0s/v1beta1/extensions.go
+++ b/pkg/apis/k0s/v1beta1/extensions.go
@@ -28,6 +28,7 @@ var _ Validateable = (*ClusterExtensions)(nil)
 
 // ClusterExtensions specifies cluster extensions
 type ClusterExtensions struct {
+	//+kubebuilder:deprecatedversion:warning="storage is deprecated and will be ignored in 1.30. https://docs.k0sproject.io/stable/examples/openebs"
Storage *StorageExtension `json:"storage"` Helm *HelmExtensions `json:"helm"` } diff --git a/pkg/applier/manager.go b/pkg/applier/manager.go index 5192990b6f50..4774b75fdf44 100644 --- a/pkg/applier/manager.go +++ b/pkg/applier/manager.go @@ -61,7 +61,7 @@ func (m *Manager) Init(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to create manifest bundle dir %s: %w", m.K0sVars.ManifestsDir, err) } - m.log = logrus.WithField("component", "applier-manager") + m.log = logrus.WithField("component", constant.ApplierManagerComponentName) m.stacks = make(map[string]stack) m.bundlePath = m.K0sVars.ManifestsDir @@ -97,7 +97,7 @@ func (m *Manager) Stop() error { } func (m *Manager) runWatchers(ctx context.Context) error { - log := logrus.WithField("component", "applier-manager") + log := logrus.WithField("component", constant.ApplierManagerComponentName) watcher, err := fsnotify.NewWatcher() if err != nil { diff --git a/pkg/component/controller/extensions_controller.go b/pkg/component/controller/extensions_controller.go index 41bd99f3987d..74726f7e31bb 100644 --- a/pkg/component/controller/extensions_controller.go +++ b/pkg/component/controller/extensions_controller.go @@ -86,15 +86,9 @@ func (ec *ExtensionsController) Reconcile(ctx context.Context, clusterConfig *k0 ec.L.Info("Extensions reconciliation started") defer ec.L.Info("Extensions reconciliation finished") - helmSettings := clusterConfig.Spec.Extensions.Helm - var err error - switch clusterConfig.Spec.Extensions.Storage.Type { - case k0sAPI.OpenEBSLocal: - helmSettings, err = addOpenEBSHelmExtension(helmSettings, clusterConfig.Spec.Extensions.Storage) - if err != nil { - ec.L.WithError(err).Error("Can't add openebs helm extension") - } - default: + helmSettings, err := ec.configureStorage(clusterConfig) + if err != nil { + return fmt.Errorf("cannot configure storage: %w", err) } if err := ec.reconcileHelmExtensions(helmSettings); err != nil { @@ -104,6 +98,24 @@ func (ec *ExtensionsController) Reconcile(ctx context.Context, clusterConfig *k0 return nil } +func (ec *ExtensionsController) configureStorage(clusterConfig *k0sAPI.ClusterConfig) (*k0sAPI.HelmExtensions, error) { + helmSettings := clusterConfig.Spec.Extensions.Helm + if clusterConfig.Spec.Extensions.Storage.Type != k0sAPI.OpenEBSLocal { + return helmSettings, nil + } + + for _, chart := range helmSettings.Charts { + if chart.ChartName == "openebs-internal/openebs" { + return nil, fmt.Errorf("openebs-internal/openebs is defined in spec.extensions.helm.charts and spec.extensions.storage.type is set to openebs_local_storage. 
https://docs.k0sproject.io/stable/examples/openebs") + } + } + helmSettings, err := addOpenEBSHelmExtension(helmSettings, clusterConfig.Spec.Extensions.Storage) + if err != nil { + return nil, fmt.Errorf("can't add openebs helm extension") + } + return helmSettings, nil +} + func addOpenEBSHelmExtension(helmSpec *k0sAPI.HelmExtensions, storageExtension *k0sAPI.StorageExtension) (*k0sAPI.HelmExtensions, error) { openEBSValues := map[string]interface{}{ "localprovisioner": map[string]interface{}{ diff --git a/pkg/component/controller/extensions_controller_test.go b/pkg/component/controller/extensions_controller_test.go index 9027da4c579e..44cbd4388b25 100644 --- a/pkg/component/controller/extensions_controller_test.go +++ b/pkg/component/controller/extensions_controller_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/k0sproject/k0s/pkg/apis/helm/v1beta1" + k0sv1beta1 "github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" "github.com/stretchr/testify/assert" ) @@ -128,9 +129,72 @@ func TestChartNeedsUpgrade(t *testing.T) { cr := new(ChartReconciler) for _, tc := range testCases { - t.Run("", func(t *testing.T) { + t.Run(tc.description, func(t *testing.T) { actual := cr.chartNeedsUpgrade(tc.chart) assert.Equal(t, tc.expected, actual) }) } } + +func addHelmExtension(config *k0sv1beta1.ClusterConfig) *k0sv1beta1.ClusterConfig { + config.Spec.Extensions.Storage.Type = k0sv1beta1.OpenEBSLocal + return config +} + +func addStorageExtension(config *k0sv1beta1.ClusterConfig) *k0sv1beta1.ClusterConfig { + config.Spec.Extensions.Helm, _ = addOpenEBSHelmExtension(config.Spec.Extensions.Helm, config.Spec.Extensions.Storage) + return config +} + +func TestConfigureStorage(t *testing.T) { + var testCases = []struct { + description string + clusterConfig *k0sv1beta1.ClusterConfig + expectedErr bool + expectedOpenEBS bool + }{ + { + "no_openebs", + k0sv1beta1.DefaultClusterConfig(), + false, + false, + }, + { + "openebs_helm_extension", + addHelmExtension(k0sv1beta1.DefaultClusterConfig()), + false, + true, + }, + { + "openebs_storage_extension", + addStorageExtension(k0sv1beta1.DefaultClusterConfig()), + false, + true, + }, + { + "openebs_both", + addStorageExtension(addHelmExtension(k0sv1beta1.DefaultClusterConfig())), + true, + false, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + ec := ExtensionsController{} + helmSettings, err := ec.configureStorage(tc.clusterConfig) + + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + if tc.expectedOpenEBS { + assert.Equal(t, 1, len(helmSettings.Charts)) + assert.Equal(t, "openebs", helmSettings.Charts[0].Name) + } + }) + } + +} diff --git a/pkg/config/cli.go b/pkg/config/cli.go index ba4cbd7ed81c..06452519e521 100644 --- a/pkg/config/cli.go +++ b/pkg/config/cli.go @@ -168,6 +168,7 @@ func GetWorkerFlags() *pflag.FlagSet { } var availableComponents = []string{ + constant.ApplierManagerComponentName, constant.AutopilotComponentName, constant.ControlAPIComponentName, constant.CoreDNSComponentname, diff --git a/pkg/constant/constant_shared.go b/pkg/constant/constant_shared.go index e536646a9664..3406b95991cf 100644 --- a/pkg/constant/constant_shared.go +++ b/pkg/constant/constant_shared.go @@ -97,6 +97,7 @@ const ( /* Controller component names */ APIEndpointReconcilerComponentName = "endpoint-reconciler" + ApplierManagerComponentName = "applier-manager" ControlAPIComponentName = "control-api" CoreDNSComponentname = "coredns" CsrApproverComponentName = "csr-approver" diff --git 
a/pkg/kubernetes/watch/k0s.go b/pkg/kubernetes/watch/k0s.go index 2cf7e3681e2b..fb9ac3236503 100644 --- a/pkg/kubernetes/watch/k0s.go +++ b/pkg/kubernetes/watch/k0s.go @@ -18,6 +18,7 @@ package watch import ( autopilotv1beta2 "github.com/k0sproject/k0s/pkg/apis/autopilot/v1beta2" + helmv1beta1 "github.com/k0sproject/k0s/pkg/apis/helm/v1beta1" k0sv1beta1 "github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1" ) @@ -28,3 +29,7 @@ func ClusterConfigs(client Provider[*k0sv1beta1.ClusterConfigList]) *Watcher[k0s func Plans(client Provider[*autopilotv1beta2.PlanList]) *Watcher[autopilotv1beta2.Plan] { return FromClient[*autopilotv1beta2.PlanList, autopilotv1beta2.Plan](client) } + +func Charts(client Provider[*helmv1beta1.ChartList]) *Watcher[helmv1beta1.Chart] { + return FromClient[*helmv1beta1.ChartList, helmv1beta1.Chart](client) +}