From 917cb77b2880d49f20bec6fb736b6f5371e8bf4f Mon Sep 17 00:00:00 2001 From: Frank Sundermeyer Date: Mon, 26 Feb 2024 17:59:34 +0100 Subject: [PATCH] Fixed long line listings and syntax highlighting --- asciidoc/components/adoc/metallb.adoc | 12 +-- .../adoc/elemental-utm-aarch64.adoc | 51 ++++++---- asciidoc/demo_setup/adoc/k3s-on-slemicro.adoc | 93 ++++++++++++------- .../demo_setup/adoc/slemicro-utm-aarch64.adoc | 35 +++---- .../adoc/slemicro-virt-install-x86_64.adoc | 43 +++++---- .../integrations/adoc/create-package-obs.adoc | 22 ++--- asciidoc/integrations/adoc/nats.adoc | 50 ++++++---- .../integrations/adoc/nvidia-slemicro.adoc | 63 +++++++------ .../create-a-simple-container-image-obs.adoc | 4 +- .../create-a-single-iso-image-customized.adoc | 26 +++--- asciidoc/misc/adoc/metallb-kube-api.adoc | 26 +++--- asciidoc/misc/adoc/modify-sle-micro-iso.adoc | 8 +- .../misc/adoc/rancher-disambiguation.adoc | 22 ++--- asciidoc/misc/adoc/rke2-selinux.adoc | 28 +++--- ...-emulator-sles-certificates-container.adoc | 18 ++-- asciidoc/product/atip/adoc/atip.adoc | 1 + asciidoc/product/atip/adoc/features.adoc | 76 +++++++-------- .../product/atip/adoc/management-cluster.adoc | 63 ++++++------- asciidoc/product/atip/adoc/requirements.adoc | 6 +- asciidoc/quickstart/adoc/eib.adoc | 6 +- asciidoc/quickstart/adoc/elemental.adoc | 67 +++++++------ asciidoc/quickstart/adoc/metal3.adoc | 12 +-- 22 files changed, 404 insertions(+), 328 deletions(-) diff --git a/asciidoc/components/adoc/metallb.adoc b/asciidoc/components/adoc/metallb.adoc index 91a5a748..31ca9a14 100644 --- a/asciidoc/components/adoc/metallb.adoc +++ b/asciidoc/components/adoc/metallb.adoc @@ -34,7 +34,7 @@ K3S comes with its own service load balancer named Klipper. You https://metallb. MetalLB leverages Helm (and other methods as well), so: -[,console] +[,bash] ---- helm repo add metallb https://metallb.github.io/metallb helm install --create-namespace -n metallb-system metallb metallb/metallb @@ -50,7 +50,7 @@ done At this point, the installation is completed. Now it is time to https://metallb.universe.tf/configuration/[configure] using our example values: -[,console] +[,yaml] ---- cat <<-EOF | kubectl apply -f - apiVersion: metallb.io/v1beta1 @@ -66,7 +66,7 @@ spec: EOF ---- -[,console] +[,yaml] ---- cat <<-EOF | kubectl apply -f - apiVersion: metallb.io/v1beta1 @@ -113,7 +113,7 @@ We will leverage this <>. Let's create an example deployment: -[,console] +[,yaml] ---- cat <<- EOF | kubectl apply -f - --- @@ -188,7 +188,7 @@ EOF And finally, the service: -[,console] +[,yaml] ---- cat <<- EOF | kubectl apply -f - apiVersion: v1 @@ -265,7 +265,7 @@ curl http://192.168.122.11 As Traefik is already serving as an ingress controller, we can expose any http/https traffic via an `Ingress` object such as: -[,console] +[,yaml] ---- IP=$(kubectl get svc -n kube-system traefik -o jsonpath="{.status.loadBalancer.ingress[0].ip}") cat <<- EOF | kubectl apply -f - diff --git a/asciidoc/demo_setup/adoc/elemental-utm-aarch64.adoc b/asciidoc/demo_setup/adoc/elemental-utm-aarch64.adoc index 337cc875..bd7feacc 100644 --- a/asciidoc/demo_setup/adoc/elemental-utm-aarch64.adoc +++ b/asciidoc/demo_setup/adoc/elemental-utm-aarch64.adoc @@ -46,14 +46,19 @@ The trick here is there is no ARM64 image yet, but just a Raspberry Pi one... 
so This is an optional step to enable the Elemental UI extension in Rancher (see https://ranchermanager.docs.rancher.com/integrations-in-rancher/rancher-extensions[more about Rancher extensions]): -[,console] +[,bash] ---- helm repo add rancher-charts https://charts.rancher.io/ -helm upgrade --create-namespace -n cattle-ui-plugin-system --install ui-plugin-operator rancher-charts/ui-plugin-operator -helm upgrade --create-namespace -n cattle-ui-plugin-system --install ui-plugin-operator-crd rancher-charts/ui-plugin-operator-crd +helm upgrade --create-namespace -n cattle-ui-plugin-system \ + --install ui-plugin-operator rancher-charts/ui-plugin-operator +helm upgrade --create-namespace -n cattle-ui-plugin-system \ + --install ui-plugin-operator-crd rancher-charts/ui-plugin-operator-crd # Wait for the operator to be up -while ! kubectl wait --for condition=ready -n cattle-ui-plugin-system $(kubectl get pods -n cattle-ui-plugin-system -l app.kubernetes.io/instance=ui-plugin-operator -o name) --timeout=10s; do sleep 2 ; done +while ! kubectl wait --for condition=ready -n cattle-ui-plugin-system \ + $(kubectl get pods -n cattle-ui-plugin-system \ + -l app.kubernetes.io/instance=ui-plugin-operator -o name) \ + --timeout=10s; do sleep 2 ; done # Deploy the elemental UI plugin # NOTE: TABs and then spaces... @@ -84,16 +89,18 @@ image::elemental-ui-plugin.png[width=90%] Elemental is managed by an operator deployed via Helm as: -[,console] +[,bash] ---- -helm upgrade --create-namespace -n cattle-elemental-system --install --set image.imagePullPolicy=Always elemental-operator oci://registry.opensuse.org/isv/rancher/elemental/dev/charts/rancher/elemental-operator-chart +helm upgrade --create-namespace -n cattle-elemental-system --install \ + --set image.imagePullPolicy=Always elemental-operator \ + oci://registry.opensuse.org/isv/rancher/elemental/dev/charts/rancher/elemental-operator-chart ---- https://github.com/rancher/elemental-operator/blob/main/chart/values.yaml[The values.yaml file have some variables interesting to see] After a few seconds you should see the operator pod appear on the `cattle-elemental-system` namespace: -[,console] +[,shell] ---- kubectl get pods -n cattle-elemental-system NAME READY STATUS RESTARTS AGE @@ -114,7 +121,7 @@ It is out of the scope of this document to provide an explanation about the reso In order to deploy more than one elemental machine, be sure that `spec.config.elemental.registration.emulated-tpm-seed=-1` is set in your `MachineRegistration` so the seed used for the TPM emulation is randomized per machine. Otherwise, you will get the same TPM Hash for all deployed machines and only the last one to be registered will be valid. See the official docs for http://elemental.docs.rancher.com/tpm[tpm] and http://elemental.docs.rancher.com/machineregistration-reference/#configelementalregistration[machineregistration] for more information. ==== -[,console] +[,yaml] ---- cat <<- EOF | kubectl apply -f - apiVersion: elemental.cattle.io/v1beta1 @@ -133,7 +140,7 @@ spec: EOF ---- -[,console] +[,yaml] ---- cat <<- EOF | kubectl apply -f - kind: Cluster @@ -165,7 +172,7 @@ spec: EOF ---- -[,console] +[,yaml] ---- cat <<- 'EOF' | kubectl apply -f - apiVersion: elemental.cattle.io/v1beta1 @@ -216,16 +223,17 @@ The steps below should to be ran in a linux machine (`SLE Micro` for example). 
First step is to download the `machineregistration` object that will instruct where to get the config for the node to be installed: -[,console] +[,bash] ---- -curl -k $(kubectl get machineregistration -n fleet-default my-nodes -o jsonpath="{.status.registrationURL}") -o livecd-cloud-config.yaml +curl -k $(kubectl get machineregistration -n fleet-default my-nodes \ + -o jsonpath="{.status.registrationURL}") -o livecd-cloud-config.yaml ---- image::registration-endpoint.png[width=90%] Then, the `rpi.raw` image is downloaded and checked the integrity just to be safe: -[,console] +[,bash] ---- curl -Lk https://download.opensuse.org/repositories/isv:/Rancher:/Elemental:/Stable:/Teal53/images/rpi.raw -o rpi.raw curl -Lk https://download.opensuse.org/repositories/isv:/Rancher:/Elemental:/Stable:/Teal53/images/rpi.raw.sha256 -o rpi.raw.sha256 @@ -234,7 +242,7 @@ sha256sum -c rpi.raw.sha256 Finally, the `livecd-cloud-config.yaml` file is injected in the vanilla `rpi.raw` image: -[,console] +[,bash] ---- IMAGE=rpi.raw DEST=$(mktemp -d) @@ -282,7 +290,7 @@ The operating system disk device should be the first one, then the USB, so the U After a while, a new `machineinventory` host will be present: -[,console] +[,yaml] ---- kubectl get machineinventory -n fleet-default m-ed0a3f46-d6f8-4737-9884-e3a898094994 -o yaml @@ -326,14 +334,16 @@ status: Finally, labeling the `machineinventory` of the discovered new host will trigger the installation: -[,console] +[,bash] ---- -kubectl -n fleet-default label machineinventory $(kubectl get machineinventory -n fleet-default --no-headers -o custom-columns=":metadata.name") location=europe +kubectl -n fleet-default label machineinventory $(kubectl get \ + machineinventory -n fleet-default --no-headers \ + -o custom-columns=":metadata.name") location=europe ---- image::new-cluster.png[width=90%] -[,console] +[,shell] ---- kubectl get cluster -n fleet-default NAME READY KUBECONFIG @@ -342,9 +352,10 @@ my-cluster true my-cluster-kubeconfig Profit! 
-[,console] +[,shell] ---- -kubectl get secret -n fleet-default my-cluster-kubeconfig -o jsonpath='{.data.value}' | base64 -d >> ~/my-cluster-kubeconfig +kubectl get secret -n fleet-default my-cluster-kubeconfig \ + -o jsonpath='{.data.value}' | base64 -d >> ~/my-cluster-kubeconfig KUBECONFIG=~/my-cluster-kubeconfig kubectl get nodes NAME STATUS ROLES AGE VERSION diff --git a/asciidoc/demo_setup/adoc/k3s-on-slemicro.adoc b/asciidoc/demo_setup/adoc/k3s-on-slemicro.adoc index 40853599..45cd0dad 100644 --- a/asciidoc/demo_setup/adoc/k3s-on-slemicro.adoc +++ b/asciidoc/demo_setup/adoc/k3s-on-slemicro.adoc @@ -28,7 +28,7 @@ On SLE Micro, the install script doesn't start the `k3s` or `k3s-agent` service The simplest way to run K3s is an all-in-one server (not suited for production environments) is by running: -[,console] +[,bash] ---- curl -sfL https://get.k3s.io | sh - ---- @@ -36,20 +36,22 @@ curl -sfL https://get.k3s.io | sh - A few environment variables to tweak our installation can be used as well as: -[,console] +[,bash] ---- -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --cluster-init --write-kubeconfig-mode=644" K3S_TOKEN=foobar sh - +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server \ + --cluster-init --write-kubeconfig-mode=644" K3S_TOKEN=foobar sh - ---- https://docs.k3s.io/installation/configuration#configuration-with-install-script[The settings can be specified either as environment variables, command line flags], a https://docs.k3s.io/installation/configuration#configuration-file[configuration file], or both, it is just a personal choice: -[,console] +[,bash] ---- -curl -sfL https://get.k3s.io | sh -s - server --token foobar --cluster-init --write-kubeconfig-mode=644 +curl -sfL https://get.k3s.io | sh -s - server --token foobar \ + --cluster-init --write-kubeconfig-mode=644 ---- -[,console] +[,yaml] ---- write-kubeconfig-mode: "0644" cluster-init: true @@ -68,36 +70,45 @@ The https://docs.k3s.io/cli[official] documentation explains all the flags in de Adding an agent is as simple as running the install script with a few parameters, including the URL of the cluster as: -[,console] +[,bash] ---- -curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=foobar sh - +curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 \ + K3S_TOKEN=foobar sh - ---- == K3s HA The easiest way to run a K3s HA cluster is by installing a first node using the `--cluster-init` flag and then, start adding nodes. 
-[,console] + +First node:: ++ +[,bash] ---- -# First node -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --cluster-init --write-kubeconfig-mode=644" K3S_TOKEN=foobar sh - +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --cluster-init \ + --write-kubeconfig-mode=644" K3S_TOKEN=foobar sh - ---- - -[,console] ++ +Rest of the nodes:: ++ +[,bash] ---- -# Rest of the nodes -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --server https://myserver:6443 --write-kubeconfig-mode=644" K3S_TOKEN=foobar sh - +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server \ + --server https://myserver:6443 --write-kubeconfig-mode=644" \ + K3S_TOKEN=foobar sh - ---- - -[,console] ++ +Agent nodes:: ++ +[,bash] ---- -# Agent nodes -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent --server https://myserver:6443" K3S_TOKEN=foobar sh - +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent \ + --server https://myserver:6443" K3S_TOKEN=foobar sh - ---- This is what a cluster with 3 control-plane nodes and 2 agents looks like: -[,console] +[,shell] ---- NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME cp01 Ready control-plane,etcd,master 2m26s v1.26.4+k3s1 192.168.205.99 SUSE Linux Enterprise Micro 5.4 5.14.21-150400.24.46-default containerd://1.6.19-k3s1 @@ -133,9 +144,11 @@ If you are using OSX to virtualize the SLE Micro OS where K3s is going to be ins The first step is to install K3s in HA and using the `--tls-san` flag as well. This flag can be repeated many times, so in this example will be used to add both the IP (`192.168.205.10` in this example) and the DNS name of the VIP (using https://sslip.io[sslip.io] as a poor's man DNS): -[,console] +[,bash] ---- -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --cluster-init --write-kubeconfig-mode=644 --tls-san=192.168.205.10 --tls-san=https://192.168.205.10.sslip.io" K3S_TOKEN=foobar sh - +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --cluster-init \ + --write-kubeconfig-mode=644 --tls-san=192.168.205.10 \ + --tls-san=https://192.168.205.10.sslip.io" K3S_TOKEN=foobar sh - ---- The rest of the nodes will be installed after kube-vip as the server URL for them to join the cluster will be the VIP. @@ -150,7 +163,7 @@ IPVS modules must be loaded in order for the https://kube-vip.io/docs/about/arch This is achieved by creating the following file: ==== -[,console] +[,bash] ---- cat <<- EOF > /etc/modules-load.d/ipvs.conf ip_vs @@ -164,7 +177,7 @@ EOF Configurations stored under `/etc/modules-load.d` will be automatically picked up and loaded on boot. Loading them for the first time, however, can be achieved without rebooting by executing: -[,console] +[,bash] ---- for i in $(cat /etc/modules-load.d/ipvs.conf); do modprobe ${i}; done ---- @@ -177,7 +190,7 @@ The Kubernetes resources can be created by leveraging https://docs.k3s.io/instal In this case, the `--services` flag for kube-vip won't be used. 
==== -[,console] +[,yaml] ---- export VIP=192.168.205.10 cat <<- EOF > /var/lib/rancher/k3s/server/manifests/kube-vip.yaml @@ -296,10 +309,11 @@ EOF Once kube-vip is in place, the rest of the control-plane nodes can be added to the cluster by pointing them to the VIP as: -[,console] +[,bash] ---- export VIP=192.168.205.10 -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --server https://${VIP}:6443 --write-kubeconfig-mode=644" K3S_TOKEN=foobar sh - +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server \ + --server https://${VIP}:6443 --write-kubeconfig-mode=644" K3S_TOKEN=foobar sh - ---- [IMPORTANT] @@ -316,16 +330,20 @@ The kubeconfig file that is generated as part of the installation has localhost [.tabs] SUSE:: + -[,console] +[,bash] ---- -scp 192.168.205.10:/etc/rancher/k3s/k3s.yaml ~/.kube/config && sed -i 's/127.0.0.1/192.168.205.10/g' ~/.kube/config && chmod 600 ~/.kube/config +scp 192.168.205.10:/etc/rancher/k3s/k3s.yaml ~/.kube/config && \ + sed -i 's/127.0.0.1/192.168.205.10/g' ~/.kube/config && \ + chmod 600 ~/.kube/config ---- + MacOS:: + -[,console] +[,bash] ---- -scp 192.168.205.10:/etc/rancher/k3s/k3s.yaml ~/.kube/config && sed -i '' 's/127.0.0.1/192.168.205.10/g' ~/.kube/config && chmod 600 ~/.kube/config +scp 192.168.205.10:/etc/rancher/k3s/k3s.yaml ~/.kube/config && \ + sed -i '' 's/127.0.0.1/192.168.205.10/g' ~/.kube/config \ + && chmod 600 ~/.kube/config ---- @@ -333,15 +351,16 @@ scp 192.168.205.10:/etc/rancher/k3s/k3s.yaml ~/.kube/config && sed -i '' 's/127. Agents can be added as usual, pointing to the VIP address as: -[,console] +[,bash] ---- export VIP=192.168.205.10 -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent --server https://${VIP}:6443" K3S_TOKEN=foobar sh - +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent \ + --server https://${VIP}:6443" K3S_TOKEN=foobar sh - ---- === Final picture -[,console] +[,shell] ---- kubectl get nodes -o jsonpath="{.items[*].status.addresses[?(@.type=='InternalIP')].address}" 192.168.205.69 192.168.205.70 192.168.205.71 192.168.205.72 192.168.205.73% @@ -356,9 +375,11 @@ As you can see, the control plane IP is the VIP and the nodes have their own IP. === Access Traefik dashboard -[,console] +[,bash] ---- -kubectl port-forward $(kubectl get pods --selector "app.kubernetes.io/name=traefik" -o=name -n kube-system) -n kube-system 9000:9000 +kubectl port-forward $(kubectl get pods \ + --selector "app.kubernetes.io/name=traefik" -o=name -n kube-system) \ + -n kube-system 9000:9000 ---- Then, browse http://localhost:9000/dashboard to observe the Traefik dashboard: diff --git a/asciidoc/demo_setup/adoc/slemicro-utm-aarch64.adoc b/asciidoc/demo_setup/adoc/slemicro-utm-aarch64.adoc index b59dc82d..8e54a431 100644 --- a/asciidoc/demo_setup/adoc/slemicro-utm-aarch64.adoc +++ b/asciidoc/demo_setup/adoc/slemicro-utm-aarch64.adoc @@ -121,7 +121,7 @@ For Ignition, the configuration file `config.ign` must reside in the `ignition` subdirectory on the configuration media labeled `ignition`. The directory structure must look as follows: -[,console] +[,shell] ---- └── ignition @@ -136,7 +136,7 @@ directory of the configuration medium, create a directory called files--SSH key, configuration files, etc. The directory structure then should look as follows: -[,console] +[,shell] ---- └── combustion @@ -149,7 +149,7 @@ label your configuration medium `ignition` and include the `ignition` directory with the `config.ign` to your directory structure as shown below: -[,console] +[,shell] ---- └── combustion @@ -185,14 +185,14 @@ file. 
** Search for `SUSE Linux Enterprise Micro` via the `Products` menu, select the arch/version then copy and manually activate the registration code * Butane, qemu and cdrtools installed (using brew for example) + -[,console] +[,bash] ---- brew install butane cdrtools qemu ---- * UTM installed (using brew for example) + -[,console] +[,bash] ---- brew install --cask utm ---- @@ -207,7 +207,7 @@ If using the previous script, it is required to install UTM * Uncompress the SLE Micro image + -[,console] +[,bash] ---- xz -d ~/Downloads/SLE-Micro.*-Default-GM.raw.xz ---- @@ -215,14 +215,14 @@ xz -d ~/Downloads/SLE-Micro.*-Default-GM.raw.xz * Move the file to a proper location and rename it to fit the VM hostname + -[,console] +[,bash] ---- cp ~/Downloads/SLE-Micro.*-Default-GM.raw ~/VMs/slemicro.raw ---- * Resize the image file. In this example, to 30G + -[,console] +[,bash] ---- qemu-img resize -f raw ~/VMs/slemicro.raw 30G > /dev/null ---- @@ -234,14 +234,14 @@ Combustion as explained before: * Create a temporary folder to store the assets + -[,console] +[,bash] ---- TMPDIR=$(mktemp -d) ---- * Create the required folders for ignition and combustion + -[,console] +[,bash] ---- mkdir -p ${TMPDIR}/{combustion,ignition} ---- @@ -250,7 +250,7 @@ mkdir -p ${TMPDIR}/{combustion,ignition} following example to set a `root` password for the root user, and to configure the hostname to be "slemicro"' + -[,console] +[,yaml] ---- cat << 'EOF' > ${TMPDIR}/config.fcc variant: fcos @@ -273,7 +273,7 @@ EOF example to register the SLE Micro instance to SUSE's SCC (use your own email/regcode) and to create a `/etc/issue.d/combustion` file + -[,console] +[,bash] ---- cat << EOF > ${TMPDIR}/combustion/script #!/bin/bash @@ -299,7 +299,7 @@ EOF * Convert the butane config to ignition + -[,console] +[,bash] ---- butane -p -o ${TMPDIR}/ignition/config.ign ${TMPDIR}/config.fcc ---- @@ -308,16 +308,17 @@ butane -p -o ${TMPDIR}/ignition/config.ign ${TMPDIR}/config.fcc to work that the ISO is labeled as `ignition` (hence the -V parameter) + -[,console] +[,bash] ---- -mkisofs -full-iso9660-filenames -o ignition-and-combustion.iso -V ignition ${TMPDIR} +mkisofs -full-iso9660-filenames -o ignition-and-combustion.iso \ + -V ignition ${TMPDIR} ---- * *Optional:* Remove the temporary folder + -[,console] +[,bash] ---- -rm -Rf ${TMPDIR} +rm -rf ${TMPDIR} ---- === VM Creation diff --git a/asciidoc/demo_setup/adoc/slemicro-virt-install-x86_64.adoc b/asciidoc/demo_setup/adoc/slemicro-virt-install-x86_64.adoc index ebb44fb7..042dfab3 100644 --- a/asciidoc/demo_setup/adoc/slemicro-virt-install-x86_64.adoc +++ b/asciidoc/demo_setup/adoc/slemicro-virt-install-x86_64.adoc @@ -40,28 +40,28 @@ If you are trying to download to a remote server, you can use scp to copy that f ** Search for `SUSE Linux Enterprise Micro` via the `Products` menu, select the arch/version then copy and manually activate the registration code * Butane, qemu-img and cdrtools installed (using zypper for example) -[,console] +[,bash] ---- sudo zypper install butane qemu-tools xz mkisofs ---- * Unzip the file -[,console] +[,bash] ---- xz -d SLE-Micro.x86_64-5.4.0-Default-GM.raw.xz ---- * Resize the image file. 
In this example, to 30G -[,console] +[,bash] ---- qemu-img resize -f raw ~/PATH-TO-FILE/SLE-Micro.x86_64-5.4.0-Default-GM.raw 30G > /dev/null ---- === Convert the raw image to qcow2 -[,console] +[,bash] ---- qemu-img convert -O qcow2 SLE-Micro.x86_64-5.4.0-Default-GM.raw slemicro ---- @@ -73,14 +73,14 @@ Combustion as explained before: * Create a temporary folder to store the assets + -[,console] +[,bash] ---- TMPDIR=$(mktemp -d) ---- * Create the required folders for ignition and combustion + -[,console] +[,bash] ---- mkdir -p ${TMPDIR}/{combustion,ignition} ---- @@ -89,7 +89,7 @@ mkdir -p ${TMPDIR}/{combustion,ignition} following example to set a `root` password for the root user, and to configure the hostname to be "slemicro"' + -[,console] +[,yaml] ---- cat << 'EOF' > ${TMPDIR}/config.fcc variant: fcos @@ -112,7 +112,7 @@ EOF example to register the SLE Micro instance to SUSE's SCC (use your own email/regcode) and to create a `/etc/issue.d/combustion` file + -[,console] +[,bash] ---- cat << EOF > ${TMPDIR}/combustion/script #!/bin/bash @@ -138,7 +138,7 @@ EOF * Convert the butane config to ignition + -[,console] +[,bash] ---- butane -p -o ${TMPDIR}/ignition/config.ign ${TMPDIR}/config.fcc ---- @@ -147,23 +147,25 @@ butane -p -o ${TMPDIR}/ignition/config.ign ${TMPDIR}/config.fcc to work that the ISO is labeled as `ignition` (hence the -V parameter) + -[,console] +[,bash] ---- mkisofs -full-iso9660-filenames -o ignition-and-combustion.iso -V ignition ${TMPDIR} ---- * *Optional:* Remove the temporary folder + -[,console] +[,bash] ---- -rm -Rf ${TMPDIR} +rm -rf ${TMPDIR} ---- === Create the VM -[,console] +[,bash] ---- -virt-install --name MyVM --memory 4096 --vcpus 4 --disk ./slemicro --import --cdrom ./ignition-and-combustion.iso --network default --osinfo detect=on,name=sle-unknown +virt-install --name MyVM --memory 4096 --vcpus 4 --disk ./slemicro \ + --import --cdrom ./ignition-and-combustion.iso --network default \ + --osinfo detect=on,name=sle-unknown ---- [NOTE] @@ -171,8 +173,8 @@ virt-install --name MyVM --memory 4096 --vcpus 4 --disk ./slemicro --import --cd * Pass the `--noautoconsole` flag in case your console hangs on the installation, this will allow you to run other commands without CTRL-C interrupt * Pass the `--debug` flag if you run into issues * If you run into an issue and you need to restart, or if you get an error saying that MyVM is already running, run this command: -==== - + +[,bash] ---- virsh destroy MyVM ; virsh undefine MyVM ---- @@ -181,19 +183,20 @@ After a couple of seconds, the VM will boot up and will configure itself using the ignition and combustion scripts, including registering itself to SCC -[,console] +[,shell] ---- virsh list Id Nombre State ---------------------------------- 14 MyVM running ---- +==== === Access to the vm You can access to the VM using virsh console: -[,console] +[,shell] ---- virsh console MyVM @@ -202,7 +205,7 @@ Connected to domain MyVM or using ssh directly and the user set in the ignition file (in this case root) -[,console] +[,shell] ---- virsh domifaddr MyVM Nombre MAC address Protocol Address @@ -214,7 +217,7 @@ ssh root@192.168.122.221 === Delete the VM -[,console] +[,bash] ---- virsh destroy MyVM ; virsh undefine MyVM ---- diff --git a/asciidoc/integrations/adoc/create-package-obs.adoc b/asciidoc/integrations/adoc/create-package-obs.adoc index 7a75c9a1..3512bae0 100644 --- a/asciidoc/integrations/adoc/create-package-obs.adoc +++ b/asciidoc/integrations/adoc/create-package-obs.adoc @@ -23,21 +23,21 @@ To install `osc`: 
[.tabs] SUSE:: + -[,console] +[,bash] ---- zypper install osc ---- + MacOS:: + -[,console] +[,bash] ---- brew install osc ---- + PIP:: + -[,console] +[,bash] ---- pip install osc ---- @@ -47,7 +47,7 @@ pip install osc We are going to create a project under your home namespace, this will bring up your editor to configure it right away. -[,console] +[,bash] ---- osc meta prj -e "home:$USERNAME:$PROJECTNAME" ---- @@ -98,7 +98,7 @@ In the editor you can now fill the metadata to look similar to this: If you want to build containers you need to tweak the configuration of the project as well: -[,console] +[,bash] ---- osc meta prjconf -e "home:$USERNAME:$PROJECTNAME" ---- @@ -109,7 +109,7 @@ build system: [.tabs] Dockerfile:: + -[,console] +[,shell] ---- %if "%_repository" == "containers" Type: docker @@ -121,7 +121,7 @@ BuildEngine: podman + KIWI:: + -[,console] +[,shell] ---- %if "%_repository" == "containers" Type: kiwi @@ -141,7 +141,7 @@ If you want to build containers using both KIWI and `Dockerfiles` in the same pr To create a package in your project use the following command: -[,console] +[,bash] ---- osc meta pkg -e home:$USERNAME:$PROJECTNAME $PACKAGENAME ---- @@ -150,14 +150,14 @@ There you'll get another XML file to edit, you only have to set a title and desc Now you can checkout the directory to start adding your files: -[,console] +[,bash] ---- osc co home:$USERNAME:$PROJECTNAME/$PACKAGENAME ---- Now go into the directory and when all is ready you can add your files and commit using: -[,console] +[,bash] ---- osc add ... osc ci @@ -244,7 +244,7 @@ You now have to tell OBS about the name and tag of your image: Dockerfile:: You can use one or multiple `BuildTag` as comments in your `Dockerfile` like this: + -[,console] +[,shell] ---- #!BuildTag: foo/bar:latest foo/bar:%PKG_VERSION%.%RELEASE% #!BuildTag: foo/bar:tag foo/bar:anothertag diff --git a/asciidoc/integrations/adoc/nats.adoc b/asciidoc/integrations/adoc/nats.adoc index 053e9e55..7e415044 100644 --- a/asciidoc/integrations/adoc/nats.adoc +++ b/asciidoc/integrations/adoc/nats.adoc @@ -37,7 +37,7 @@ NATS is built for multiple architectures, so it can easily be installed on the h Let's create a values file that will be used for overwriting the default values of NATS. -[,console] +[,yaml] ---- cat > values.yaml < with the actual IP of the node where the K3s will be started +Replace with the actual IP of the node where the K3s will be started + +[,bash] +---- export NODE_IP= sudo scp dist/artifacts/k3s-arm64 ${NODE_IP}:/usr/local/bin/k3s ---- @@ -137,7 +152,7 @@ It can be https://github.com/docker/buildx#manual-download[manually installed] i ==== Install NATS CLI -[,local] +[,bash] ---- TMPDIR=$(mktemp -d) nats_version="nats-0.0.35-linux-arm64" @@ -158,7 +173,7 @@ The command below will start K3s as a foreground process, so the logs can be eas If you want to not block the current terminal a `&` flag could be added before the command to start it as a background process. ==== -[,console] +[,bash] ---- k3s server --datastore-endpoint=nats:// ---- @@ -168,19 +183,20 @@ k3s server --datastore-endpoint=nats:// For making the K3s server with the NATS backend permanent on your `slemicro` VM, the script below can be run, which will create a `systemd` service with the needed configurations. 
==== -[,console] +[,bash] ---- export INSTALL_K3S_SKIP_START=false export INSTALL_K3S_SKIP_DOWNLOAD=true -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --datastore-endpoint=nats://" sh - +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server \ + --datastore-endpoint=nats://" sh - ---- ==== Troubleshooting The following commands can be run on the node to verify that everything with the stream is working properly: -[,console] +[,bash] ---- nats str report -a nats str view -a diff --git a/asciidoc/integrations/adoc/nvidia-slemicro.adoc b/asciidoc/integrations/adoc/nvidia-slemicro.adoc index d8c649c1..8d105872 100644 --- a/asciidoc/integrations/adoc/nvidia-slemicro.adoc +++ b/asciidoc/integrations/adoc/nvidia-slemicro.adoc @@ -31,24 +31,27 @@ In addition, the example below calls for _535.86.05_ of the driver; please make When you've confirmed the above, you're ready to install the packages on the host operating system, and for this we need to open up a `transactional-update` session, which creates a new read/write snapshot of the underlying operating system so we can make changes to the immutable platform (for further instructions on `transactional-update` see https://documentation.suse.com/sle-micro/5.4/html/SLE-Micro-all/sec-transactional-udate.html[here]): -[,console] +[,bash] ---- transactional-update shell ---- When you're in your `transactional-update` shell, add the additional required package repositories from NVIDIA; this will allow us to pull in additional utilities, e.g. `nvidia-smi`, along with access to CUDA packages that you may want to utilise: -[,console] +[,bash] ---- -zypper ar https://developer.download.nvidia.com/compute/cuda/repos/sles15/x86_64/ nvidia-sle15sp4-cuda +zypper ar \ + https://developer.download.nvidia.com/compute/cuda/repos/sles15/x86_64/ \ + nvidia-sle15sp4-cuda zypper ar https://download.nvidia.com/suse/sle15sp4/ nvidia-sle15sp4-main ---- You can then install the driver and the `nvidia-compute-utils` for additional utilities: -[,console] +[,bash] ---- -zypper install -y nvidia-open-driver-G06-signed-kmp=535.86.05 kernel-firmware-nvidia-gspx-G06 nvidia-compute-utils-G06 +zypper install -y nvidia-open-driver-G06-signed-kmp=535.86.05 \ + kernel-firmware-nvidia-gspx-G06 nvidia-compute-utils-G06 ---- [TIP] @@ -58,14 +61,15 @@ If this fails to install it's likely that there's a dependency mismatch between Next, if you're _not_ using a supported GPU, remembering that the list can be found https://github.com/NVIDIA/open-gpu-kernel-modules#compatible-gpus[here], you can see if the driver will work by enabling support at the module level, but your mileage may vary -- skip this step if you're using a _supported_ GPU: -[,console] +[,bash] ---- -sed -i '/NVreg_OpenRmEnableUnsupportedGpus/s/^#//g' /etc/modprobe.d/50-nvidia-default.conf +sed -i '/NVreg_OpenRmEnableUnsupportedGpus/s/^#//g' \ + /etc/modprobe.d/50-nvidia-default.conf ---- Now that you've installed these packages, it's time to exit the `transactional-update` session: -[,console] +[,bash] ---- exit ---- @@ -77,21 +81,21 @@ Please make sure that you've exited the `transactional-update` session before pr Now that you've got your drivers installed, it's time to reboot, as SLE Micro is an immutable operating system it needs to reboot into the new snapshot that you created in a previous step; the drivers are only installed into this new snapshot, and hence it's not possible to load the drivers without rebooting into this new snapshot, which will happen automatically. 
Issue the reboot command when you're ready: -[,console] +[,bash] ---- reboot ---- Once the system has rebooted successfully, log back in and try to use the `nvidia-smi` tool to verify that the driver is loaded successfully and that it's able to both access and enumerate your GPU(s): -[,console] +[,bash] ---- nvidia-smi ---- The output of this command should show you something similar to the following output, noting that in the example below we have two GPU's: -[,console] +[,shell] ---- Mon Sep 18 06:58:12 2023 +---------------------------------------------------------------------------------------+ @@ -125,21 +129,21 @@ Mon Sep 18 06:58:12 2023 At this stage, all we've been able to verify is that at the host level the NVIDIA device can be accessed and that the drivers are loading successfully. However, if we want to be sure that it's functioning, a simple test would be to try and validate that the GPU can take instruction from a user-space application, ideally via a container, and through the CUDA library, as that's typically what a real workload would utilise. For this, we can make a further modification to the host OS by installing the `nvidia-container-toolkit`. First, open up another `transactional-update` shell, noting that we could have done this in a single transaction in the previous step, but to many (e.g. customers wanting to use Kubernetes) this step won't be required: -[,console] +[,bash] ---- transactional-update shell ---- Next, install the `nvidia-container-toolkit` package, which comes from one of the repo's that we configured in a previous step. Note that this command will initially appear to fail as it has a dependency on `libseccomp`, whereas this package is `libseccomp2` in SLE Micro, so you can safely select the second option ("break dependencies") here: -[,console] +[,bash] ---- zypper in install nvidia-container-toolkit ---- Your output should look like the following: -[,console] +[,shell] ---- Refreshing service 'SUSE_Linux_Enterprise_Micro_5.4_x86_64'. Refreshing service 'SUSE_Linux_Enterprise_Micro_x86_64'. 
@@ -163,14 +167,14 @@ We're working on fixing this dependency issue, so this should be a lot cleaner i When you're ready, you can exit the `transactional-update` shell: -[,console] +[,bash] ---- exit ---- ...and reboot the machine into the new snapshot: -[,console] +[,bash] ---- reboot ---- @@ -182,36 +186,40 @@ As before, you will need to ensure that you've exited the `transactional-shell` Now that the machine has rebooted, you can validate that the system is able to successfully enumerate the devices via the NVIDIA container toolkit (the output should be verbose, and it should provide a number of INFO and WARN messages, but no ERROR messages): -[,console] +[,bash] ---- nvidia-ctk cdi generate --output=/etc/cdi/nvidia.yaml ---- When ready, you can then run a podman-based container (doing this via `podman` gives us a good way of validating access to the NVIDIA device from within a container, which should give confidence for doing the same with Kubernetes), giving it access to the labelled NVIDIA device(s) that were taken care of by the previous command, based on https://registry.suse.com/bci/bci-base-15sp5/index.html[SLE BCI] and simply running bash: -[,cosnsole] +[,bash] ---- -podman run --rm --device nvidia.com/gpu=all --security-opt=label=disable -it registry.suse.com/bci/bci-base:latest bash +podman run --rm --device nvidia.com/gpu=all --security-opt=label=disable \ + -it registry.suse.com/bci/bci-base:latest bash ---- When we're in the temporary podman container we can install the required CUDA libraries, again checking the correct CUDA version for your driver https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/[here] although the previous output of `nvidia-smi` should show the required CUDA version. In the example below we're installing _CUDA 12.1_ and we're pulling a large number of examples, demo's, and development kits so you can fully validate the GPU: -[,console] +[,bash] ---- -zypper ar http://developer.download.nvidia.com/compute/cuda/repos/sles15/x86_64/ cuda-sle15-sp4 -zypper in -y cuda-libraries-devel-12-1 cuda-minimal-build-12-1 cuda-demo-suite-12-1 +zypper ar \ + http://developer.download.nvidia.com/compute/cuda/repos/sles15/x86_64/ \ + cuda-sle15-sp4 +zypper in -y cuda-libraries-devel-12-1 cuda-minimal-build-12-1 \ + cuda-demo-suite-12-1 ---- Once this has been installed successfully, don't exit from the container, we'll run the `deviceQuery` CUDA example, which will comprehensively validate GPU access via CUDA, and from within the container itself: -[,console] +[,shell] ---- /usr/local/cuda-12/extras/demo_suite/deviceQuery ---- If successful, you should see output that shows similar to the following, noting the `Result = PASS` message at the end of the command: -[,console] +[,shell] ---- /usr/local/cuda-12/extras/demo_suite/deviceQuery Starting... @@ -299,7 +307,7 @@ Result = PASS From here, you can continue to run any other CUDA workload - you can utilise compilers, and any other aspect of the CUDA ecosystem to run some further tests. When you're done you can exit from the container, noting that whatever you've installed in there is ephemeral (so will be lost!), and hasn't impacted the underlying operating system: -[,console] +[,bash] ---- exit ---- @@ -314,9 +322,10 @@ exit Check the kernel messages using `dmesg`. 
In case this indicates that it fails to allocate `NvKMSKapDevice`, then apply the unsupported GPU workaround: -[,console] +[,bash] ---- -transactional-update run sed -i '/NVreg_OpenRmEnableUnsupportedGpus/s/^#//g' /etc/modprobe.d/50-nvidia-default.conf +transactional-update run sed -i '/NVreg_OpenRmEnableUnsupportedGpus/s/^#//g' \ + /etc/modprobe.d/50-nvidia-default.conf ---- [IMPORTANT] ==== diff --git a/asciidoc/misc/adoc/create-a-simple-container-image-obs.adoc b/asciidoc/misc/adoc/create-a-simple-container-image-obs.adoc index a461d052..221a029d 100644 --- a/asciidoc/misc/adoc/create-a-simple-container-image-obs.adoc +++ b/asciidoc/misc/adoc/create-a-simple-container-image-obs.adoc @@ -21,7 +21,7 @@ In this case it will be a _subproject_ of the "home:foobar" project . Select the "Project Config" tab . Paste the following code: + -[,console] +[,shell] ---- %if "%_repository" == "images" Type: docker @@ -51,7 +51,7 @@ BuildEngine: podman Create a simple Dockerfile locally, something like: -[,console] +[,shell] ---- # The container image tag needs to be specified as follows: #!BuildTag: mytoolbox:latest diff --git a/asciidoc/misc/adoc/create-a-single-iso-image-customized.adoc b/asciidoc/misc/adoc/create-a-single-iso-image-customized.adoc index 5d34fe70..d65af9f8 100644 --- a/asciidoc/misc/adoc/create-a-single-iso-image-customized.adoc +++ b/asciidoc/misc/adoc/create-a-single-iso-image-customized.adoc @@ -44,9 +44,9 @@ Now, for any modification to the combustion package, the SLE Micro image will be To modify the combustion package, we need to go to the `combustion` package and then download the next file: -[,console] +[,bash] ---- -osc getbinaries home::branches:SUSE:SLE-15-SP4:Update:Products:Micro54\ +osc getbinaries home::branches:SUSE:SLE-15-SP4:Update:Products:Micro54 \ combustion standard x86_64 combustion-1.0+git2.obscpio ---- @@ -54,14 +54,14 @@ This file contains the combustion image that will be used by the SLE Micro insta To extract the content of this file, we need to execute the next command: -[,console] +[,bash] ---- cpio -idmv < combustion-1.0+git2.obscpio ---- After that, we should see something like: -[,console] +[,shell] ---- $ ls -l total 68 @@ -82,14 +82,14 @@ Let's change the next things: * *Timeout* to wait for the config drive from 10 to 15 seconds -[,console] +[,bash] ---- sed -i 's/devtimeout=10/devtimeout=15/g' module-setup.sh ---- * *Combustion labels* to be able to mount the config drive adding the labels `install` and `INSTALL` -[,console] +[,bash] ---- ... ... 
@@ -100,14 +100,14 @@ for label in combustion COMBUSTION ignition IGNITION install INSTALL; do After changing the code, we need to create a new `combustion-1.0+git2.obscpio` file: -[,console] +[,bash] ---- find combustion-1.0+git2 -type f -print | cpio -ocv > combustion-1.0+git2.obscpio ---- And upload again to the combustion package OBS to build a new package with the modifications -[,console] +[,bash] ---- osc add combustion-1.0+git2.obscpio osc commit -m "Update combustion-1.0+git2.obscpio" @@ -115,7 +115,7 @@ osc commit -m "Update combustion-1.0+git2.obscpio" After that you should see a new build is running: -[,console] +[,bash] ---- osc results ---- @@ -143,12 +143,12 @@ Now, we should have the next files to generate the final `single-iso` image: Using `xorriso` we will create the final single-iso: -[,console] +[,bash] ---- xorriso -indev ./SLE-Micro.x86_64-5.4.0-Default-SelfInstall-Build15.1.install.iso \ - -outdev ./SLE-Micro-Selfinstall-with-mycombustion-single-iso.iso \ - -map ~/my-local-path/combustion /combustion \ - -boot_image any replay -changes_pending yes + -outdev ./SLE-Micro-Selfinstall-with-mycombustion-single-iso.iso \ + -map ~/my-local-path/combustion /combustion \ + -boot_image any replay -changes_pending yes ---- After that, we should have the final iso image with the combustion script included `SLE-Micro-Selfinstall-with-mycombustion-single-iso.iso` diff --git a/asciidoc/misc/adoc/metallb-kube-api.adoc b/asciidoc/misc/adoc/metallb-kube-api.adoc index c7101e52..c82b9986 100644 --- a/asciidoc/misc/adoc/metallb-kube-api.adoc +++ b/asciidoc/misc/adoc/metallb-kube-api.adoc @@ -25,7 +25,7 @@ First a free IP in the network must be reserved that will be used later for `Ext SSH to the first host and install `K3s` in cluster mode as: -[,console] +[,bash] ---- # Export the free IP mentioned above export VIP_SERVICE_IP= @@ -48,7 +48,7 @@ From now on, the commands should be run on the local machine. In order to access the API server from outside, the IP of the K3s VM will be used. -[,console] +[,bash] ---- # Replace with the actual IP of the machine export NODE_IP= @@ -69,7 +69,7 @@ In order to change the K3s flags, `/etc/systemd/system/k3s.service` should be mo The flags should be inserted in the `ExecStart`. For example: -[,console] +[,shell] ---- # Replace the with the actual ip ExecStart=/usr/local/bin/k3s \ @@ -83,7 +83,7 @@ ExecStart=/usr/local/bin/k3s \ Then the following commands should be executed in order for K3S to load the new configurations: -[,console] +[,bash] ---- systemctl daemon-reload systemctl restart k3s @@ -97,7 +97,7 @@ To deploy `MetalLB`, the https://suse-edge.github.io/docs/quickstart/metallb[Met Create a separate `IpAddressPool` that will be used only for the managed Service. 
-[,console] +[,yaml] ---- # Export the VIP_SERVICE_IP on the local machine # Replace with the actual IP @@ -119,7 +119,7 @@ spec: EOF ---- -[,console] +[,yaml] ---- cat <<-EOF | kubectl apply -f - apiVersion: metallb.io/v1beta1 @@ -136,7 +136,7 @@ EOF == Install the Endpoint Copier Operator -[,console] +[,bash] ---- helm repo add endpoint-copier-operator \ https://suse-edge.github.io/endpoint-copier-operator @@ -153,7 +153,7 @@ The command above will deploy three different resources in the cluster: Verify that the `kubernetes-vip` Service has the correct IP address: -[,console] +[,bash] ---- kubectl get service kubernetes-vip -n default \ -o=jsonpath='{.status.loadBalancer.ingress[0].ip}' @@ -161,14 +161,14 @@ kubectl get service kubernetes-vip -n default \ Ensure that the `kubernetes-vip` and `kubernetes` Endpoints resources in the `default` namespace point to the same IPs. -[,console] +[,bash] ---- kubectl get endpoints kubernetes kubernetes-vip ---- If everything is correct, the last thing left is to use the `VIP_SERVICE_IP` in our `Kubeconfig`. -[,console] +[,bash] ---- sed -i '' "s/${NODE_IP}/${VIP_SERVICE_IP}/g" ~/.kube/config ---- @@ -181,21 +181,21 @@ To monitor the entire process, two more terminal tabs can be opened. First terminal: -[,console] +[,bash] ---- watch kubectl get nodes ---- Second terminal: -[,console] +[,bash] ---- watch kubectl get endpoints ---- Now execute the commands below on the second and third nodes. -[,console] +[,bash] ---- # Export the VIP_SERVICE_IP in the VM # Replace with the actual IP diff --git a/asciidoc/misc/adoc/modify-sle-micro-iso.adoc b/asciidoc/misc/adoc/modify-sle-micro-iso.adoc index e4843b60..95effad8 100644 --- a/asciidoc/misc/adoc/modify-sle-micro-iso.adoc +++ b/asciidoc/misc/adoc/modify-sle-micro-iso.adoc @@ -18,7 +18,7 @@ Imagine you want to modify the `/boot/grub2/grub.cfg` file. You just need to: . mount the ISO somewhere + -[,console] +[,bash] ---- ISO=${${HOME}/SLE-Micro.x86_64-5.4.0-Default-SelfInstall-GM.install.iso} DIR=$(mktemp -d) @@ -27,7 +27,7 @@ sudo mount ${ISO} ${DIR} + . extract the file + -[,console] +[,bash] ---- cp ${DIR}/boot/grub2/grub.cfg /tmp/mygrub.cfg ---- @@ -35,7 +35,7 @@ cp ${DIR}/boot/grub2/grub.cfg /tmp/mygrub.cfg . perform the modifications as needed . Umount the ISO (not really needed) + -[,console] +[,bash] ---- sudo umount ${DIR} rmdir ${DIR} @@ -43,7 +43,7 @@ rmdir ${DIR} + . 
rebuild the ISO as
+
-[,console]
+[,shell]
----
xorriso -indev ${ISO} -outdev SLE-Micro-tweaked.iso -map /tmp/mygrub.cfg \
   /boot/grub2/grub.cfg -boot_image any replay
diff --git a/asciidoc/misc/adoc/rancher-disambiguation.adoc b/asciidoc/misc/adoc/rancher-disambiguation.adoc
index 0156785a..9b273128 100644
--- a/asciidoc/misc/adoc/rancher-disambiguation.adoc
+++ b/asciidoc/misc/adoc/rancher-disambiguation.adoc
@@ -6,50 +6,50 @@ https://www.rancher.com/[Rancher] ecosystem host a few projects under its umbrel

RKE1, RKE2 and K3s are flavours of Kubernetes; Rancher Manager can be used to manage and provision different deployments of Kubernetes itself with a primary focus on RKE1/RKE2; Fleet can watch Git repositories, detect changes and tell Kubernetes what it needs to be running; Elemental takes a specific approach to provisioning Kubernetes in Edge scenarios where the provisioning can be preloaded at the OS level for Rancher Manager to control later.

-=== Rancher
+== Rancher

https://www.rancher.com/products/rancher[Rancher] (or Rancher Manager) is a multi-cluster management solution for provisioning, managing and accessing multiple downstream Kubernetes clusters.

To provision new clusters, Rancher can interact with different infrastructure and virtualization tools (vSphere/AWS etc.) as an API client, requesting VMs and networks and setting up a Kubernetes cluster inside of those; it also works with bare-metal machines by generating a join command you can run each time.

-=== Fleet
+== Fleet

https://fleet.rancher.io/[Fleet] is usually a component of https://www.rancher.com/products/rancher[Rancher] (although it can be used independently) that allows you to use a GitOps workflow for multi-cluster deployments (i.e. it allows you to define your Git repositories and the clusters they should apply to at the management cluster level).

-=== Elemental
+== Elemental

https://elemental.docs.rancher.com/[Elemental] is a way to automatically deploy/register new clusters and manage the OS of their nodes: you can define clusters and their nodes on the management cluster, then generate an OS installer image; when booting your node from that image, it will install the node, register it to the manager and configure it for its role in the local cluster. This is the SUSE/Rancher way of doing zero-touch provisioning.

Elemental takes a different view of cluster provisioning, focused on Edge deployments. Typically Rancher services datacentre deployments of Kubernetes with enterprise servers etc.; in an Edge scenario, e.g. a factory or cruise ship, there's no guarantee of access for Rancher to contact and provision a cluster directly (i.e. 
limited bandwidth, firewalls etc) - Elemental instead is used to preload an operating system with all the information needed to set the cluster up; you can install that into the servers that you want to cluster and then it will reach back to Rancher to be under management at that point.

-=== Kubernetes
+== Kubernetes

https://kubernetes.io/[Kubernetes] as a standard and core technology is really a cross-industry effort like Linux and has become core to DevOps as a cultural movement - as it enables defining and deploying your infrastructure as code, with lots of automation for extensive business continuity and high availability.

Kubernetes is a receptacle though - it runs what you tell it to run. Usually people use automation to tell it what to do, and this requires some kind of application to detect application configuration and apply it to Kubernetes - usually this is fulfilled through developer pipelines (CI/CD) where things are deployed as they are developed.

-=== Kubernetes distributions
+== Kubernetes distributions

Kubernetes distributions, like Linux OSes, come in different flavours; RKE and RKE2 are two different flavours of Kubernetes in this manner, but like Ubuntu vs SUSE do for an OS, they are ultimately just packaging an implementation of Kubernetes. Other examples include EKS, AKS and GKE, which are flavours produced by AWS, Azure and GCP respectively.

When we say a Kubernetes cluster we mean a specific instance of a distribution installed on servers that are managed as a group (each server being a node in the cluster).

-=== K3Ss
+== K3Ss

https://docs.k3s.io/[K3s] is a fully compliant and lightweight Kubernetes distribution focused on Edge, IoT, ARM or just for situations where a PhD in K8s clusterology is infeasible.

-=== RKE (or RKE1)
+== RKE (or RKE1)

https://www.rancher.com/products/rke[Rancher Kubernetes Engine] is a Kubernetes distribution that uses an older architecture and relies on Docker Engine to run containers.

-=== RKE2
+== RKE2

https://docs.rke2.io/[RKE2], also known as RKE Government, is Rancher's next-generation Kubernetes distribution that uses a newer architecture based on ContainerD.

RKE2 combines the best of both worlds from the 1.x version of RKE (hereafter referred to as RKE1) and K3s.
From K3s, it inherits the usability, ease-of-operations, and deployment model.
From RKE1, it inherits close alignment with upstream Kubernetes.
In places K3s has diverged from upstream Kubernetes in order to optimize for edge deployments, but RKE1 and RKE2 can stay closely aligned with upstream.

-=== RKE2 using Air-gap install
+== RKE2 using Air-gap install

https://docs.rke2.io/install/airgap[Air-gap install] is an RKE2 installation where all package dependencies are installed using one of two methods: using the tarball release 'rke2-airgap-images', or by using a private registry and passing the parameter 'system-default-registry' during the installation to point directly to the private registry where the images are located (as a mirror for docker.io).

-=== Rancher vs K3s vs RKE
+== Rancher vs K3s vs RKE

You don't need Rancher to set up K3s or RKE1 or RKE2 on their own; it just makes the whole process easier. 
Rancher runs as a Management Interface that can interact with running clusters and also provision new clusters - as well as manage authentication to the downstream clusters, and it can also do other things like interact with applications that kubernetes is orchestrating and provides monitoring tools diff --git a/asciidoc/misc/adoc/rke2-selinux.adoc b/asciidoc/misc/adoc/rke2-selinux.adoc index 95903490..159ecfec 100644 --- a/asciidoc/misc/adoc/rke2-selinux.adoc +++ b/asciidoc/misc/adoc/rke2-selinux.adoc @@ -15,7 +15,7 @@ include::../../common/attributes.adoc[] Once we've got the VM started and running, let's prepare the config to enable SELinux mode in the RKE2 configuration file: -[,console] +[,bash] ---- mkdir -p /etc/rancher/rke2 && echo "selinux: true" \ >> /etc/rancher/rke2/config.yaml @@ -23,7 +23,7 @@ mkdir -p /etc/rancher/rke2 && echo "selinux: true" \ Install RKE2 cluster -[,console] +[,bash] ---- curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=stable \ INSTALL_RKE2_METHOD=rpm RKE2_SELINUX=true sh - @@ -34,7 +34,7 @@ systemctl enable rke2-server.service Now, the VM should be rebooted for the transactional-update to finish properly: -[,console] +[,bash] ---- reboot ---- @@ -49,7 +49,7 @@ The first thing that will be installed is the https://github.com/rancher/rke2-se Let's connect to the VM and run: -[,console] +[,bash] ---- cat >> install-selinux.sh << 'END' #!/bin/bash @@ -69,14 +69,14 @@ chmod +x install-selinux.sh && transactional-update run /root/install-selinux.sh Now, the VM should be rebooted for the transactional-update to finish properly: -[,console] +[,bash] ---- reboot ---- After restarting the VM, we can verify that the policy was successfully installed as follows: -[,console] +[,bash] ---- rpm -qa | grep rke2 ---- @@ -89,7 +89,7 @@ As the `rke2-policy` was installed manually on the VM, some of its paths may not Let's connect to the VM and run: -[,console] +[,bash] ---- mkdir -p /var/lib/cni mkdir -p /opt/cni @@ -110,14 +110,14 @@ restorecon -R /var/run/flannel It's time for the RKE2 cluster to be installed but before that, RKE2 must be running Selinux mode: -[,console] +[,bash] ---- mkdir -p /etc/rancher/rke2 && echo "selinux: true" >> /etc/rancher/rke2/config.yaml ---- Install RKE2 Using Install Script -[,console] +[,bash] ---- curl -sfL https://get.rke2.io | INSTALL_RKE2_EXEC="server" \ RKE2_SELINUX=true INSTALL_RKE2_VERSION=v1.27.3+rke2r1 sh - @@ -135,7 +135,7 @@ RKE2 version `1.27` is the first that supports `arm64` architecture and it is st To use the Kubeconfig outside of the node, the following commands can be used: -[,console] +[,bash] ---- # Replace with the actual ip export NODE_IP= @@ -148,14 +148,14 @@ sudo scp ${NODE_IP}:/etc/rancher/rke2/rke2.yaml ~/.kube/config && \ Check SELinux status: -[,console] +[,bash] ---- sestatus ---- The output should be similar to this one: -[,console] +[,shell] ---- SELinux status: enabled SELinuxfs mount: /sys/fs/selinux @@ -171,14 +171,14 @@ Max kernel policy version: 33 Check that all pods are in Running state: -[,console] +[,bash] ---- kubectl get pod -A ---- The output should be similar to this one: -[,console] +[,shell] ---- NAMESPACE NAME READY STATUS RESTARTS AGE kube-system cloud-controller-manager-slemicro 1/1 Running 0 (2m3s ago) 3d5h diff --git a/asciidoc/misc/adoc/sushy-emulator-sles-certificates-container.adoc b/asciidoc/misc/adoc/sushy-emulator-sles-certificates-container.adoc index 135bce85..4279caaa 100644 --- a/asciidoc/misc/adoc/sushy-emulator-sles-certificates-container.adoc +++ 
b/asciidoc/misc/adoc/sushy-emulator-sles-certificates-container.adoc @@ -11,7 +11,7 @@ include::../../common/attributes.adoc[] . Create the `/etc/sushy/` folder to store the configuration and certificates + -[,console] +[,bash] ---- mkdir -p /etc/sushy/ cd /etc/sushy/ @@ -19,7 +19,7 @@ cd /etc/sushy/ + Ansible Redfish module https://github.com/ansible-collections/community.general/blob/main/plugins/modules/redfish_command.py#L851C5-L851C13[requires https]: + -[,console] +[,bash] ---- # Build root URI root_uri = "https://" + module.params['baseuri'] @@ -27,7 +27,7 @@ root_uri = "https://" + module.params['baseuri'] + . Create the certificate and key (hint: this is not the best way to create them as it is passwordless and 10 years of expiration) + -[,console] +[,bash] ---- openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 \ -subj "/C=ES/ST=Madrid/L=Las Rozas/O=Foo/CN=sushy-emulator" \ @@ -36,7 +36,7 @@ openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 \ + . Create the config file for the sushy-emulator as: + -[,console] +[,bash] ---- cat << EOF > /etc/sushy/sushy-emulator.conf SUSHY_EMULATOR_LISTEN_IP = u'0.0.0.0' @@ -59,7 +59,7 @@ EOF + . Create the systemd unit file for the container to be executed properly (including the paths, files, etc.): + -[,console] +[,shell] ---- cat << 'EOF' > /etc/systemd/system/container-sushy-emulator.service [Unit] @@ -92,7 +92,7 @@ EOF + . Notify systemd for the new unit file, enable the service at boot and start it: + -[,console] +[,bash] ---- systemctl daemon-reload systemctl enable --now container-sushy-emulator @@ -100,7 +100,7 @@ systemctl enable --now container-sushy-emulator + . Open the 8443/tcp port if required to be reachable from the outside world: + -[,console] +[,bash] ---- firewall-cmd --add-port=8443/tcp --zone=public --permanent firewall-cmd --add-port=8443/tcp --zone=public @@ -108,7 +108,7 @@ firewall-cmd --add-port=8443/tcp --zone=public == Testing -[,console] +[,shell] ---- curl https://localhost:8443/redfish/v1/Systems curl: (60) SSL certificate problem: self signed certificate @@ -121,7 +121,7 @@ how to fix it, please visit the web page mentioned above. Ignoring the certificate: -[,console] +[,json] ---- curl https://localhost:8443/redfish/v1/Systems -k { diff --git a/asciidoc/product/atip/adoc/atip.adoc b/asciidoc/product/atip/adoc/atip.adoc index f802e74e..dcb6c2e4 100644 --- a/asciidoc/product/atip/adoc/atip.adoc +++ b/asciidoc/product/atip/adoc/atip.adoc @@ -1,3 +1,4 @@ +[#atip] = SUSE Adaptive Telco Infrastructure Platform (ATIP) include::../../../common/attributes.adoc[] diff --git a/asciidoc/product/atip/adoc/features.adoc b/asciidoc/product/atip/adoc/features.adoc index a1b75a97..7d82373a 100644 --- a/asciidoc/product/atip/adoc/features.adoc +++ b/asciidoc/product/atip/adoc/features.adoc @@ -160,7 +160,7 @@ In our case, if you have installed a real time image like SLE Micro RT, kernel r + You could check it looking for the kernel and see if contains the *`rt`* string at the end of the kernel info: + -[,console] +[,bash] ---- uname -r 5.14.21-150400.15.11-rt @@ -177,7 +177,7 @@ https://www.suse.com/products/realtime/ The first thing is to create a profile for the cpu cores we want to isolate. In this case, we will isolate the cores 1-30 and 33-62. 
-[,console ] +[,shell] ---- echo "export tuned_params" >> /etc/grub.d/00_tuned @@ -237,7 +237,7 @@ With the values showed above, we are isolating 60 cores, and we are using 4 core Let's modify the grub file with the previous values: -[,console] +[,shell] ---- vi /boot/efi/EFI/sle_rt/grub.cfg set tuned_params="skew_tick=1 nohz=on nohz_full=1-30,33-62 rcu_nocbs=1-30,33-62 tuned.non_isolcpus=80000001,80000001 nosoftlockup" @@ -250,7 +250,7 @@ transactional-update grub.cfg To validate that the parameters are applied after reboot, you could check: -[,console] +[,bash] ---- cat /proc/cmdline ---- @@ -301,7 +301,7 @@ There are different ways to deploy SRIOV, and in this case, we will show two dif You could get the information to fill the config map from the lspci command: -[,console ] +[,shell] ---- lspci | grep -i acc 8a:00.0 Processing accelerators: Intel Corporation Device 0d5c @@ -489,7 +489,7 @@ EOF * After that you should see the pods running: + -[,console ] +[,shell] ---- kubectl get pods -n kube-system | grep sriov kube-system kube-sriov-device-plugin-amd64-twjfl 1/1 Running 0 2m @@ -524,9 +524,10 @@ If you don't get the interfaces available here, does not make sense continue wit ===== Get helm if not present -[,console ] +[,bash] ---- -curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 +curl -fsSL -o get_helm.sh \ + https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 chmod 500 get_helm.sh ./get_helm.sh ---- @@ -551,7 +552,7 @@ image::features_sriov.png[sriov.png] ===== Check the deployed resources crd and pods -[,console ] +[,bash] ---- kubectl -n sriov-network-operator get crd kubectl -n sriov-network-operator get pods @@ -561,7 +562,7 @@ kubectl -n sriov-network-operator get pods Now, if you have all resources running, the label should appears automatically in your node: -[,console ] +[,shell] ---- kubectl get nodes -oyaml | grep feature.node.kubernetes.io/network-sriov.capable feature.node.kubernetes.io/network-sriov.capable: "true" @@ -569,14 +570,15 @@ kubectl get nodes -oyaml | grep feature.node.kubernetes.io/network-sriov.capable if not present, you can add it manually: -[,console ] +[,bash] ---- -kubectl label $(kubectl get nodes -oname) feature.node.kubernetes.io/network-sriov.capable=true +kubectl label $(kubectl get nodes -oname) \ + feature.node.kubernetes.io/network-sriov.capable=true ---- ===== Review the daemonset to see the new `sriov-network-config-daemon` and `sriov-rancher-nfd-worker` as active and ready -[,console ] +[,shell] ---- kubectl get daemonset -A NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE @@ -589,7 +591,7 @@ kube-system rke2-multus-ds 1 1 1 After some minutes (can take up to 10 min to be updated) the nodes detected and configured will appear: -[,console ] +[,shell] ---- kubectl get sriovnetworknodestates.sriovnetwork.openshift.io -A NAMESPACE NAME AGE @@ -655,7 +657,7 @@ metadata: ==== If your interface is not detected here you should ensure that it is present in the next config map -[,console ] +[,bash] ---- kubectl get cm supported-nic-ids -oyaml -n cattle-sriov-system ---- @@ -782,19 +784,19 @@ To use dpdk using some drivers we need to enable some parameters in the kernel: To enable this parameters we need to add them to the kernel command line: -[,console ] +[,bash] ---- vi /etc/default/grub ---- -[,console ] +[,shell] ---- GRUB_CMDLINE_LINUX="intel_iommu=on intel_pstate=passive processor.max_cstate=1 intel_idle.max_cstate=0 iommu=pt usbcore.autosuspend=-1 
selinux=0 enforcing=0 nmi_watchdog=0 crashkernel=auto softlockup_panic=0 audit=0 mce=off hugepagesz=1G hugepages=40 hugepagesz=2M hugepages=0 default_hugepagesz=1G kthread_cpus=0,31,32,63 irqaffinity=0,31,32,63 isolcpu=1-30,33-62 skew_tick=1 nohz_full=1-30,33-62 rcu_nocbs=1-30,33-62 rcu_nocb_poll" ---- Then you need to update the grub configuration and reboot the system to apply the changes: -[,console ] +[,bash] ---- transactional-update grub.cfg reboot @@ -802,14 +804,14 @@ reboot To validate that the parameters are applied after the reboot you can check the command line: -[,console ] +[,bash] ---- cat /proc/cmdline ---- ==== Load vfio-pci kernel module -[,console ] +[,bash] ---- modprobe vfio-pci ---- @@ -818,7 +820,7 @@ modprobe vfio-pci To create 4 VFs PCI addresses for example for 2 different NICs we need to execute the following commands: -[,console ] +[,bash] ---- echo 4 > /sys/bus/pci/devices/0000:51:00.0/sriov_numvfs echo 4 > /sys/bus/pci/devices/0000:51:00.1/sriov_numvfs @@ -826,7 +828,7 @@ echo 4 > /sys/bus/pci/devices/0000:51:00.1/sriov_numvfs === Bind the new VFs with the vfio-pci driver -[,console ] +[,bash] ---- dpdk-devbind.py -b vfio-pci 0000:51:01.0 0000:51:01.1 0000:51:01.2 \ 0000:51:01.3 0000:51:11.0 0000:51:11.1 0000:51:11.2 0000:51:11.3 @@ -834,7 +836,7 @@ dpdk-devbind.py -b vfio-pci 0000:51:01.0 0000:51:01.1 0000:51:01.2 \ === Review the configuration applied: -[,console ] +[,shell] ---- dpdk-devbind.py -s @@ -890,12 +892,12 @@ To enable the huge pages we should add the next kernel parameters: Modify the grub file to add them to the kernel command line: -[,console ] +[,bash] ---- vi /etc/default/grub ---- -[,console ] +[,shell] ---- GRUB_CMDLINE_LINUX="intel_iommu=on intel_pstate=passive processor.max_cstate=1 intel_idle.max_cstate=0 iommu=pt usbcore.autosuspend=-1 selinux=0 enforcing=0 nmi_watchdog=0 crashkernel=auto softlockup_panic=0 audit=0 mce=off hugepagesz=1G hugepages=40 hugepagesz=2M hugepages=0 default_hugepagesz=1G kthread_cpus=0,31,32,63 irqaffinity=0,31,32,63 isolcpu=1-30,33-62 skew_tick=1 nohz_full=1-30,33-62 rcu_nocbs=1-30,33-62 rcu_nocb_poll" ---- @@ -904,7 +906,7 @@ GRUB_CMDLINE_LINUX="intel_iommu=on intel_pstate=passive processor.max_cstate=1 i In order to use the huge pages we need to mount them: -[,console ] +[,bash] ---- mkdir -p /hugepages mount -t hugetlbfs nodev /hugepages @@ -1027,7 +1029,7 @@ Non-Uniform Memory Access or Non-Uniform Memory Architecture (NUMA) is a physica To identify the NUMA nodes on your system you can use the next command: -[,console ] +[,shell] ---- numactl --hardware available: 1 nodes (0) @@ -1068,19 +1070,19 @@ To enable the vRAN acceleration we need to enable the following kernel parameter Modify the grub file to add them to the kernel command line: -[,console ] +[,bash] ---- vi /etc/default/grub ---- -[,console ] +[,shell] ---- GRUB_CMDLINE_LINUX="intel_iommu=on intel_pstate=passive processor.max_cstate=1 intel_idle.max_cstate=0 iommu=pt usbcore.autosuspend=-1 selinux=0 enforcing=0 nmi_watchdog=0 crashkernel=auto softlockup_panic=0 audit=0 mce=off hugepagesz=1G hugepages=40 hugepagesz=2M hugepages=0 default_hugepagesz=1G kthread_cpus=0,31,32,63 irqaffinity=0,31,32,63 isolcpu=1-30,33-62 skew_tick=1 nohz_full=1-30,33-62 rcu_nocbs=1-30,33-62 rcu_nocb_poll" ---- Then you need to update the grub configuration and reboot the system to apply the changes: -[,console ] +[,bash] ---- transactional-update grub.cfg reboot @@ -1088,14 +1090,14 @@ reboot To validate that the parameters are applied after the reboot you can check 
the command line: -[,console ] +[,bash] ---- cat /proc/cmdline ---- ===== Load igb_uio and vfio-pci kernel modules -[,console ] +[,bash] ---- modprobe igb_uio modprobe vfio-pci @@ -1105,7 +1107,7 @@ modprobe vfio-pci Maybe in some cases (depending on the OS) you should add to the path the /sbin/ for the lspci command doing: `export PATH=$PATH:/sbin/` -[,console ] +[,shell] ---- lspci | grep -i acc 8a:00.0 Processing accelerators: Intel Corporation Device 0d5c @@ -1113,7 +1115,7 @@ lspci | grep -i acc ==== Bind the PF with igb_uio module -[,console ] +[,bash] ---- dpdk-devbind.py -b igb_uio 0000:8a:00.0 ---- @@ -1122,7 +1124,7 @@ dpdk-devbind.py -b igb_uio 0000:8a:00.0 To create 2 vfs from the PF and bind with vfio-pci follow the next steps: -[,console ] +[,shell] ---- echo 2 > /sys/bus/pci/devices/0000:8a:00.0/max_vfs dpdk-devbind.py -b vfio-pci 0000:8b:00.0 @@ -1130,7 +1132,7 @@ dpdk-devbind.py -b vfio-pci 0000:8b:00.0 ==== Configure acc100 with the proposed configuration file -[,console ] +[,shell] ---- pf_bb_config ACC100 -c /opt/pf-bb-config/acc100_config_vf_5g.cfg Tue Jun 6 10:49:20 2023:INFO:Queue Groups: 2 5GUL, 2 5GDL, 2 4GUL, 2 4GDL @@ -1143,7 +1145,7 @@ Tue Jun 6 10:49:21 2023:INFO:ACC100 PF [0000:8a:00.0] configuration complete! ==== Check the new VFs created from the FEC PF: -[,console ] +[,shell] ---- dpdk-devbind.py -s ... diff --git a/asciidoc/product/atip/adoc/management-cluster.adoc index b2e98750..c01240f8 100644 --- a/asciidoc/product/atip/adoc/management-cluster.adoc +++ b/asciidoc/product/atip/adoc/management-cluster.adoc @@ -45,14 +45,14 @@ The following procedure describes how to prepare the configuration device in cas . Format the disk to any file system supported by SLE Micro: Ext3, Ext4, etc.: + -[,console] +[,bash] ---- sudo mkfs.ext4 /dev/sdY ---- + . Set the device label to either ignition (when either Ignition or Combustion is used) or combustion (when only Combustion is used). For the Ext4 file system: + -[,console] +[,bash] ---- sudo e2label /dev/sdY ignition ---- @@ -61,21 +61,21 @@ You can use any type of configuration storage media that your virtualization sys + . Mount the device: + -[,console] +[,bash] ---- sudo mount /dev/sdY /mnt ---- + . Create the directory structure as mentioned in https://documentation.suse.com/sle-micro/5.4/html/SLE-Micro-all/cha-images-ignition.html#[this link]: + -[,console] +[,bash] ---- sudo mkdir -p /mnt/ignition/` ---- + or: + -[,console] +[,bash] ---- sudo mkdir -p /mnt/combustion/ ---- @@ -132,14 +132,14 @@ Registering the system is possible from the command line using the `transactiona . To register SUSE Linux Enterprise Micro with SUSE Customer Center, run transactional-update register as follows: + -[,console] +[,bash] ---- transactional-update register -r REGISTRATION_CODE -e EMAIL_ADDRESS ---- + To register with a local registration server, additionally provide the URL to the server: + -[,console] +[,bash] ---- transactional-update register -r REGISTRATION_CODE -e EMAIL_ADDRESS \ --url "https://suse_register.example.com/" @@ -163,14 +163,14 @@ For more information, please refer to the documentation: https://docs.rke2.io/in . Run the installer: + -[,console] +[,bash] ---- curl -sfL https://get.rke2.io | sh - ---- + if you want to install a especific version, you can use the following command (i.e.
`v1.25.9+rke2r1`): + -[,console] +[,bash] ---- curl -sfL https://get.rke2.io | INSTALL_RKE2_VERSION="v1.25.9+rke2r1" sh - ---- @@ -182,14 +182,14 @@ For more information about the installation, please refer to the documentation: + . Enable the rke2-server service: + -[,console] +[,bash] ---- systemctl enable rke2-server.service ---- + . Start the service: + -[,console] +[,bash] ---- systemctl start rke2-server.service ---- @@ -198,14 +198,14 @@ systemctl start rke2-server.service . Run the installer: + -[,console] +[,bash] ---- curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - ---- + . Enable the rke2-agent service: + -[,console] +[,bash] ---- systemctl enable rke2-agent.service ---- @@ -220,7 +220,7 @@ token: + . Start the service: + -[,console] +[,bash] ---- systemctl start rke2-agent.service ---- @@ -241,7 +241,7 @@ For more information about the installation, please refer to the documentation: There are three releases available to be added as a Helm repository for Rancher. In our case, we will use the `rancher-stable` because it's the release recommended for production environments, but you could use `rancher-latest` or `rancher-alpha` if you want. Also, there is a `rancher primer` release that is the enterprise version of Rancher. -[,console] +[,bash] ---- helm repo add rancher-stable https://releases.rancher.com/server-charts/stable ---- @@ -249,9 +249,10 @@ helm repo add rancher-stable https://releases.rancher.com/server-charts/stable [TIP] ==== If you don't have `helm` installed previously, you could install it using the following command: -[,console] + +[,bash] ---- -curl -fsSL +curl -fsSL \ https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 |bash ---- ==== @@ -275,20 +276,20 @@ https://ranchermanager.docs.rancher.com/v2.7/pages-for-subheaders/install-upgrad In our case we will use the Rancher-generated TLS certificate: -[,console] +[,bash] ---- helm repo add jetstack https://charts.jetstack.io helm repo update helm install cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --create-namespace \ - --set installCRDs=true \ - --version v1.11.1 + --namespace cert-manager \ + --create-namespace \ + --set installCRDs=true \ + --version v1.11.1 ---- Once you've installed cert-manager, you can verify the pods are running: -[,console] +[,bash] ---- kubectl get pods --namespace cert-manager ---- @@ -297,22 +298,22 @@ kubectl get pods --namespace cert-manager You can install Rancher with helm using the following command modifying the `` and `` values: -[,console] +[,bash] ---- helm install rancher rancher-stable/rancher \ - --namespace cattle-system \ - --create-namespace \ - --set hostname= \ - --set bootstrapPassword= \ - --set replicas=1 \ - --set global.cattle.psp.enabled=false + --namespace cattle-system \ + --create-namespace \ + --set hostname= \ + --set bootstrapPassword= \ + --set replicas=1 \ + --set global.cattle.psp.enabled=false ---- ==== Verify the Rancher installation You should wait a few minutes for Rancher to be rolled out: -[,console] +[,bash] ---- kubectl -n cattle-system rollout status deploy/rancher ---- diff --git a/asciidoc/product/atip/adoc/requirements.adoc b/asciidoc/product/atip/adoc/requirements.adoc index a54688cd..5efff389 100644 --- a/asciidoc/product/atip/adoc/requirements.adoc +++ b/asciidoc/product/atip/adoc/requirements.adoc @@ -25,7 +25,7 @@ https://github.com/SUSE/rebootmgr You could verify the strategic being used as: -[,console] +[,ini] ---- cat /etc/rebootmgr.conf [rebootmgr] @@ -37,14 +37,14 @@ 
lock-group=default and you could disable it as: -[,console] +[,bash] ---- sed -i 's/strategy=best-effort/strategy=off/g' /etc/rebootmgr.conf ---- or using the rebootmgrctl command: -[,console] +[,bash] ---- rebootmgrctl strategy off ---- diff --git a/asciidoc/quickstart/adoc/eib.adoc b/asciidoc/quickstart/adoc/eib.adoc index 0ce08983..f53c7421 100644 --- a/asciidoc/quickstart/adoc/eib.adoc +++ b/asciidoc/quickstart/adoc/eib.adoc @@ -193,9 +193,9 @@ To build the image, we can run: [,bash] ---- docker run --rm -it -v $PWD/eib/:/eib \ -registry.opensuse.org/home/atgracey/eib/container/containerfile/eib:latest \ -/usr/bin/eib -config-file eib-config.yaml -config-dir /eib -build-dir \ -/eib/_build + registry.opensuse.org/home/atgracey/eib/container/containerfile/eib:latest \ + /usr/bin/eib -config-file eib-config.yaml -config-dir /eib -build-dir \ + /eib/_build ---- TODO: Change the image to the released version when available diff --git a/asciidoc/quickstart/adoc/elemental.adoc b/asciidoc/quickstart/adoc/elemental.adoc index 51f9a7b6..734c05b4 100644 --- a/asciidoc/quickstart/adoc/elemental.adoc +++ b/asciidoc/quickstart/adoc/elemental.adoc @@ -93,35 +93,37 @@ TODO: Do I need to explicitly allow for "local" management in the helm install? [.tabs] Linux:: + -[,console] +[,bash] ---- helm repo add rancher-latest https://releases.rancher.com/server-charts/latest kubectl create namespace cattle-system -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.crds.yaml +kubectl apply -f \ + https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.crds.yaml helm repo add jetstack https://charts.jetstack.io helm repo update helm install cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --create-namespace + --namespace cert-manager \ + --create-namespace ---- + MacOS:: + -[,console] +[,bash] ---- helm repo add rancher-latest https://releases.rancher.com/server-charts/latest kubectl create namespace cattle-system -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.crds.yaml +kubectl apply -f \ + https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.crds.yaml helm repo add jetstack https://charts.jetstack.io helm repo update helm install cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --create-namespace + --namespace cert-manager \ + --create-namespace ---- + Windows:: + -[,console] +[,bash] ---- helm repo add rancher-latest https://releases.rancher.com/server-charts/latest kubectl create namespace cattle-system @@ -138,7 +140,7 @@ helm install cert-manager jetstack/cert-manager ` [.tabs] Linux:: + -[,console] +[,bash] ---- helm install rancher rancher-latest/rancher \ --namespace cattle-system \ @@ -149,7 +151,7 @@ helm install rancher rancher-latest/rancher \ + MacOS:: + -[,console] +[,bash] ---- helm install rancher rancher-latest/rancher \ --namespace cattle-system \ @@ -160,7 +162,7 @@ helm install rancher rancher-latest/rancher \ + Windows:: + -[,console] +[,bash] ---- helm install rancher rancher-latest/rancher ` --namespace cattle-system ` @@ -182,14 +184,14 @@ With Rancher installed, you can now install Elemental itself! The helm chart for Elemental is published as an OCI artifact so the installation is a little simpler than other charts. It can be installed from either the same shell you used to install Rancher or in the browser from within Rancher's shell. 
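Since the chart is shipped as an OCI artifact, it can also be inspected up front; a minimal sketch, assuming Helm 3.8 or newer (native OCI support) and the chart path used in the install listing below:

[,bash]
----
# Assumption: Helm >= 3.8 with OCI registry support
helm show chart oci://registry.suse.com/rancher/elemental-operator-chart
helm show values oci://registry.suse.com/rancher/elemental-operator-chart
----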
-[,console] +[,bash] ---- helm install --create-namespace -n cattle-elemental-system \ -elemental-operator-crds \ -oci://registry.suse.com/rancher/elemental-operator-crds-chart + elemental-operator-crds \ + oci://registry.suse.com/rancher/elemental-operator-crds-chart helm install --create-namespace -n cattle-elemental-system \ -elemental-operator \ -oci://registry.suse.com/rancher/elemental-operator-chart + elemental-operator \ + oci://registry.suse.com/rancher/elemental-operator-chart ---- ==== (Optionally) Install the Elemental UI Extension @@ -244,7 +246,7 @@ We can create this object in a few ways: YAML:: Create a file with the yaml from above called `registration.yaml` the apply it with: + -[,console] +[,bash] ---- kubectl apply -f registration.yaml ---- @@ -312,14 +314,20 @@ From a system with Podman, Docker, or Rancher desktop installed, run Linux:: TODO: Add elemental rpms + -[,console] +[,bash] ---- mkdir -p eib_quickstart/base-images mkdir -p eib_quickstart/elemental mkdir -p eib_quickstart/rpms - +---- ++ +[,bash] +---- curl -o eib_quickstart/elemental/elemental.yaml - +---- ++ +[,yaml] +---- echo > /eib_quickstart/eib-config.yaml <<< HEREDOC apiVersion: 1.0 image: @@ -328,7 +336,10 @@ image: outputImageName: elemental-image.iso HEREDOC - +---- ++ +[,bash] +---- docker run --rm -it -v $PWD/eib_quickstart/:/eib \ registry.opensuse.org/home/atgracey/eib/container/containerfile/eib:latest \ /usr/bin/eib -config-file eib-config.yaml -config-dir /eib \ @@ -353,7 +364,7 @@ If you are booting a physical device, we need to burn the image to a USB flash d Linux:: TODO: how to find sdX + -[,console] +[,bash] ---- sudo dd if=/path/to/output.iso of=/dev/sdX ---- @@ -361,7 +372,7 @@ sudo dd if=/path/to/output.iso of=/dev/sdX MacOS:: TODO: any difference? + -[,console] +[,bash] ---- sudo dd if=/path/to/output.iso of=/dev/sdX ---- @@ -404,14 +415,14 @@ spec: + . Then we can create it with + -[,console] +[,bash] ---- kubectl create -f selector.yaml ---- + . This will match any machine in the inventory with the label `locationID: 123` so we need to add this label to the machines that should be matched. We can do this with: + -[,console] +[,bash] ---- kubectl label MachineInventory -n fleet-default \ locationID=123 @@ -443,7 +454,7 @@ spec: + . To provision this cluster, we can create a file with these contents called `cluster.yaml` and run + -[,console] +[,bash] ---- kubectl create -f cluster.yaml ---- @@ -476,7 +487,7 @@ For example, if you know that each location will have a unique subnet, you would This would typically be custom to your system's design but could look like: -[,console] +[,bash] ---- INET=`ip addr show dev eth0 | grep "inet\ "` elemental-register --label "network=$INET" \ diff --git a/asciidoc/quickstart/adoc/metal3.adoc index 1046cf3c..08187e7f 100644 --- a/asciidoc/quickstart/adoc/metal3.adoc +++ b/asciidoc/quickstart/adoc/metal3.adoc @@ -64,7 +64,7 @@ This IP must be part of the controlplane subnet, and reserved for static configu . First we install MetalLB: + -[,console] +[,bash] ---- helm repo add suse-edge https://suse-edge.github.io/charts helm install \ @@ -75,7 +75,7 @@ helm install \ + . Then we define an `IPAddressPool` and `L2Advertisment` using the reserved IP, defined as `STATIC_IRONIC_IP` below: + -[,console] +[,yaml] ---- export STATIC_IRONIC_IP= @@ -96,7 +96,7 @@ spec: EOF ---- + -[,console] +[,yaml] ---- cat <<-EOF | kubectl apply -f - apiVersion: metallb.io/v1beta1 @@ -112,7 +112,7 @@ EOF + .
Now Metal^3^ can be installed: + -[,console] +[,bash] ---- helm install \ metal3 suse-edge/metal3 \ @@ -123,7 +123,7 @@ helm install \ + . Note that it can take around 2 minutes for the initContainer to run on this deployment so ensure the pods are all running before proceeding: + -[,console] +[,shell] ---- kubectl get pods -n metal3-system NAME READY STATUS RESTARTS AGE @@ -138,7 +138,7 @@ TODO - update this to use the Rancher turtles operator instead Install https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl[clusterctl] 1.6.x, then install the core, infrastructure, bootstrap and controplane providers -[,console] +[,bash] ---- clusterctl init --core cluster-api:v1.6.0 --infrastructure metal3:v1.6.0 clusterctl init --bootstrap rke2 --control-plane rke2