From 646a4982cd31616e326d5aed35085f142ad001b0 Mon Sep 17 00:00:00 2001 From: B1F030 Date: Mon, 20 May 2024 17:24:10 +0800 Subject: [PATCH] format command line prompts Signed-off-by: B1F030 --- .../backup/working-with-velero.md | 12 +++++------ .../migration/migration-from-kubefed.md | 8 +++---- .../developers/customize-karmada-scheduler.md | 4 ++-- docs/developers/document-releasing.md | 4 ++-- .../performance-test-setup-for-karmada.md | 6 +++--- docs/developers/profiling-karmada.md | 2 +- docs/get-started/nginx-example.md | 2 +- docs/installation/install-binary.md | 14 ++++++------- docs/installation/installation.md | 6 +++--- .../access-service-across-clusters.md | 2 +- .../autoscaling-with-custom-metrics.md | 18 ++++++++-------- .../autoscaling-with-resource-metrics.md | 10 ++++----- docs/tutorials/resource-migration.md | 10 ++++----- docs/userguide/cicd/working-with-argocd.md | 2 +- .../clustermanager/cluster-registration.md | 8 +++---- .../failover/application-failover.md | 4 ++-- .../globalview/proxy-global-resource.md | 6 +++--- docs/userguide/scheduling/descheduler.md | 6 +++--- .../scheduling/resource-propagating.md | 2 +- .../scheduling/scheduler-estimator.md | 2 +- .../working-with-gatekeeper.md | 4 ++-- .../service/multi-cluster-service.md | 2 +- .../service/working-with-eriecanal.md | 12 +++++------ .../working-with-istio-on-flat-network.md | 21 ++++++++----------- .../backup/working-with-velero.md | 12 +++++------ .../migration/migration-from-kubefed.md | 8 +++---- .../developers/customize-karmada-scheduler.md | 4 ++-- .../current/developers/document-releasing.md | 4 ++-- .../performance-test-setup-for-karmada.md | 6 +++--- .../current/developers/profiling-karmada.md | 2 +- .../current/get-started/nginx-example.md | 2 +- .../current/installation/install-binary.md | 14 ++++++------- .../current/installation/installation.md | 6 +++--- .../access-service-across-clusters.md | 2 +- .../autoscaling-with-custom-metrics.md | 16 +++++++------- .../autoscaling-with-resource-metrics.md | 16 +++++++------- .../current/tutorials/resource-migration.md | 10 ++++----- .../userguide/cicd/working-with-argocd.md | 2 +- .../clustermanager/cluster-registration.md | 8 +++---- .../failover/application-failover.md | 4 ++-- .../globalview/proxy-global-resource.md | 6 +++--- .../userguide/scheduling/descheduler.md | 6 +++--- .../scheduling/resource-propagating.md | 2 +- .../scheduling/scheduler-estimator.md | 2 +- .../working-with-gatekeeper.md | 4 ++-- .../service/multi-cluster-service.md | 2 +- .../service/working-with-eriecanal.md | 12 +++++------ .../working-with-istio-on-flat-network.md | 21 ++++++++----------- 48 files changed, 166 insertions(+), 172 deletions(-) diff --git a/docs/administrator/backup/working-with-velero.md b/docs/administrator/backup/working-with-velero.md index 1929b59b..56e3a35e 100644 --- a/docs/administrator/backup/working-with-velero.md +++ b/docs/administrator/backup/working-with-velero.md @@ -113,7 +113,7 @@ Velero consists of two components: And then you will find nginx is deployed successfully. 
```shell - # kubectl get deployment.apps + $ kubectl get deployment.apps NAME READY UP-TO-DATE AVAILABLE AGE nginx 2/2 2 2 17s ``` @@ -134,27 +134,27 @@ kubectl config use-context member2 In `member2`, we can also get the backup that we created in `member1`: ```shell -# velero backup get +$ velero backup get NAME STATUS ERRORS WARNINGS CREATED EXPIRES STORAGE LOCATION SELECTOR nginx-backup Completed 0 0 2021-12-10 15:16:46 +0800 CST 29d default app=nginx ``` Restore `member1` resources to `member2`: ```shell -# velero restore create --from-backup nginx-backup +$ velero restore create --from-backup nginx-backup Restore request "nginx-backup-20211210151807" submitted successfully. ``` Watch restore result, you'll find that the status is Completed. ```shell -# velero restore get +$ velero restore get NAME BACKUP STATUS STARTED COMPLETED ERRORS WARNINGS CREATED SELECTOR nginx-backup-20211210151807 nginx-backup Completed 2021-12-10 15:18:07 +0800 CST 2021-12-10 15:18:07 +0800 CST 0 0 2021-12-10 15:18:07 +0800 CST ``` And then you can find deployment nginx will be restored successfully. ```shell -# kubectl get deployment.apps/nginx +$ kubectl get deployment.apps/nginx NAME READY UP-TO-DATE AVAILABLE AGE nginx 2/2 2 2 21s ``` @@ -247,7 +247,7 @@ EOF And then you can find deployment nginx will be restored on member2 successfully. ```shell -# kubectl get deployment.apps/nginx +$ kubectl get deployment.apps/nginx NAME READY UP-TO-DATE AVAILABLE AGE nginx 2/2 2 2 10s ``` diff --git a/docs/administrator/migration/migration-from-kubefed.md b/docs/administrator/migration/migration-from-kubefed.md index f5265912..9ac9da2a 100644 --- a/docs/administrator/migration/migration-from-kubefed.md +++ b/docs/administrator/migration/migration-from-kubefed.md @@ -50,8 +50,8 @@ object to describe the joined cluster. Assume you use the `kubefedctl` tool to check the status of the joined clusters as follows: -``` -kubectl -n kube-federation-system get kubefedclusters +```bash +$ kubectl -n kube-federation-system get kubefedclusters NAME AGE READY KUBERNETES-VERSION cluster1 1m True v1.21.2 @@ -60,8 +60,8 @@ cluster2 1m True v1.22.0 Now with Karmada, you can use `karmadactl` tool to do the same thing: -``` -kubectl get clusters +```bash +$ kubectl get clusters NAME VERSION MODE READY AGE member1 v1.20.7 Push True 66s diff --git a/docs/developers/customize-karmada-scheduler.md b/docs/developers/customize-karmada-scheduler.md index 718e298d..7aa242a6 100644 --- a/docs/developers/customize-karmada-scheduler.md +++ b/docs/developers/customize-karmada-scheduler.md @@ -140,7 +140,7 @@ make image-karmada-scheduler ``` ```shell -kubectl --kubeconfig ~/.kube/karmada.config --context karmada-host edit deploy/karmada-scheduler -nkarmada-system +$ kubectl --kubeconfig ~/.kube/karmada.config --context karmada-host edit deploy/karmada-scheduler -nkarmada-system ... spec: automountServiceAccountToken: false @@ -175,7 +175,7 @@ You can config the plugin enablement by setting the flag `--plugins`. For example, the following config will disable `TestFilter` plugin. ```shell -kubectl --kubeconfig ~/.kube/karmada.config --context karmada-host edit deploy/karmada-scheduler -nkarmada-system +$ kubectl --kubeconfig ~/.kube/karmada.config --context karmada-host edit deploy/karmada-scheduler -nkarmada-system ... 
spec: automountServiceAccountToken: false diff --git a/docs/developers/document-releasing.md b/docs/developers/document-releasing.md index 63f1a827..b88cd88e 100644 --- a/docs/developers/document-releasing.md +++ b/docs/developers/document-releasing.md @@ -54,8 +54,8 @@ go build ./hack/tools/gencomponentdocs/. 1. Update versions.json ```shell -cd website/ -vim versions.json +$ cd website/ +$ vim versions.json [ v1.5 # add a new version tag diff --git a/docs/developers/performance-test-setup-for-karmada.md b/docs/developers/performance-test-setup-for-karmada.md index d24245a4..947b2b20 100644 --- a/docs/developers/performance-test-setup-for-karmada.md +++ b/docs/developers/performance-test-setup-for-karmada.md @@ -56,7 +56,7 @@ kubectl apply -f fakekubelet.yml `kubectl get node` You will find fake nodes. ```shell -> kubectl get node -o wide +$ kubectl get node -o wide NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME fake-0 Ready agent 10s fake 10.88.0.136 fake-1 Ready agent 10s fake 10.88.0.136 @@ -68,7 +68,7 @@ fake-4 Ready agent 10s fake 10.88.0.136 kubectl apply -f - < kubectl get pod -o wide +$ kubectl get pod -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES fake-pod-78884479b7-52qcx 1/1 Running 0 6s 10.0.0.23 fake-4 fake-pod-78884479b7-bd6nk 1/1 Running 0 6s 10.0.0.13 fake-2 diff --git a/docs/developers/profiling-karmada.md b/docs/developers/profiling-karmada.md index 4353c27c..89344de3 100644 --- a/docs/developers/profiling-karmada.md +++ b/docs/developers/profiling-karmada.md @@ -40,7 +40,7 @@ The HTTP endpoint will now be available as a local port. You can then generate the file for the memory profile with curl and pipe the data to a file: ```shell -$ curl http://localhost:6060/debug/pprof/heap > heap.pprof +curl http://localhost:6060/debug/pprof/heap > heap.pprof ``` Generate the file for the CPU profile with curl and pipe the data to a file (7200 seconds is two hours): diff --git a/docs/get-started/nginx-example.md b/docs/get-started/nginx-example.md index 2e03a121..848f4e20 100644 --- a/docs/get-started/nginx-example.md +++ b/docs/get-started/nginx-example.md @@ -29,7 +29,7 @@ cd karmada run the following script: ``` -# hack/local-up-karmada.sh +hack/local-up-karmada.sh ``` This script will do the following tasks for you: - Start a Kubernetes cluster to run the Karmada control plane, aka. the `host cluster`. diff --git a/docs/installation/install-binary.md b/docs/installation/install-binary.md index 2578483c..65129fac 100644 --- a/docs/installation/install-binary.md +++ b/docs/installation/install-binary.md @@ -29,7 +29,7 @@ Step-by-step installation of binary high-availability `karmada` cluster. Execute operations at `karmada-01` `karmada-02` `karmada-03`. ```bash -vi /etc/hosts +$ vi /etc/hosts 172.31.209.245 karmada-01 172.31.209.246 karmada-02 172.31.209.247 karmada-03 @@ -126,9 +126,9 @@ You normally don't need to change `*.sh` files. 
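If the scripts were copied over from another machine they may have lost their executable bit; restoring it first is a harmless optional step (the script names match the ones run in the next step):

```bash
# Optional: ensure the certificate scripts are executable before running them
chmod +x generate_ca.sh generate_leaf.sh generate_etcd.sh
```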
### Step 3: Run Shell Scripts ```bash -$ ./generate_ca.sh -$ ./generate_leaf.sh ca_cert/ -$ ./generate_etcd.sh +./generate_ca.sh +./generate_leaf.sh ca_cert/ +./generate_etcd.sh ``` @@ -312,7 +312,7 @@ systemctl status etcd.service ### Verify ```bash -etcdctl --cacert /etc/karmada/pki/etcd/ca.crt \ +$ etcdctl --cacert /etc/karmada/pki/etcd/ca.crt \ --cert /etc/karmada/pki/etcd/healthcheck-client.crt \ --key /etc/karmada/pki/etcd/healthcheck-client.key \ --endpoints "172.31.209.245:2379,172.31.209.246:2379,172.31.209.247:2379" \ @@ -515,7 +515,7 @@ Then, like `karmada-webhook`, use `nginx` for high availability. modify the `nginx` configuration and add the following configuration,Execute operations at `karmada-01`. ```bash -cat /usr/local/karmada-nginx/conf/nginx.conf +$ cat /usr/local/karmada-nginx/conf/nginx.conf worker_processes 2; events { @@ -858,7 +858,7 @@ ok modify the `nginx` configuration and add the following configuration,Execute operations at `karmada-01`. ```bash -cat /usr/local/karmada-nginx/conf/nginx.conf +$ cat /usr/local/karmada-nginx/conf/nginx.conf worker_processes 2; events { diff --git a/docs/installation/installation.md b/docs/installation/installation.md index 70b721e8..d9676bea 100644 --- a/docs/installation/installation.md +++ b/docs/installation/installation.md @@ -82,7 +82,7 @@ Step 2: Show members of karmada The components of Karmada are installed in `karmada-system` namespace by default, you can get them by: ```bash -kubectl get deployments -n karmada-system +$ kubectl get deployments -n karmada-system NAME READY UP-TO-DATE AVAILABLE AGE karmada-aggregated-apiserver 1/1 1 1 102s karmada-apiserver 1/1 1 1 2m34s @@ -93,7 +93,7 @@ kube-controller-manager 1/1 1 1 2m3s ``` And the `karmada-etcd` is installed as the `StatefulSet`, get it by: ```bash -kubectl get statefulsets -n karmada-system +$ kubectl get statefulsets -n karmada-system NAME READY AGE etcd 1/1 28m ``` @@ -140,7 +140,7 @@ kubectl karmada init --crds https://github.com/karmada-io/karmada/releases/downl Check installed components: ```bash -kubectl get pods -n karmada-system --kubeconfig=$HOME/.kube/host.config +$ kubectl get pods -n karmada-system --kubeconfig=$HOME/.kube/host.config NAME READY STATUS RESTARTS AGE etcd-0 1/1 Running 0 2m55s karmada-aggregated-apiserver-84b45bf9b-n5gnk 1/1 Running 0 109s diff --git a/docs/tutorials/access-service-across-clusters.md b/docs/tutorials/access-service-across-clusters.md index 2e65236d..c40eff73 100644 --- a/docs/tutorials/access-service-across-clusters.md +++ b/docs/tutorials/access-service-across-clusters.md @@ -26,7 +26,7 @@ Note: In order to prevent routing conflicts, Pod and Service CIDRs of clusters n To enable the MultiClusterService feature in the karmada-controller-manager, run the following command: ```shell -$ kubectl --context karmada-host get deploy karmada-controller-manager -n karmada-system -o yaml | sed '/- --v=4/i \ - --feature-gates=MultiClusterService=true' | kubectl --context karmada-host replace -f - +kubectl --context karmada-host get deploy karmada-controller-manager -n karmada-system -o yaml | sed '/- --v=4/i \ - --feature-gates=MultiClusterService=true' | kubectl --context karmada-host replace -f - ``` Please note that the MultiClusterService feature is disabled by default and can be enabled using the `--feature-gates=MultiClusterService=true` flag. 
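If you want to confirm that the gate took effect, a quick check (assuming the default deployment name and namespace used throughout this guide) is to inspect the container arguments:

```shell
# Verify the feature gate now appears among the karmada-controller-manager args
kubectl --context karmada-host -n karmada-system get deploy karmada-controller-manager \
  -o jsonpath='{.spec.template.spec.containers[0].args}' | tr ',' '\n' | grep feature-gates
```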
diff --git a/docs/tutorials/autoscaling-with-custom-metrics.md b/docs/tutorials/autoscaling-with-custom-metrics.md index f6a9aed0..8fbae40e 100644 --- a/docs/tutorials/autoscaling-with-custom-metrics.md +++ b/docs/tutorials/autoscaling-with-custom-metrics.md @@ -89,7 +89,7 @@ kubectl apply -f manifests/ You can verify the installation by the following command: ```sh -kubectl --kubeconfig=/root/.kube/members.config --context=member1 get po -nmonitoring +$ kubectl --kubeconfig=/root/.kube/members.config --context=member1 get po -nmonitoring NAME READY STATUS RESTARTS AGE alertmanager-main-0 2/2 Running 0 30h alertmanager-main-1 2/2 Running 0 30h @@ -254,9 +254,9 @@ data: ``` ```sh -$ kubectl apply -f prom-adapter.config.yaml +kubectl apply -f prom-adapter.config.yaml # Restart prom-adapter pods -$ kubectl rollout restart deployment prometheus-adapter -n monitoring +kubectl rollout restart deployment prometheus-adapter -n monitoring ``` ## Register metrics API in `member1` and `member2` cluster @@ -280,13 +280,13 @@ spec: ``` ```sh -$ kubectl create -f api-service.yaml +kubectl create -f api-service.yaml ``` The API is registered as `custom.metrics.k8s.io/v1beta2`, and you can use the following command to verify: ```sh -$ kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta2/namespaces/default/pods/*/http_requests?selector=app%3Dsample-app" +kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta2/namespaces/default/pods/*/http_requests?selector=app%3Dsample-app" ``` The output is similar to: @@ -419,9 +419,9 @@ derived-sample-app member1 ClusterIP 10.11.59.213 80/T In order to do http requests, here you can use `hey`. * Download `hey` and copy it to kind cluster container. ```sh -$ wget https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64 -$ chmod +x hey_linux_amd64 -$ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey +wget https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64 +chmod +x hey_linux_amd64 +docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey ``` ## Test scaling up @@ -442,7 +442,7 @@ $ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey * Request multi-cluster service with hey to increase the nginx pods' custom metrics(http_requests_total). ```sh - $ docker exec member1-control-plane hey -c 1000 -z 1m http://10.11.59.213/metrics + docker exec member1-control-plane hey -c 1000 -z 1m http://10.11.59.213/metrics ``` * Wait 15s, the replicas will be scaled up, then you can check the pod distribution again. diff --git a/docs/tutorials/autoscaling-with-resource-metrics.md b/docs/tutorials/autoscaling-with-resource-metrics.md index 0dcb5418..03b3f746 100644 --- a/docs/tutorials/autoscaling-with-resource-metrics.md +++ b/docs/tutorials/autoscaling-with-resource-metrics.md @@ -279,10 +279,10 @@ derived-nginx-service member1 ClusterIP 10.11.59.213 80/T In order to do http requests, here we use `hey`. * Download `hey` and copy it to kind cluster container. -``` -$ wget https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64 -$ chmod +x hey_linux_amd64 -$ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey +```sh +wget https://hey-release.s3.us-east-2.amazonaws.com/hey_linux_amd64 +chmod +x hey_linux_amd64 +docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey ``` ## Test scaling up @@ -303,7 +303,7 @@ $ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey * Request multi-cluster service with hey to increase the nginx pods' CPU usage. 
```sh - $ docker exec member1-control-plane hey -c 1000 -z 1m http://10.11.59.213 + docker exec member1-control-plane hey -c 1000 -z 1m http://10.11.59.213 ``` * Wait 15s, the replicas will be scaled up, then you can check the pod distribution again. diff --git a/docs/tutorials/resource-migration.md b/docs/tutorials/resource-migration.md index 256df386..2a7a5cbd 100644 --- a/docs/tutorials/resource-migration.md +++ b/docs/tutorials/resource-migration.md @@ -19,10 +19,10 @@ So, this section will guide you to cover: #### Step 1: Run the command ```shell -$ git clone https://github.com/karmada-io/karmada -$ cd karmada -$ hack/local-up-karmada.sh -$ export KUBECONFIG=~/.kube/karmada.config:~/.kube/members.config +git clone https://github.com/karmada-io/karmada +cd karmada +hack/local-up-karmada.sh +export KUBECONFIG=~/.kube/karmada.config:~/.kube/members.config ``` > **Note:** @@ -37,7 +37,7 @@ $ export KUBECONFIG=~/.kube/karmada.config:~/.kube/members.config #### Step 2: Run the command ```shell -$ kubectl --context karmada-host get deploy karmada-controller-manager -n karmada-system -o yaml | sed '/- --failover-eviction-timeout=30s/{n;s/- --v=4/- --feature-gates=PropagationPolicyPreemption=true\n &/g}' | kubectl --context karmada-host replace -f - +kubectl --context karmada-host get deploy karmada-controller-manager -n karmada-system -o yaml | sed '/- --failover-eviction-timeout=30s/{n;s/- --v=4/- --feature-gates=PropagationPolicyPreemption=true\n &/g}' | kubectl --context karmada-host replace -f - ``` > **Note:** diff --git a/docs/userguide/cicd/working-with-argocd.md b/docs/userguide/cicd/working-with-argocd.md index 3b1e25b2..d28df28b 100644 --- a/docs/userguide/cicd/working-with-argocd.md +++ b/docs/userguide/cicd/working-with-argocd.md @@ -15,7 +15,7 @@ In this example, we are using a Karmada environment with at least `3` member clu You can set up the environment by `hack/local-up-karmada.sh`, which is also used to run our E2E cases. ```bash -# kubectl get clusters +$ kubectl get clusters NAME VERSION MODE READY AGE member1 v1.19.1 Push True 18h member2 v1.19.1 Push True 18h diff --git a/docs/userguide/clustermanager/cluster-registration.md b/docs/userguide/clustermanager/cluster-registration.md index 564ac0df..5c6962d2 100644 --- a/docs/userguide/clustermanager/cluster-registration.md +++ b/docs/userguide/clustermanager/cluster-registration.md @@ -51,7 +51,7 @@ kubectl karmada join member1 --kubeconfig= --karmada-context Check the status of the joined clusters by using the following command. ``` -kubectl get clusters +$ kubectl get clusters NAME VERSION MODE READY AGE member1 v1.20.7 Push True 66s @@ -81,7 +81,7 @@ Be different from the `karmadactl join` which registers a cluster with `Push` mo In Karmada control plane, we can use `karmadactl token create` command to create bootstrap tokens whose default ttl is 24h. ``` -$ karmadactl token create --print-register-command --kubeconfig /etc/karmada/karmada-apiserver.config +karmadactl token create --print-register-command --kubeconfig /etc/karmada/karmada-apiserver.config ``` ``` @@ -97,7 +97,7 @@ For more details about `bootstrap token` please refer to: In the Kubernetes control plane of member clusters, we also need the `kubeconfig` file of the member cluster. Right after we execute the output of the `karmadactl register` command provided above. 
``` -$ karmadactl register 10.10.x.x:32443 --token t2jgtm.9nybj0526mjw1jbf --discovery-token-ca-cert-hash sha256:f5a5a43869bb44577dba582e794c3e3750f2050d62f1b1dc80fd3d6a371b6ed4 +karmadactl register 10.10.x.x:32443 --token t2jgtm.9nybj0526mjw1jbf --discovery-token-ca-cert-hash sha256:f5a5a43869bb44577dba582e794c3e3750f2050d62f1b1dc80fd3d6a371b6ed4 ``` ``` @@ -124,7 +124,7 @@ Once deployed, `the karmada-agent` will automatically register the cluster durin Check the status of the registered clusters by using the same command above. ``` -kubectl get clusters +$ kubectl get clusters NAME VERSION MODE READY AGE member3 v1.20.7 Pull True 66s ``` diff --git a/docs/userguide/failover/application-failover.md b/docs/userguide/failover/application-failover.md index 140ee53c..fb706eaf 100644 --- a/docs/userguide/failover/application-failover.md +++ b/docs/userguide/failover/application-failover.md @@ -152,9 +152,9 @@ Now the application is scheduled into member2 and these two replicas run normall ```shell # mark node "member2-control-plane" as unschedulable in cluster member2 -$ kubectl --context member2 cordon member2-control-plane +kubectl --context member2 cordon member2-control-plane # delete the pod in cluster member2 -$ kubectl --context member2 delete pod -l app=nginx +kubectl --context member2 delete pod -l app=nginx ``` You can immediately find that the deployment is unhealthy now from the ResourceBinding. diff --git a/docs/userguide/globalview/proxy-global-resource.md b/docs/userguide/globalview/proxy-global-resource.md index 916f92ee..90641ea3 100644 --- a/docs/userguide/globalview/proxy-global-resource.md +++ b/docs/userguide/globalview/proxy-global-resource.md @@ -38,7 +38,7 @@ spec: ``` ```shell -$ kubectl --context karmada-apiserver apply -f resourceregistry.yaml +kubectl --context karmada-apiserver apply -f resourceregistry.yaml ``` Based on the ResourceRegistry above, you can access pods and nodes with proxy function. @@ -60,13 +60,13 @@ After processing the above steps, you can access pods and nodes with kubectl. Taking getting the pod log as an example, you can use the following command: ```shell -$ kubectl logs -n +kubectl logs -n ``` Specifying `--raw` as following has the same effect: ```shell -$ kubectl get --raw /apis/search.karmada.io/v1alpha1/proxying/karmada/proxy/api/v1/namespaces//pods//log +kubectl get --raw /apis/search.karmada.io/v1alpha1/proxying/karmada/proxy/api/v1/namespaces//pods//log ``` Enjoy it! diff --git a/docs/userguide/scheduling/descheduler.md b/docs/userguide/scheduling/descheduler.md index c2fdaacf..93b9b92f 100644 --- a/docs/userguide/scheduling/descheduler.md +++ b/docs/userguide/scheduling/descheduler.md @@ -50,7 +50,7 @@ After all member clusters have joined and estimators are all ready, specify the ```bash # edit the deployment of karmada-scheduler -$ kubectl --context karmada-host -n karmada-system edit deployments.apps karmada-scheduler +kubectl --context karmada-host -n karmada-system edit deployments.apps karmada-scheduler ``` Add the option `--enable-scheduler-estimator=true` into the command of container `karmada-scheduler`. @@ -121,9 +121,9 @@ Now we taint all nodes in member1 and evict the replica. 
```bash # mark node "member1-control-plane" as unschedulable in cluster member1 -$ kubectl --context member1 cordon member1-control-plane +kubectl --context member1 cordon member1-control-plane # delete the pod in cluster member1 -$ kubectl --context member1 delete pod -l app=nginx +kubectl --context member1 delete pod -l app=nginx ``` A new pod will be created and cannot be scheduled by `kube-scheduler` due to lack of resources. diff --git a/docs/userguide/scheduling/resource-propagating.md b/docs/userguide/scheduling/resource-propagating.md index 753ddc84..2ea4c18f 100644 --- a/docs/userguide/scheduling/resource-propagating.md +++ b/docs/userguide/scheduling/resource-propagating.md @@ -357,7 +357,7 @@ By leveraging the spread-by-region constraint, users are able to deploy workload To enable multi region deployment, you should use the command below to customize the region of clusters. ```shell -kubectl --kubeconfig ~/.kube/karmada.config --context karmada-apiserver edit cluster/member1 +$ kubectl --kubeconfig ~/.kube/karmada.config --context karmada-apiserver edit cluster/member1 ... spec: diff --git a/docs/userguide/scheduling/scheduler-estimator.md b/docs/userguide/scheduling/scheduler-estimator.md index 3fdc715b..c57db2a9 100644 --- a/docs/userguide/scheduling/scheduler-estimator.md +++ b/docs/userguide/scheduling/scheduler-estimator.md @@ -42,7 +42,7 @@ After all member clusters have been joined and estimators are all ready, please ```bash # edit the deployment of karmada-scheduler -$ kubectl --context karmada-host edit -n karmada-system deployments.apps karmada-scheduler +kubectl --context karmada-host edit -n karmada-system deployments.apps karmada-scheduler ``` And then add the option `--enable-scheduler-estimator=true` into the command of container `karmada-scheduler`. diff --git a/docs/userguide/security-governance/working-with-gatekeeper.md b/docs/userguide/security-governance/working-with-gatekeeper.md index fb88b53a..bd01d6cc 100644 --- a/docs/userguide/security-governance/working-with-gatekeeper.md +++ b/docs/userguide/security-governance/working-with-gatekeeper.md @@ -470,8 +470,8 @@ In this case, you will use Gatekeeper v3.7.2. 
Related deployment files are from ### Create a bad namespace - ```console - kubectl create ns test + ```bash + $ kubectl create ns test Error from server ([ns-must-have-gk] you must provide labels: {"gatekeepers"}): admission webhook "validation.gatekeeper.sh" denied the request: [ns-must-have-gk] you must provide labels: {"gatekeepers"} ``` diff --git a/docs/userguide/service/multi-cluster-service.md b/docs/userguide/service/multi-cluster-service.md index d70fb776..9aa9ec83 100644 --- a/docs/userguide/service/multi-cluster-service.md +++ b/docs/userguide/service/multi-cluster-service.md @@ -192,7 +192,7 @@ kubernetes ClusterIP 10.13.0.1 443/TCP 15m Start a pod `request` on the `member2` cluster to access the ClusterIP of **derived service**: ```shell -$ kubectl --kubeconfig ~/.kube/members.config --context member2 run -i --rm --restart=Never --image=jeremyot/request:0a40de8 request -- --duration={duration-time} --address={ClusterIP of derived service} +kubectl --kubeconfig ~/.kube/members.config --context member2 run -i --rm --restart=Never --image=jeremyot/request:0a40de8 request -- --duration={duration-time} --address={ClusterIP of derived service} ``` Eg, if we continue to access service for 3s, ClusterIP is `10.13.205.2`: diff --git a/docs/userguide/service/working-with-eriecanal.md b/docs/userguide/service/working-with-eriecanal.md index c117da09..da3c9b39 100644 --- a/docs/userguide/service/working-with-eriecanal.md +++ b/docs/userguide/service/working-with-eriecanal.md @@ -67,7 +67,7 @@ kubectl --kubeconfig PATH_TO_KARMADA_CONFIG apply -f https://raw.githubuserconte In the control-plane cluster, you can use the Karmada API server's configuration to view the cluster registration information. ```shell -kubectl --kubeconfig PATH_TO_KARMADA_CONFIG get cluster +$ kubectl --kubeconfig PATH_TO_KARMADA_CONFIG get cluster NAME VERSION MODE READY AGE cluster-1 v1.23.8+k3s2 Push True 154m cluster-2 v1.23.8+k3s2 Push True 154m @@ -122,7 +122,7 @@ After registration is completed, you can view the registration information of th To view the registration information of member clusters, use the following command in the control-plane cluster: ```shell -kubectl get cluster +$ kubectl get cluster NAME REGION ZONE GROUP GATEWAY HOST GATEWAY PORT MANAGED MANAGED AGE AGE local default default default 80 159m cluster-1 default default default 10.0.2.4 80 True 159m 159m @@ -158,7 +158,7 @@ fsm install \ To check the installed service mesh version and other information in the cluster, you can use the following command: ```shell -fsm version +$ fsm version CLI Version: version.Info{Version:"v1.0.0", GitCommit:"9966a2b031c862b54b4b007eae35ee16afa31a80", BuildDate:"2023-05-29-12:10"} MESH NAME MESH NAMESPACE VERSION GIT COMMIT BUILD DATE @@ -242,7 +242,7 @@ After creating resources in the Karmada control plane, you also need to create a Create the `PropagationPolicy` for distributing the resources: ```shell -$kmd apply -n httpbin -f - < istio-remote-secret-member2.yaml Export `KUBECONFIG` and switch to `karmada apiserver`: -``` -# export KUBECONFIG=$HOME/.kube/karmada.config - -# kubectl config use-context karmada-apiserver +```bash +export KUBECONFIG=$HOME/.kube/karmada.config +kubectl config use-context karmada-apiserver ``` Apply istio remote secret: @@ -285,10 +283,9 @@ EOF Export `KUBECONFIG` and switch to `karmada apiserver`: -``` -# export KUBECONFIG=$HOME/.kube/karmada.config - -# kubectl config use-context karmada-apiserver +```bash +export KUBECONFIG=$HOME/.kube/karmada.config +kubectl config 
use-context karmada-apiserver ``` Create an `istio-demo` namespace: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/administrator/backup/working-with-velero.md b/i18n/zh/docusaurus-plugin-content-docs/current/administrator/backup/working-with-velero.md index 2f355203..53cb90b9 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/administrator/backup/working-with-velero.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/administrator/backup/working-with-velero.md @@ -131,7 +131,7 @@ Velero 由两个部分组成。 然后你将发现 nginx 应用部署成功。 ```shell - # kubectl get deployment.apps + $ kubectl get deployment.apps NAME READY UP-TO-DATE AVAILABLE AGE nginx 2/2 2 2 17s ``` @@ -156,7 +156,7 @@ kubectl config use-context member2 在 `member2` 中,我们也可以得到我们在 `member1` 中创建的备份: ```shell -# velero restore get +$ velero restore get NAME STATUS ERRORS WARNINGS CREATED EXPIRES STORAGE LOCATION SELECTOR nginx-backup Completed 0 0 2021-12-10 15:16:46 +0800 CST 29d default app=nginx ``` @@ -164,14 +164,14 @@ nginx-backup Completed 0 0 2021-12-10 15:16:46 +0800 CST 2 将 `member1` 的资源恢复到 `member2`: ```shell -# velero restore create --from-backup nginx-backup +$ velero restore create --from-backup nginx-backup Restore request "nginx-backup-20211210151807" submitted successfully. ``` 然后你就可以发现部署 nginx 将被成功恢复。 ```shell -# velero restore get +$ velero restore get NAME BACKUP STATUS STARTED COMPLETED ERRORS WARNINGS CREATED SELECTOR nginx-backup-20211210151807 nginx-backup Completed 2021-12-10 15:18:07 +0800 CST 2021-12-10 15:18:07 +0800 CST 0 0 2021-12-10 15:18:07 +0800 CST ``` @@ -179,7 +179,7 @@ nginx-backup-20211210151807 nginx-backup Completed 2021-12-10 15:18:07 +08 然后你就可以发现部署 nginx 会被成功恢复。 ```shell -# kubectl get deployment.apps/nginx +$ kubectl get deployment.apps/nginx NAME READY UP-TO-DATE AVAILABLE AGE nginx 2/2 2 2 21s ``` @@ -272,7 +272,7 @@ EOF 然后你可以发现部署的 nginx 将被成功恢复到 member2 上。 ```shell -# kubectl get deployment.apps/nginx +$ kubectl get deployment.apps/nginx NAME READY UP-TO-DATE AVAILABLE AGE nginx 2/2 2 2 10s ``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/administrator/migration/migration-from-kubefed.md b/i18n/zh/docusaurus-plugin-content-docs/current/administrator/migration/migration-from-kubefed.md index a30f5186..077e6808 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/administrator/migration/migration-from-kubefed.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/administrator/migration/migration-from-kubefed.md @@ -46,8 +46,8 @@ karmadactl join cluster1 --cluster-context cluster1 --karmada-context karmada 假设您如下使用 `kubefedctl` 工具来检查接入集群的状态: -``` -kubectl -n kube-federation-system get kubefedclusters +```bash +$ kubectl -n kube-federation-system get kubefedclusters NAME AGE READY KUBERNETES-VERSION cluster1 1m True v1.21.2 @@ -56,8 +56,8 @@ cluster2 1m True v1.22.0 现在通过 Karmada,您可以用 `karmadactl` 工具达到同样的效果: -``` -kubectl get clusters +```bash +$ kubectl get clusters NAME VERSION MODE READY AGE member1 v1.20.7 Push True 66s diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/developers/customize-karmada-scheduler.md b/i18n/zh/docusaurus-plugin-content-docs/current/developers/customize-karmada-scheduler.md index 8123a732..9af75390 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/developers/customize-karmada-scheduler.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/developers/customize-karmada-scheduler.md @@ -138,7 +138,7 @@ make image-karmada-scheduler ``` ```shell -kubectl --kubeconfig ~/.kube/karmada.config --context karmada-host 
edit deploy/karmada-scheduler -nkarmada-system +$ kubectl --kubeconfig ~/.kube/karmada.config --context karmada-host edit deploy/karmada-scheduler -nkarmada-system ... spec: automountServiceAccountToken: false @@ -173,7 +173,7 @@ I0408 12:57:14.565008 1 registry.go:79] Enable Scheduler plugin "TestFilte 例如,以下的配置将会关闭`TestFilter`插件。 ```shell -kubectl --kubeconfig ~/.kube/karmada.config --context karmada-host edit deploy/karmada-scheduler -nkarmada-system +$ kubectl --kubeconfig ~/.kube/karmada.config --context karmada-host edit deploy/karmada-scheduler -nkarmada-system ... spec: automountServiceAccountToken: false diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/developers/document-releasing.md b/i18n/zh/docusaurus-plugin-content-docs/current/developers/document-releasing.md index 786c243d..c27141a5 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/developers/document-releasing.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/developers/document-releasing.md @@ -52,8 +52,8 @@ go build ./hack/tools/gencomponentdocs/. 1. 更新 versions.json ```shell -cd website/ -vim versions.json +$ cd website/ +$ vim versions.json [ v1.5 # add a new version tag diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/developers/performance-test-setup-for-karmada.md b/i18n/zh/docusaurus-plugin-content-docs/current/developers/performance-test-setup-for-karmada.md index d24245a4..947b2b20 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/developers/performance-test-setup-for-karmada.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/developers/performance-test-setup-for-karmada.md @@ -56,7 +56,7 @@ kubectl apply -f fakekubelet.yml `kubectl get node` You will find fake nodes. ```shell -> kubectl get node -o wide +$ kubectl get node -o wide NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME fake-0 Ready agent 10s fake 10.88.0.136 fake-1 Ready agent 10s fake 10.88.0.136 @@ -68,7 +68,7 @@ fake-4 Ready agent 10s fake 10.88.0.136 kubectl apply -f - < kubectl get pod -o wide +$ kubectl get pod -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES fake-pod-78884479b7-52qcx 1/1 Running 0 6s 10.0.0.23 fake-4 fake-pod-78884479b7-bd6nk 1/1 Running 0 6s 10.0.0.13 fake-2 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/developers/profiling-karmada.md b/i18n/zh/docusaurus-plugin-content-docs/current/developers/profiling-karmada.md index 4353c27c..89344de3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/developers/profiling-karmada.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/developers/profiling-karmada.md @@ -40,7 +40,7 @@ The HTTP endpoint will now be available as a local port. 
You can then generate the file for the memory profile with curl and pipe the data to a file: ```shell -$ curl http://localhost:6060/debug/pprof/heap > heap.pprof +curl http://localhost:6060/debug/pprof/heap > heap.pprof ``` Generate the file for the CPU profile with curl and pipe the data to a file (7200 seconds is two hours): diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/get-started/nginx-example.md b/i18n/zh/docusaurus-plugin-content-docs/current/get-started/nginx-example.md index 8951026f..7d066739 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/get-started/nginx-example.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/get-started/nginx-example.md @@ -29,7 +29,7 @@ cd karmada 运行以下脚本: ``` -# hack/local-up-karmada.sh +hack/local-up-karmada.sh ``` 该脚本将为你执行以下任务: - 启动一个 Kubernetes 集群来运行 Karmada 控制面,即 `host cluster`。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/installation/install-binary.md b/i18n/zh/docusaurus-plugin-content-docs/current/installation/install-binary.md index 7b75085e..a9296f17 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/installation/install-binary.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/installation/install-binary.md @@ -29,7 +29,7 @@ title: 通过二进制方式安装 对 `karmada-01`、`karmada-02`、`karmada-03` 执行操作。 ```bash -vi /etc/hosts +$ vi /etc/hosts 172.31.209.245 karmada-01 172.31.209.246 karmada-02 172.31.209.247 karmada-03 @@ -129,9 +129,9 @@ mv /usr/local/karmada-nginx/sbin/nginx /usr/local/karmada-nginx/sbin/karmada-ngi ### 步骤 3:运行 Shell 脚本 ```bash -$ ./generate_ca.sh -$ ./generate_leaf.sh ca_cert/ -$ ./generate_etcd.sh +./generate_ca.sh +./generate_leaf.sh ca_cert/ +./generate_etcd.sh ``` @@ -317,7 +317,7 @@ systemctl status etcd.service ### 验证 ```bash -etcdctl --cacert /etc/karmada/pki/etcd/ca.crt \ +$ etcdctl --cacert /etc/karmada/pki/etcd/ca.crt \ --cert /etc/karmada/pki/etcd/healthcheck-client.crt \ --key /etc/karmada/pki/etcd/healthcheck-client.key \ --endpoints "172.31.209.245:2379,172.31.209.246:2379,172.31.209.247:2379" \ @@ -520,7 +520,7 @@ kubectl create clusterrolebinding cluster-admin:karmada --clusterrole=cluster-ad 修改 `nginx` 配置并添加以下配置。对 `karmada-01` 执行以下操作。 ```bash -cat /usr/local/karmada-nginx/conf/nginx.conf +$ cat /usr/local/karmada-nginx/conf/nginx.conf worker_processes 2; events { @@ -861,7 +861,7 @@ ok 修改 `nginx` 配置并添加以下配置。对 `karmada-01` 执行以下操作。 ```bash -cat /usr/local/karmada-nginx/conf/nginx.conf +$ cat /usr/local/karmada-nginx/conf/nginx.conf worker_processes 2; events { diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/installation/installation.md b/i18n/zh/docusaurus-plugin-content-docs/current/installation/installation.md index 49710b25..069ddf7c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/installation/installation.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/installation/installation.md @@ -71,7 +71,7 @@ Step 2: Show members of karmada Karmada 的组件默认安装在 `karmada-system` 命名空间中,你可以通过以下命令查看: ```bash -kubectl get deployments -n karmada-system +$ kubectl get deployments -n karmada-system NAME READY UP-TO-DATE AVAILABLE AGE karmada-aggregated-apiserver 1/1 1 1 102s karmada-apiserver 1/1 1 1 2m34s @@ -82,7 +82,7 @@ kube-controller-manager 1/1 1 1 2m3s ``` `karmada-etcd` 被安装为 `StatefulSet`,通过以下命令查看: ```bash -kubectl get statefulsets -n karmada-system +$ kubectl get statefulsets -n karmada-system NAME READY AGE etcd 1/1 28m ``` @@ -128,7 +128,7 @@ kubectl karmada init --crds https://github.com/karmada-io/karmada/releases/downl 检查已安装的组件: ```bash 
-kubectl get pods -n karmada-system --kubeconfig=$HOME/.kube/host.config +$ kubectl get pods -n karmada-system --kubeconfig=$HOME/.kube/host.config NAME READY STATUS RESTARTS AGE etcd-0 1/1 Running 0 2m55s karmada-aggregated-apiserver-84b45bf9b-n5gnk 1/1 Running 0 109s diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/access-service-across-clusters.md b/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/access-service-across-clusters.md index 2e65236d..c40eff73 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/access-service-across-clusters.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/access-service-across-clusters.md @@ -26,7 +26,7 @@ Note: In order to prevent routing conflicts, Pod and Service CIDRs of clusters n To enable the MultiClusterService feature in the karmada-controller-manager, run the following command: ```shell -$ kubectl --context karmada-host get deploy karmada-controller-manager -n karmada-system -o yaml | sed '/- --v=4/i \ - --feature-gates=MultiClusterService=true' | kubectl --context karmada-host replace -f - +kubectl --context karmada-host get deploy karmada-controller-manager -n karmada-system -o yaml | sed '/- --v=4/i \ - --feature-gates=MultiClusterService=true' | kubectl --context karmada-host replace -f - ``` Please note that the MultiClusterService feature is disabled by default and can be enabled using the `--feature-gates=MultiClusterService=true` flag. diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/autoscaling-with-custom-metrics.md b/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/autoscaling-with-custom-metrics.md index 21b6fde4..695f6a27 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/autoscaling-with-custom-metrics.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/autoscaling-with-custom-metrics.md @@ -88,7 +88,7 @@ kubectl apply -f manifests/ 我们可以通过下面的命令验证安装: ```sh -kubectl --kubeconfig=/root/.kube/members.config --context=member1 get po -nmonitoring +$ kubectl --kubeconfig=/root/.kube/members.config --context=member1 get po -nmonitoring NAME READY STATUS RESTARTS AGE alertmanager-main-0 2/2 Running 0 30h alertmanager-main-1 2/2 Running 0 30h @@ -191,10 +191,10 @@ spec: 部署完成后,您可以检查 Pod 和 Service 的分发情况: ```sh -karmadactl get pods +$ karmadactl get pods NAME CLUSTER READY STATUS RESTARTS AGE sample-app-9b7d8c9f5-xrnfx member1 1/1 Running 0 111s -karmadactl get svc +$ karmadactl get svc NAME CLUSTER TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ADOPTION sample-app member1 ClusterIP 10.11.29.250 80/TCP 3m53s Y ``` @@ -407,7 +407,7 @@ sample-app Deployment sample-app 1 10 1 1 部署完成后,您可以检查多集群 Service: ```sh -karmadactl get svc +$ karmadactl get svc NAME CLUSTER TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ADOPTION derived-sample-app member1 ClusterIP 10.11.59.213 80/TCP 9h Y ``` @@ -426,14 +426,14 @@ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey * 首先检查 Pod 的分发情况。 ```sh - karmadactl get pods + $ karmadactl get pods NAME CLUSTER READY STATUS RESTARTS AGE sample-app-9b7d8c9f5-xrnfx member1 1/1 Running 0 111s ``` * 检查多集群 Service ip。 ```sh - karmadactl get svc + $ karmadactl get svc NAME CLUSTER TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ADOPTION derived-sample-app member1 ClusterIP 10.11.59.213 80/TCP 20m Y ``` @@ -445,7 +445,7 @@ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey * 等待 15 秒,副本将扩容,然后您可以再次检查 Pod 分发状态。 ```sh - karmadactl get po -l app=sample-app + $ karmadactl get po -l app=sample-app NAME 
CLUSTER READY STATUS RESTARTS AGE sample-app-9b7d8c9f5-454vz member2 1/1 Running 0 84s sample-app-9b7d8c9f5-7fjhn member2 1/1 Running 0 69s @@ -463,7 +463,7 @@ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey 1 分钟后,负载测试工具将停止运行,然后您可以看到工作负载在多个集群中缩容。 ```sh -karmadactl get pods -l app=sample-app +$ karmadactl get pods -l app=sample-app NAME CLUSTER READY STATUS RESTARTS AGE sample-app-9b7d8c9f5-xrnfx member1 1/1 Running 0 91m ``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/autoscaling-with-resource-metrics.md b/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/autoscaling-with-resource-metrics.md index f1bbacdd..495b5f12 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/autoscaling-with-resource-metrics.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/autoscaling-with-resource-metrics.md @@ -171,10 +171,10 @@ spec: 部署完成后,您可以检查 Pod 和 Service 的分发情况: ```sh -karmadactl get pods +$ karmadactl get pods NAME CLUSTER READY STATUS RESTARTS AGE nginx-777bc7b6d7-mbdn8 member1 1/1 Running 0 9h -karmadactl get svc +$ karmadactl get svc NAME CLUSTER TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ADOPTION nginx-service member1 ClusterIP 10.11.216.215 80/TCP 9h Y nginx-service member2 ClusterIP 10.13.46.61 80/TCP 9h Y @@ -213,7 +213,7 @@ spec: 部署完成后,您可以检查 FederatedHPA: ```sh -kubectl --kubeconfig $HOME/.kube/karmada.config --context karmada-apiserver get fhpa +$ kubectl --kubeconfig $HOME/.kube/karmada.config --context karmada-apiserver get fhpa NAME REFERENCE-KIND REFERENCE-NAME MINPODS MAXPODS REPLICAS AGE nginx Deployment nginx 1 10 1 9h ``` @@ -272,7 +272,7 @@ nginx Deployment nginx 1 10 1 9h 部署完成后,您可以检查多集群 Service: ```sh -karmadactl get svc +$ karmadactl get svc NAME CLUSTER TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ADOPTION derived-nginx-service member1 ClusterIP 10.11.59.213 80/TCP 9h Y ``` @@ -291,13 +291,13 @@ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey * 首先检查 Pod 的分发情况。 ```sh - karmadactl get pods + $ karmadactl get pods NAME CLUSTER READY STATUS RESTARTS AGE nginx-777bc7b6d7-mbdn8 member1 1/1 Running 0 61m ``` * 检查多集群 Service ip。 ```sh - karmadactl get svc + $ karmadactl get svc NAME CLUSTER TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ADOPTION derived-nginx-service member1 ClusterIP 10.11.59.213 80/TCP 20m Y ``` @@ -309,7 +309,7 @@ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey * 等待 15 秒,副本将扩容,然后您可以再次检查 Pod 分发状态。 ```sh - karmadactl get pods -l app=nginx + $ karmadactl get pods -l app=nginx NAME CLUSTER READY STATUS RESTARTS AGE nginx-777bc7b6d7-c2cfv member1 1/1 Running 0 22s nginx-777bc7b6d7-mbdn8 member1 1/1 Running 0 62m @@ -328,7 +328,7 @@ docker cp hey_linux_amd64 member1-control-plane:/usr/local/bin/hey 1 分钟后,负载测试工具将停止运行,然后您可以看到工作负载在多个集群中缩容。 ```sh -karmadactl get pods -l app=nginx +$ karmadactl get pods -l app=nginx NAME CLUSTER READY STATUS RESTARTS AGE nginx-777bc7b6d7-mbdn8 member1 1/1 Running 0 64m ``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/resource-migration.md b/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/resource-migration.md index 98dc83cf..acc023f4 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/resource-migration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/tutorials/resource-migration.md @@ -18,10 +18,10 @@ title: 平滑迁移 #### 步骤一: 运行命令 ```shell -$ git clone https://github.com/karmada-io/karmada -$ cd karmada -$ hack/local-up-karmada.sh -$ export 
KUBECONFIG=~/.kube/karmada.config:~/.kube/members.config +git clone https://github.com/karmada-io/karmada +cd karmada +hack/local-up-karmada.sh +export KUBECONFIG=~/.kube/karmada.config:~/.kube/members.config ``` > **说明:** @@ -36,7 +36,7 @@ $ export KUBECONFIG=~/.kube/karmada.config:~/.kube/members.config #### 步骤二: 运行命令 ```shell -$ kubectl --context karmada-host get deploy karmada-controller-manager -n karmada-system -o yaml | sed '/- --failover-eviction-timeout=30s/{n;s/- --v=4/- --feature-gates=PropagationPolicyPreemption=true\n &/g}' | kubectl --context karmada-host replace -f - +kubectl --context karmada-host get deploy karmada-controller-manager -n karmada-system -o yaml | sed '/- --failover-eviction-timeout=30s/{n;s/- --v=4/- --feature-gates=PropagationPolicyPreemption=true\n &/g}' | kubectl --context karmada-host replace -f - ``` > **说明:** diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/cicd/working-with-argocd.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/cicd/working-with-argocd.md index 3b1e25b2..d28df28b 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/cicd/working-with-argocd.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/cicd/working-with-argocd.md @@ -15,7 +15,7 @@ In this example, we are using a Karmada environment with at least `3` member clu You can set up the environment by `hack/local-up-karmada.sh`, which is also used to run our E2E cases. ```bash -# kubectl get clusters +$ kubectl get clusters NAME VERSION MODE READY AGE member1 v1.19.1 Push True 18h member2 v1.19.1 Push True 18h diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/clustermanager/cluster-registration.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/clustermanager/cluster-registration.md index bb5fcbb9..bc24d9b5 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/clustermanager/cluster-registration.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/clustermanager/cluster-registration.md @@ -51,7 +51,7 @@ kubectl karmada join member1 --kubeconfig= --karmada-context Check the status of the joined clusters by using the following command. ``` -kubectl get clusters +$ kubectl get clusters NAME VERSION MODE READY AGE member1 v1.20.7 Push True 66s @@ -81,7 +81,7 @@ Be different from the `karmadactl join` which registers a cluster with `Push` mo In Karmada control plane, we can use `karmadactl token create` command to create bootstrap tokens whose default ttl is 24h. ``` -$ karmadactl token create --print-register-command --kubeconfig /etc/karmada/karmada-apiserver.config +karmadactl token create --print-register-command --kubeconfig /etc/karmada/karmada-apiserver.config ``` ``` @@ -97,7 +97,7 @@ More details about `bootstrap token` please refer to: In the Kubernetes control plane of member clusters, we also need the `kubeconfig` file of the member cluster, then directly execute the above output `karmadactl register` command. ``` -$ karmadactl register 10.10.x.x:32443 --token t2jgtm.9nybj0526mjw1jbf --discovery-token-ca-cert-hash sha256:f5a5a43869bb44577dba582e794c3e3750f2050d62f1b1dc80fd3d6a371b6ed4 +karmadactl register 10.10.x.x:32443 --token t2jgtm.9nybj0526mjw1jbf --discovery-token-ca-cert-hash sha256:f5a5a43869bb44577dba582e794c3e3750f2050d62f1b1dc80fd3d6a371b6ed4 ``` ``` @@ -124,7 +124,7 @@ After `karmada-agent` be deployed, it will register cluster automatically at the Check the status of the registered clusters by using the same command above. 
``` -kubectl get clusters +$ kubectl get clusters NAME VERSION MODE READY AGE member3 v1.20.7 Pull True 66s ``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/failover/application-failover.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/failover/application-failover.md index 08da0dc7..77b2af0a 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/failover/application-failover.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/failover/application-failover.md @@ -154,9 +154,9 @@ spec: ```shell # mark node "member2-control-plane" as unschedulable in cluster member2 -$ kubectl --context member2 cordon member2-control-plane +kubectl --context member2 cordon member2-control-plane # delete the pod in cluster member2 -$ kubectl --context member2 delete pod -l app=nginx +kubectl --context member2 delete pod -l app=nginx ``` 你可以立即从 ResourceBinding 中发现应用变成不健康的状态。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/globalview/proxy-global-resource.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/globalview/proxy-global-resource.md index 916f92ee..90641ea3 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/globalview/proxy-global-resource.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/globalview/proxy-global-resource.md @@ -38,7 +38,7 @@ spec: ``` ```shell -$ kubectl --context karmada-apiserver apply -f resourceregistry.yaml +kubectl --context karmada-apiserver apply -f resourceregistry.yaml ``` Based on the ResourceRegistry above, you can access pods and nodes with proxy function. @@ -60,13 +60,13 @@ After processing the above steps, you can access pods and nodes with kubectl. Taking getting the pod log as an example, you can use the following command: ```shell -$ kubectl logs -n +kubectl logs -n ``` Specifying `--raw` as following has the same effect: ```shell -$ kubectl get --raw /apis/search.karmada.io/v1alpha1/proxying/karmada/proxy/api/v1/namespaces//pods//log +kubectl get --raw /apis/search.karmada.io/v1alpha1/proxying/karmada/proxy/api/v1/namespaces//pods//log ``` Enjoy it! diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/descheduler.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/descheduler.md index c2fdaacf..93b9b92f 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/descheduler.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/descheduler.md @@ -50,7 +50,7 @@ After all member clusters have joined and estimators are all ready, specify the ```bash # edit the deployment of karmada-scheduler -$ kubectl --context karmada-host -n karmada-system edit deployments.apps karmada-scheduler +kubectl --context karmada-host -n karmada-system edit deployments.apps karmada-scheduler ``` Add the option `--enable-scheduler-estimator=true` into the command of container `karmada-scheduler`. @@ -121,9 +121,9 @@ Now we taint all nodes in member1 and evict the replica. ```bash # mark node "member1-control-plane" as unschedulable in cluster member1 -$ kubectl --context member1 cordon member1-control-plane +kubectl --context member1 cordon member1-control-plane # delete the pod in cluster member1 -$ kubectl --context member1 delete pod -l app=nginx +kubectl --context member1 delete pod -l app=nginx ``` A new pod will be created and cannot be scheduled by `kube-scheduler` due to lack of resources. 
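You can observe this before the descheduler reacts; a quick check, assuming the same example workload (an `nginx` Deployment labeled `app=nginx` in cluster `member1`):

```bash
# The recreated replica stays Pending because every node in member1 is cordoned
kubectl --context member1 get pods -l app=nginx
```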
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/resource-propagating.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/resource-propagating.md index 2d9b4d9d..b32d1f0e 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/resource-propagating.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/resource-propagating.md @@ -359,7 +359,7 @@ By leveraging the spread-by-region constraint, users are able to deploy workload To enable multi region deployment, you should use the command below to customize the region of clusters. ```shell -kubectl --kubeconfig ~/.kube/karmada.config --context karmada-apiserver edit cluster/member1 +$ kubectl --kubeconfig ~/.kube/karmada.config --context karmada-apiserver edit cluster/member1 ... spec: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/scheduler-estimator.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/scheduler-estimator.md index fb77d5df..3ef4d1f1 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/scheduler-estimator.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/scheduling/scheduler-estimator.md @@ -40,7 +40,7 @@ After all member clusters have been joined and estimators are all ready, please ```bash # edit the deployment of karmada-scheduler -$ kubectl --context karmada-host edit -n karmada-system deployments.apps karmada-scheduler +kubectl --context karmada-host edit -n karmada-system deployments.apps karmada-scheduler ``` And then add the option `--enable-scheduler-estimator=true` into the command of container `karmada-scheduler`. diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/security-governance/working-with-gatekeeper.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/security-governance/working-with-gatekeeper.md index fb88b53a..bd01d6cc 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/security-governance/working-with-gatekeeper.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/security-governance/working-with-gatekeeper.md @@ -470,8 +470,8 @@ In this case, you will use Gatekeeper v3.7.2. 
Related deployment files are from ### Create a bad namespace - ```console - kubectl create ns test + ```bash + $ kubectl create ns test Error from server ([ns-must-have-gk] you must provide labels: {"gatekeepers"}): admission webhook "validation.gatekeeper.sh" denied the request: [ns-must-have-gk] you must provide labels: {"gatekeepers"} ``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/service/multi-cluster-service.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/service/multi-cluster-service.md index 35fee562..a545fb5d 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/service/multi-cluster-service.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/service/multi-cluster-service.md @@ -192,7 +192,7 @@ kubernetes ClusterIP 10.13.0.1 443/TCP 15m ``` ```shell -$ kubectl --kubeconfig ~/.kube/members.config --context member2 run -i --rm --restart=Never --image=jeremyot/request:0a40de8 request -- --duration={duration-time} --address={ClusterIP of derived service} +kubectl --kubeconfig ~/.kube/members.config --context member2 run -i --rm --restart=Never --image=jeremyot/request:0a40de8 request -- --duration={duration-time} --address={ClusterIP of derived service} ``` 例如,如果我们使用ClusterIP地址`10.13.205.2`持续访问该服务3s,将会得到如下输出: diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/service/working-with-eriecanal.md b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/service/working-with-eriecanal.md index 87844222..1c71541c 100644 --- a/i18n/zh/docusaurus-plugin-content-docs/current/userguide/service/working-with-eriecanal.md +++ b/i18n/zh/docusaurus-plugin-content-docs/current/userguide/service/working-with-eriecanal.md @@ -71,7 +71,7 @@ kubectl --kubeconfig PATH_TO_KARMADA_CONFIG apply -f https://raw.githubuserconte 在控制面集群 control-plane 使用 Karmada apiserver 的 config 可查看集群的注册信息。 ```shell -kubectl --kubeconfig PATH_TO_KARMADA_CONFIG get cluster +$ kubectl --kubeconfig PATH_TO_KARMADA_CONFIG get cluster NAME VERSION MODE READY AGE cluster-1 v1.23.8+k3s2 Push True 154m cluster-2 v1.23.8+k3s2 Push True 154m @@ -116,7 +116,7 @@ EOF 注册完成后,可在控制面集群 control-plane 查看成员集群的注册信息: ```shell -kubectl get cluster +$ kubectl get cluster NAME REGION ZONE GROUP GATEWAY HOST GATEWAY PORT MANAGED MANAGED AGE AGE local default default default 80 159m cluster-1 default default default 10.0.2.4 80 True 159m 159m @@ -153,7 +153,7 @@ fsm install \ 执行命令可常看集群中安装的服务网格版本等信息。 ```shell -fsm version +$ fsm version CLI Version: version.Info{Version:"v1.0.0", GitCommit:"9966a2b031c862b54b4b007eae35ee16afa31a80", BuildDate:"2023-05-29-12:10"} MESH NAME MESH NAMESPACE VERSION GIT COMMIT BUILD DATE @@ -235,7 +235,7 @@ EOF 在 Karmada 控制面创建资源后,还需要创建 `PropagationPolicy` 策略来对资源进行分发,我们将 `Deployment` 和 `Service` 分发到成员集群 `cluster-1` 和 `cluster-3`。 ```shell -$kmd apply -n httpbin -f - < istio-remote-secret-member2.yaml Export `KUBECONFIG` and switch to `karmada apiserver`: -``` -# export KUBECONFIG=$HOME/.kube/karmada.config - -# kubectl config use-context karmada-apiserver +```bash +export KUBECONFIG=$HOME/.kube/karmada.config +kubectl config use-context karmada-apiserver ``` Apply istio remote secret: @@ -285,10 +283,9 @@ EOF Export `KUBECONFIG` and switch to `karmada apiserver`: -``` -# export KUBECONFIG=$HOME/.kube/karmada.config - -# kubectl config use-context karmada-apiserver +```bash +export KUBECONFIG=$HOME/.kube/karmada.config +kubectl config use-context karmada-apiserver ``` Create an `istio-demo` namespace:
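A minimal sketch of that step, assuming the namespace name given above and the `karmada-apiserver` context already selected:

```bash
# Create the demo namespace on the Karmada control plane
kubectl create namespace istio-demo
```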