diff --git a/404.html b/404.html index 25808998e..331de7b4f 100644 --- a/404.html +++ b/404.html @@ -5,13 +5,13 @@ 找不到页面 | Framework as a Building Block for Kubernetes - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/f665e660.9b352357.js b/assets/js/f665e660.5c98f5cc.js similarity index 85% rename from assets/js/f665e660.9b352357.js rename to assets/js/f665e660.5c98f5cc.js index c097bfa80..24b31f33f 100644 --- a/assets/js/f665e660.9b352357.js +++ b/assets/js/f665e660.5c98f5cc.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2437],{3905:(e,t,n)=>{n.d(t,{Zo:()=>s,kt:()=>b});var r=n(7294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var c=r.createContext({}),p=function(e){var t=r.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=p(e.components);return r.createElement(c.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),u=p(n),d=a,b=u["".concat(c,".").concat(d)]||u[d]||m[d]||o;return n?r.createElement(b,i(i({ref:t},s),{},{components:n})):r.createElement(b,i({ref:t},s))}));function b(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=d;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l[u]="string"==typeof e?e:a,i[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>l,toc:()=>p});var r=n(7462),a=(n(7294),n(3905));const o={sidebar_position:5},i="\u7ec4\u4ef6\u5f00\u53d1",l={unversionedId:"core/userguide/component-dev",id:"core/userguide/component-dev",title:"\u7ec4\u4ef6\u5f00\u53d1",description:"Kubebb\u7684\u7ec4\u4ef6\u5b89\u88c5\u5305\u91c7\u7528Helm\u6a21\u5f0f\uff0c\u9075\u5faaHelm charts\u5f00\u53d1\u89c4\u5219\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u6211\u4eec\u989d\u5916\u5b9a\u4e49\u6dfb\u52a0\u4e86\u4e00\u4e9b\u7279\u6b8a\u5b57\u6bb5\u6765\u6ee1\u8db3\u4e00\u4e9b\u7ec4\u4ef6\u7684\u7279\u6b8a\u6027\u3002",source:"@site/docs/core/userguide/component-dev.md",sourceDirName:"core/userguide",slug:"/core/userguide/component-dev",permalink:"/website/docs/core/userguide/component-dev",draft:!1,tags:[],version:"current",sidebarPosition:5,frontMatter:{sidebar_position:5},sidebar:"tutorialSidebar",previous:{title:"\u4ece Helm \u547d\u4ee4\u8fc1\u79fb",permalink:"/website/docs/core/userguide/helmtofuture"},next:{title:"\u7ec4\u4ef6\u8bc4\u7ea7",permalink:"/website/docs/core/rating"}},c={},p=[{value:"\u7ec4\u4ef6\u7c7b\u578b",id:"\u7ec4\u4ef6\u7c7b\u578b",level:2},{value:"\u901a\u7528\u914d\u7f6e",id:"\u901a\u7528\u914d\u7f6e",level:2},{value:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e",id:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e",level:2},{value:"Chart.yaml",id:"chartyaml",level:3}],s={toc:p},u="wrapper";function 
m(e){let{components:t,...n}=e;return(0,a.kt)(u,(0,r.Z)({},s,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"\u7ec4\u4ef6\u5f00\u53d1"},"\u7ec4\u4ef6\u5f00\u53d1"),(0,a.kt)("p",null,"Kubebb\u7684\u7ec4\u4ef6\u5b89\u88c5\u5305\u91c7\u7528",(0,a.kt)("inlineCode",{parentName:"p"},"Helm"),"\u6a21\u5f0f\uff0c\u9075\u5faaHelm charts\u5f00\u53d1\u89c4\u5219\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u6211\u4eec\u989d\u5916\u5b9a\u4e49\u6dfb\u52a0\u4e86\u4e00\u4e9b\u7279\u6b8a\u5b57\u6bb5\u6765\u6ee1\u8db3\u4e00\u4e9b\u7ec4\u4ef6\u7684\u7279\u6b8a\u6027\u3002"),(0,a.kt)("h2",{id:"\u7ec4\u4ef6\u7c7b\u578b"},"\u7ec4\u4ef6\u7c7b\u578b"),(0,a.kt)("p",null,"\u4ece\u529f\u80fd\u89d2\u5ea6\uff0c\u6211\u4eec\u5c06\u7ec4\u4ef6\u5212\u5206\u4e3a\u4e24\u7c7b:"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"\u7cfb\u7edf\u7ec4\u4ef6,\u5982U4A\u3001TMF\u7b49,\u7ec4\u4ef6\u7684\u8fd0\u884c\u9700\u8981\u7cfb\u7edf\u7ba1\u7406\u6743\u9650")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"\u666e\u901a\u529f\u80fd\u7ec4\u4ef6\uff0c\u5982minio\u3001weaviate\u7b49\uff0c\u7ec4\u4ef6\u53ef\u8fd0\u884c\u5728\u4efb\u4f55",(0,a.kt)("inlineCode",{parentName:"p"},"\u79df\u6237-\u9879\u76ee"),"\u4e2d\uff0c\u6ca1\u6709\u7279\u6b8a\u9650\u5236"))),(0,a.kt)("h2",{id:"\u901a\u7528\u914d\u7f6e"},"\u901a\u7528\u914d\u7f6e"),(0,a.kt)("p",null,"\u53c2\u8003",(0,a.kt)("a",{parentName:"p",href:"https://helm.sh/docs/"},"Helm\u5b98\u65b9\u6587\u6863")),(0,a.kt)("h2",{id:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e"},"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e"),(0,a.kt)("p",null,"\u4e3a\u652f\u6301\u4e0d\u540c\u7ec4\u4ef6\u5bf9\u5b89\u88c5\u4f4d\u7f6e\u3001\u6743\u9650\u7684\u53ef\u63a7\uff0c\u7279\u6b64\u989d\u5916\u7ea6\u5b9a\u4e86\u591a\u4e2a\u914d\u7f6e\u5b57\u6bb5"),(0,a.kt)("h3",{id:"chartyaml"},"Chart.yaml"),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"Chart.yaml"),"\u4e2d\u5305\u542b\u7ec4\u4ef6\u7684\u6838\u5fc3\u5b9a\u4e49\u3001\u7248\u672c\u3001\u7ef4\u62a4\u8005\u7b49\u4fe1\u606f\uff0c\u5c5e\u4e8e",(0,a.kt)("inlineCode",{parentName:"p"},"Helm"),"\u9884\u5b9a\u4e49\u7684\u5185\u5bb9\u3002\u4e3a\u4e86\u652f\u6301\u989d\u5916\u7684\u7279\u6b8a\u9700\u6c42\uff0c\u6211\u4eec\u51b3\u5b9a\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"p"},"annotations"),"\u6765\u81ea\u7531\u5b9a\u4e49\u3002\u5982\u4e0b\u6240\u793a:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},'annotations:\n core.kubebb.k8s.com.cn/displayname: "\u5185\u6838"\n core.kubebb.k8s.com.cn/restrict-tenants: "system-tenant"\n core.kubebb.k8s.com.cn/restricted-namespaces: "msa-system"\n')),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"core.kubebb.k8s.com.cn/displayname"),": \u7528\u4e8e\u586b\u5145\u7ec4\u4ef6\u7684\u5c55\u793a\u540d\uff0c\u652f\u6301\u4e2d\u82f1\u6587"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"core.kubebb.k8s.com.cn/restrict-tenants"),": \u7528\u4e8e\u8bbe\u7f6e\u7ec4\u4ef6\u5b89\u88c5\u4f4d\u7f6e\u7684\u9650\u5236\u79df\u6237\uff0c\u591a\u4e2a\u79df\u6237\u9700\u8981\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"li"},","),"\u9694\u5f00"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"ore.kubebb.k8s.com.cn/restricted-namespaces"),": 
\u7528\u4e8e\u8bbe\u7f6e\u7ec4\u4ef6\u5b89\u88c5\u4f4d\u7f6e\u7684\u9650\u5236\u9879\u76ee/\u547d\u540d\u7a7a\u95f4\uff0c\u591a\u4e2a\u547d\u540d\u7a7a\u95f4\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"li"},","),"\u9694\u5f00")))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2437],{3905:(e,t,n)=>{n.d(t,{Zo:()=>s,kt:()=>b});var r=n(7294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var c=r.createContext({}),p=function(e){var t=r.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=p(e.components);return r.createElement(c.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),u=p(n),d=a,b=u["".concat(c,".").concat(d)]||u[d]||m[d]||o;return n?r.createElement(b,i(i({ref:t},s),{},{components:n})):r.createElement(b,i({ref:t},s))}));function b(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=d;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l[u]="string"==typeof e?e:a,i[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>l,toc:()=>p});var r=n(7462),a=(n(7294),n(3905));const o={sidebar_position:5},i="\u7ec4\u4ef6\u5f00\u53d1",l={unversionedId:"core/userguide/component-dev",id:"core/userguide/component-dev",title:"\u7ec4\u4ef6\u5f00\u53d1",description:"Kubebb\u7684\u7ec4\u4ef6\u5b89\u88c5\u5305\u91c7\u7528Helm\u6a21\u5f0f\uff0c\u9075\u5faaHelm charts\u5f00\u53d1\u89c4\u5219\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u6211\u4eec\u989d\u5916\u5b9a\u4e49\u6dfb\u52a0\u4e86\u4e00\u4e9b\u7279\u6b8a\u5b57\u6bb5\u6765\u6ee1\u8db3\u4e00\u4e9b\u7ec4\u4ef6\u7684\u7279\u6b8a\u6027\u3002",source:"@site/docs/core/userguide/component-dev.md",sourceDirName:"core/userguide",slug:"/core/userguide/component-dev",permalink:"/website/docs/core/userguide/component-dev",draft:!1,tags:[],version:"current",sidebarPosition:5,frontMatter:{sidebar_position:5},sidebar:"tutorialSidebar",previous:{title:"\u4ece Helm \u547d\u4ee4\u8fc1\u79fb",permalink:"/website/docs/core/userguide/helmtofuture"},next:{title:"\u7ec4\u4ef6\u8bc4\u7ea7",permalink:"/website/docs/core/rating"}},c={},p=[{value:"\u7ec4\u4ef6\u7c7b\u578b",id:"\u7ec4\u4ef6\u7c7b\u578b",level:2},{value:"\u901a\u7528\u914d\u7f6e",id:"\u901a\u7528\u914d\u7f6e",level:2},{value:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e",id:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e",level:2},{value:"Chart.yaml",id:"chartyaml",level:3}],s={toc:p},u="wrapper";function 
m(e){let{components:t,...n}=e;return(0,a.kt)(u,(0,r.Z)({},s,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"\u7ec4\u4ef6\u5f00\u53d1"},"\u7ec4\u4ef6\u5f00\u53d1"),(0,a.kt)("p",null,"Kubebb\u7684\u7ec4\u4ef6\u5b89\u88c5\u5305\u91c7\u7528",(0,a.kt)("inlineCode",{parentName:"p"},"Helm"),"\u6a21\u5f0f\uff0c\u9075\u5faaHelm charts\u5f00\u53d1\u89c4\u5219\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u6211\u4eec\u989d\u5916\u5b9a\u4e49\u6dfb\u52a0\u4e86\u4e00\u4e9b\u7279\u6b8a\u5b57\u6bb5\u6765\u6ee1\u8db3\u4e00\u4e9b\u7ec4\u4ef6\u7684\u7279\u6b8a\u6027\u3002"),(0,a.kt)("h2",{id:"\u7ec4\u4ef6\u7c7b\u578b"},"\u7ec4\u4ef6\u7c7b\u578b"),(0,a.kt)("p",null,"\u4ece\u529f\u80fd\u89d2\u5ea6\uff0c\u6211\u4eec\u5c06\u7ec4\u4ef6\u5212\u5206\u4e3a\u4e24\u7c7b:"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"\u7cfb\u7edf\u7ec4\u4ef6,\u5982U4A\u3001TMF\u7b49,\u7ec4\u4ef6\u7684\u8fd0\u884c\u9700\u8981\u7cfb\u7edf\u7ba1\u7406\u6743\u9650")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"\u666e\u901a\u529f\u80fd\u7ec4\u4ef6\uff0c\u5982minio\u3001weaviate\u7b49\uff0c\u7ec4\u4ef6\u53ef\u8fd0\u884c\u5728\u4efb\u4f55",(0,a.kt)("inlineCode",{parentName:"p"},"\u79df\u6237-\u9879\u76ee"),"\u4e2d\uff0c\u6ca1\u6709\u7279\u6b8a\u9650\u5236"))),(0,a.kt)("h2",{id:"\u901a\u7528\u914d\u7f6e"},"\u901a\u7528\u914d\u7f6e"),(0,a.kt)("p",null,"\u53c2\u8003",(0,a.kt)("a",{parentName:"p",href:"https://helm.sh/docs/"},"Helm\u5b98\u65b9\u6587\u6863")),(0,a.kt)("h2",{id:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e"},"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e"),(0,a.kt)("p",null,"\u4e3a\u652f\u6301\u4e0d\u540c\u7ec4\u4ef6\u5bf9\u5b89\u88c5\u4f4d\u7f6e\u3001\u6743\u9650\u7684\u53ef\u63a7\uff0c\u7279\u6b64\u989d\u5916\u7ea6\u5b9a\u4e86\u591a\u4e2a\u914d\u7f6e\u5b57\u6bb5"),(0,a.kt)("h3",{id:"chartyaml"},"Chart.yaml"),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"Chart.yaml"),"\u4e2d\u5305\u542b\u7ec4\u4ef6\u7684\u6838\u5fc3\u5b9a\u4e49\u3001\u7248\u672c\u3001\u7ef4\u62a4\u8005\u7b49\u4fe1\u606f\uff0c\u5c5e\u4e8e",(0,a.kt)("inlineCode",{parentName:"p"},"Helm"),"\u9884\u5b9a\u4e49\u7684\u5185\u5bb9\u3002\u4e3a\u4e86\u652f\u6301\u989d\u5916\u7684\u7279\u6b8a\u9700\u6c42\uff0c\u6211\u4eec\u51b3\u5b9a\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"p"},"annotations"),"\u6765\u81ea\u7531\u5b9a\u4e49\u3002\u5982\u4e0b\u6240\u793a:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},'annotations:\n core.kubebb.k8s.com.cn/displayname: "\u5185\u6838"\n core.kubebb.k8s.com.cn/restricted-tenants: "system-tenant"\n core.kubebb.k8s.com.cn/restricted-namespaces: "msa-system"\n')),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"core.kubebb.k8s.com.cn/displayname"),": \u7528\u4e8e\u586b\u5145\u7ec4\u4ef6\u7684\u5c55\u793a\u540d\uff0c\u652f\u6301\u4e2d\u82f1\u6587"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"core.kubebb.k8s.com.cn/restrict-tenants"),": \u7528\u4e8e\u8bbe\u7f6e\u7ec4\u4ef6\u5b89\u88c5\u4f4d\u7f6e\u7684\u9650\u5236\u79df\u6237\uff0c\u591a\u4e2a\u79df\u6237\u9700\u8981\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"li"},","),"\u9694\u5f00"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"ore.kubebb.k8s.com.cn/restricted-namespaces"),": 
\u7528\u4e8e\u8bbe\u7f6e\u7ec4\u4ef6\u5b89\u88c5\u4f4d\u7f6e\u7684\u9650\u5236\u9879\u76ee/\u547d\u540d\u7a7a\u95f4\uff0c\u591a\u4e2a\u547d\u540d\u7a7a\u95f4\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"li"},","),"\u9694\u5f00")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.73b6391a.js b/assets/js/runtime~main.0c80f13a.js similarity index 99% rename from assets/js/runtime~main.73b6391a.js rename to assets/js/runtime~main.0c80f13a.js index 2771f074c..0844c40bf 100644 --- a/assets/js/runtime~main.73b6391a.js +++ b/assets/js/runtime~main.0c80f13a.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,f,b,c,d={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var f=t[e]={id:e,loaded:!1,exports:{}};return d[e].call(f.exports,f,f.exports,r),f.loaded=!0,f.exports}r.m=d,r.c=t,e=[],r.O=(a,f,b,c)=>{if(!f){var d=1/0;for(i=0;i=c)&&Object.keys(r.O).every((e=>r.O[e](f[o])))?f.splice(o--,1):(t=!1,c0&&e[i-1][2]>c;i--)e[i]=e[i-1];e[i]=[f,b,c]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,b){if(1&b&&(e=this(e)),8&b)return e;if("object"==typeof e&&e){if(4&b&&e.__esModule)return e;if(16&b&&"function"==typeof e.then)return e}var c=Object.create(null);r.r(c);var d={};a=a||[null,f({}),f([]),f(f)];for(var t=2&b&&e;"object"==typeof t&&!~a.indexOf(t);t=f(t))Object.getOwnPropertyNames(t).forEach((a=>d[a]=()=>e[a]));return d.default=()=>e,r.d(c,d),c},r.d=(e,a)=>{for(var f in a)r.o(a,f)&&!r.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,f)=>(r.f[f](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",277:"c4348237",406:"ead696ea",638:"2be451b9",813:"5d9ef359",948:"8717b14a",1075:"18874b20",1129:"c2c9b4b3",1500:"b81173c6",1505:"7c2ff145",1649:"8d745e6e",1669:"c64e4644",1914:"d9f32620",1915:"a9a78b31",2217:"8aa54216",2237:"acf368fd",2267:"59362658",2362:"e273c56f",2437:"f665e660",2535:"814f3328",2774:"22646bfb",3085:"1f391b9e",3089:"a6aa9e1f",3497:"c650a001",3514:"73664a40",3589:"6d8d5af3",3608:"9e4087bc",3998:"e83fc973",4013:"01a85c17",4094:"d54d1d36",4154:"300d3bf9",4193:"c4f5d8e4",4195:"59db27b9",4288:"ad895e75",4456:"b42f4d8b",4776:"552535a5",4823:"cd7e3398",5062:"69369ae2",5070:"62fa1b0b",5093:"b0dde2ea",5662:"3c7910a2",5822:"07c7815c",6004:"a76bfff0",6103:"ccc49370",6287:"e90a2c2a",6306:"1d408bef",6320:"647f233e",6353:"c00468a4",6513:"1bba06cb",6741:"791926a5",6846:"710d6d2e",6890:"41ebaef0",6900:"0b2479c4",7133:"f03e9ca9",7331:"e6809710",7414:"393be207",7492:"81fe2174",7521:"ac63f98d",7527:"f345e2d0",7530:"a95e9274",7745:"085a15b4",7818:"037ceaed",7918:"17896441",7943:"a35a66a7",8221:"1c59ea81",8428:"807573a4",8610:"6875c492",8636:"f4f34a3a",8841:"b46b210f",8989:"23822d86",8994:"9546be45",9003:"925b3f96",9067:"c4afd168",9243:"9115b1fc",9449:"3724ddc1",9514:"1be78505",9541:"422f8ca8",9594:"a650ca47",9642:"7661071f",9671:"0e384e19",9735:"4ba7e5a3",9748:"22167790",9817:"14eb3368",9889:"f1f0d3d7"}[e]||e)+"."+{53:"cae20f2f",210:"4d2f5804",277:"4c140266",406:"5e49fbe6",638:"04ee09c2",813:"464c1236",948:"f7dcbeb5",1075:"ec6ab114",1129:"89c59f08",1500:"58453c83",1505:"d5ffaf36",1649:"ca19f709",1669:"78a6930d",1914:"80015ae3",1915:"8f25754e",2217:"975543ac",2237:"4804172f",2267:"21996e9c",2362:"91d595a4",2437:"9b352357",2529:"da2bcb01",2535:"ebb147e0",2774:"99024fc4",3085:"f8464388",3089:"1e1af270",3497:"a4baaa97",3514:"82f400ba",3589:"2056914d",3608:"9a815895",3998:"a5d42404",4013:"5653d10a",4094:"06b9
88da",4154:"1ab8b83e",4193:"0db3bdb7",4195:"19d4c7b9",4288:"46e8aac9",4456:"e25ccbd4",4776:"62c3c6be",4823:"f04af052",4972:"9374abde",5062:"f229d6b9",5070:"b5b41b52",5093:"81aa7886",5662:"96cd3189",5822:"938e251e",6004:"40037a3e",6103:"5cfe080a",6287:"5bd29202",6306:"70cf2055",6320:"a270a649",6353:"eb6dcaea",6513:"8ad7d849",6741:"fe0ced91",6846:"165dcede",6890:"cc56ef24",6900:"1bae7804",7133:"3c6e1331",7331:"f4db3540",7414:"7cc220a5",7492:"d89689e4",7521:"b3888073",7527:"7d7c7b04",7530:"c0534094",7745:"0dabe575",7818:"82155e3c",7918:"bacd5894",7943:"fe316c62",8221:"7bf39220",8428:"fdfff05a",8610:"da158881",8636:"30f136ee",8841:"d222566a",8989:"edd20c3f",8994:"921e114b",9003:"dc312b15",9067:"86787076",9243:"3ccb976d",9449:"992a3138",9514:"82b3557a",9541:"f766c998",9594:"4783b2bb",9642:"7d45a9e1",9671:"7e7a9980",9735:"4d5b2a4b",9748:"e613f018",9817:"3bb53ce2",9889:"c19b5071"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),b={},c="website:",r.l=(e,a,f,d)=>{if(b[e])b[e].push(a);else{var t,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var c=b[e];if(delete b[e],t.parentNode&&t.parentNode.removeChild(t),c&&c.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/website/",r.gca=function(e){return e={17896441:"7918",22167790:"9748",59362658:"2267","935f2afb":"53",c4348237:"277",ead696ea:"406","2be451b9":"638","5d9ef359":"813","8717b14a":"948","18874b20":"1075",c2c9b4b3:"1129",b81173c6:"1500","7c2ff145":"1505","8d745e6e":"1649",c64e4644:"1669",d9f32620:"1914",a9a78b31:"1915","8aa54216":"2217",acf368fd:"2237",e273c56f:"2362",f665e660:"2437","814f3328":"2535","22646bfb":"2774","1f391b9e":"3085",a6aa9e1f:"3089",c650a001:"3497","73664a40":"3514","6d8d5af3":"3589","9e4087bc":"3608",e83fc973:"3998","01a85c17":"4013",d54d1d36:"4094","300d3bf9":"4154",c4f5d8e4:"4193","59db27b9":"4195",ad895e75:"4288",b42f4d8b:"4456","552535a5":"4776",cd7e3398:"4823","69369ae2":"5062","62fa1b0b":"5070",b0dde2ea:"5093","3c7910a2":"5662","07c7815c":"5822",a76bfff0:"6004",ccc49370:"6103",e90a2c2a:"6287","1d408bef":"6306","647f233e":"6320",c00468a4:"6353","1bba06cb":"6513","791926a5":"6741","710d6d2e":"6846","41ebaef0":"6890","0b2479c4":"6900",f03e9ca9:"7133",e6809710:"7331","393be207":"7414","81fe2174":"7492",ac63f98d:"7521",f345e2d0:"7527",a95e9274:"7530","085a15b4":"7745","037ceaed":"7818",a35a66a7:"7943","1c59ea81":"8221","807573a4":"8428","6875c492":"8610",f4f34a3a:"8636",b46b210f:"8841","23822d86":"8989","9546be45":"8994","925b3f96":"9003",c4afd168:"9067","9115b1fc":"9243","3724ddc1":"9449","1be78505":"9514","422f8ca8":"9541",a650ca47:"9594","7661071f":"9642","0e384e19":"9671","4ba7e5a3":"9735","14eb3368":"9817",f1f0d3d7:"9889"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,f)=>{var b=r.o(e,a)?e[a]:void 0;if(0!==b)if(b)f.push(b[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var c=new Promise(((f,c)=>b=e[a]=[f,c]));f.push(b[2]=c);var d=r.p+r.u(a),t=new Error;r.l(d,(f=>{if(r.o(e,a)&&(0!==(b=e[a])&&(e[a]=void 0),b)){var 
c=f&&("load"===f.type?"missing":f.type),d=f&&f.target&&f.target.src;t.message="Loading chunk "+a+" failed.\n("+c+": "+d+")",t.name="ChunkLoadError",t.type=c,t.request=d,b[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,f)=>{var b,c,d=f[0],t=f[1],o=f[2],n=0;if(d.some((a=>0!==e[a]))){for(b in t)r.o(t,b)&&(r.m[b]=t[b]);if(o)var i=o(r)}for(a&&a(f);n{"use strict";var e,a,f,b,c,d={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var f=t[e]={id:e,loaded:!1,exports:{}};return d[e].call(f.exports,f,f.exports,r),f.loaded=!0,f.exports}r.m=d,r.c=t,e=[],r.O=(a,f,b,c)=>{if(!f){var d=1/0;for(i=0;i=c)&&Object.keys(r.O).every((e=>r.O[e](f[o])))?f.splice(o--,1):(t=!1,c0&&e[i-1][2]>c;i--)e[i]=e[i-1];e[i]=[f,b,c]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,b){if(1&b&&(e=this(e)),8&b)return e;if("object"==typeof e&&e){if(4&b&&e.__esModule)return e;if(16&b&&"function"==typeof e.then)return e}var c=Object.create(null);r.r(c);var d={};a=a||[null,f({}),f([]),f(f)];for(var t=2&b&&e;"object"==typeof t&&!~a.indexOf(t);t=f(t))Object.getOwnPropertyNames(t).forEach((a=>d[a]=()=>e[a]));return d.default=()=>e,r.d(c,d),c},r.d=(e,a)=>{for(var f in a)r.o(a,f)&&!r.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,f)=>(r.f[f](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",277:"c4348237",406:"ead696ea",638:"2be451b9",813:"5d9ef359",948:"8717b14a",1075:"18874b20",1129:"c2c9b4b3",1500:"b81173c6",1505:"7c2ff145",1649:"8d745e6e",1669:"c64e4644",1914:"d9f32620",1915:"a9a78b31",2217:"8aa54216",2237:"acf368fd",2267:"59362658",2362:"e273c56f",2437:"f665e660",2535:"814f3328",2774:"22646bfb",3085:"1f391b9e",3089:"a6aa9e1f",3497:"c650a001",3514:"73664a40",3589:"6d8d5af3",3608:"9e4087bc",3998:"e83fc973",4013:"01a85c17",4094:"d54d1d36",4154:"300d3bf9",4193:"c4f5d8e4",4195:"59db27b9",4288:"ad895e75",4456:"b42f4d8b",4776:"552535a5",4823:"cd7e3398",5062:"69369ae2",5070:"62fa1b0b",5093:"b0dde2ea",5662:"3c7910a2",5822:"07c7815c",6004:"a76bfff0",6103:"ccc49370",6287:"e90a2c2a",6306:"1d408bef",6320:"647f233e",6353:"c00468a4",6513:"1bba06cb",6741:"791926a5",6846:"710d6d2e",6890:"41ebaef0",6900:"0b2479c4",7133:"f03e9ca9",7331:"e6809710",7414:"393be207",7492:"81fe2174",7521:"ac63f98d",7527:"f345e2d0",7530:"a95e9274",7745:"085a15b4",7818:"037ceaed",7918:"17896441",7943:"a35a66a7",8221:"1c59ea81",8428:"807573a4",8610:"6875c492",8636:"f4f34a3a",8841:"b46b210f",8989:"23822d86",8994:"9546be45",9003:"925b3f96",9067:"c4afd168",9243:"9115b1fc",9449:"3724ddc1",9514:"1be78505",9541:"422f8ca8",9594:"a650ca47",9642:"7661071f",9671:"0e384e19",9735:"4ba7e5a3",9748:"22167790",9817:"14eb3368",9889:"f1f0d3d7"}[e]||e)+"."+{53:"cae20f2f",210:"4d2f5804",277:"4c140266",406:"5e49fbe6",638:"04ee09c2",813:"464c1236",948:"f7dcbeb5",1075:"ec6ab114",1129:"89c59f08",1500:"58453c83",1505:"d5ffaf36",1649:"ca19f709",1669:"78a6930d",1914:"80015ae3",1915:"8f25754e",2217:"975543ac",2237:"4804172f",2267:"21996e9c",2362:"91d595a4",2437:"5c98f5cc",2529:"da2bcb01",2535:"ebb147e0",2774:"99024fc4",3085:"f8464388",3089:"1e1af270",3497:"a4baaa97",3514:"82f400ba",3589:"2056914d",3608:"9a815895",3998:"a5d42404",4013:"5653d10a",4094:"06b988da",4154:"1ab8b83e",4193:"0db3bdb7",4195:"19d4c7b9",4288:"46e8aac9",4456:"e25ccbd4",4776:"62c3c6be",4823:"f04af052",4972:"9374abde",5062:"f229d6b9",5070:"b5b41b52",5093:"81aa7886",5662:"96cd3189",5822:"938e251e",6004:"40037a3e",6103:"5cfe080a",6287:"5bd29202",6306:"
70cf2055",6320:"a270a649",6353:"eb6dcaea",6513:"8ad7d849",6741:"fe0ced91",6846:"165dcede",6890:"cc56ef24",6900:"1bae7804",7133:"3c6e1331",7331:"f4db3540",7414:"7cc220a5",7492:"d89689e4",7521:"b3888073",7527:"7d7c7b04",7530:"c0534094",7745:"0dabe575",7818:"82155e3c",7918:"bacd5894",7943:"fe316c62",8221:"7bf39220",8428:"fdfff05a",8610:"da158881",8636:"30f136ee",8841:"d222566a",8989:"edd20c3f",8994:"921e114b",9003:"dc312b15",9067:"86787076",9243:"3ccb976d",9449:"992a3138",9514:"82b3557a",9541:"f766c998",9594:"4783b2bb",9642:"7d45a9e1",9671:"7e7a9980",9735:"4d5b2a4b",9748:"e613f018",9817:"3bb53ce2",9889:"c19b5071"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),b={},c="website:",r.l=(e,a,f,d)=>{if(b[e])b[e].push(a);else{var t,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var c=b[e];if(delete b[e],t.parentNode&&t.parentNode.removeChild(t),c&&c.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/website/",r.gca=function(e){return e={17896441:"7918",22167790:"9748",59362658:"2267","935f2afb":"53",c4348237:"277",ead696ea:"406","2be451b9":"638","5d9ef359":"813","8717b14a":"948","18874b20":"1075",c2c9b4b3:"1129",b81173c6:"1500","7c2ff145":"1505","8d745e6e":"1649",c64e4644:"1669",d9f32620:"1914",a9a78b31:"1915","8aa54216":"2217",acf368fd:"2237",e273c56f:"2362",f665e660:"2437","814f3328":"2535","22646bfb":"2774","1f391b9e":"3085",a6aa9e1f:"3089",c650a001:"3497","73664a40":"3514","6d8d5af3":"3589","9e4087bc":"3608",e83fc973:"3998","01a85c17":"4013",d54d1d36:"4094","300d3bf9":"4154",c4f5d8e4:"4193","59db27b9":"4195",ad895e75:"4288",b42f4d8b:"4456","552535a5":"4776",cd7e3398:"4823","69369ae2":"5062","62fa1b0b":"5070",b0dde2ea:"5093","3c7910a2":"5662","07c7815c":"5822",a76bfff0:"6004",ccc49370:"6103",e90a2c2a:"6287","1d408bef":"6306","647f233e":"6320",c00468a4:"6353","1bba06cb":"6513","791926a5":"6741","710d6d2e":"6846","41ebaef0":"6890","0b2479c4":"6900",f03e9ca9:"7133",e6809710:"7331","393be207":"7414","81fe2174":"7492",ac63f98d:"7521",f345e2d0:"7527",a95e9274:"7530","085a15b4":"7745","037ceaed":"7818",a35a66a7:"7943","1c59ea81":"8221","807573a4":"8428","6875c492":"8610",f4f34a3a:"8636",b46b210f:"8841","23822d86":"8989","9546be45":"8994","925b3f96":"9003",c4afd168:"9067","9115b1fc":"9243","3724ddc1":"9449","1be78505":"9514","422f8ca8":"9541",a650ca47:"9594","7661071f":"9642","0e384e19":"9671","4ba7e5a3":"9735","14eb3368":"9817",f1f0d3d7:"9889"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,f)=>{var b=r.o(e,a)?e[a]:void 0;if(0!==b)if(b)f.push(b[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var c=new Promise(((f,c)=>b=e[a]=[f,c]));f.push(b[2]=c);var d=r.p+r.u(a),t=new Error;r.l(d,(f=>{if(r.o(e,a)&&(0!==(b=e[a])&&(e[a]=void 0),b)){var c=f&&("load"===f.type?"missing":f.type),d=f&&f.target&&f.target.src;t.message="Loading chunk "+a+" failed.\n("+c+": "+d+")",t.name="ChunkLoadError",t.type=c,t.request=d,b[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,f)=>{var b,c,d=f[0],t=f[1],o=f[2],n=0;if(d.some((a=>0!==e[a]))){for(b in 
t)r.o(t,b)&&(r.m[b]=t[b]);if(o)var i=o(r)}for(a&&a(f);n 历史博文 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/blog/first-blog-post/index.html b/blog/first-blog-post/index.html index b758d60c3..6585e9c45 100644 --- a/blog/first-blog-post/index.html +++ b/blog/first-blog-post/index.html @@ -5,13 +5,13 @@ First Blog Post | Framework as a Building Block for Kubernetes - +

First Blog Post

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/index.html b/blog/index.html index 644c429f7..3c3a9f940 100644 --- a/blog/index.html +++ b/blog/index.html @@ -5,13 +5,13 @@ Blog | Framework as a Building Block for Kubernetes - +

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/long-blog-post/index.html b/blog/long-blog-post/index.html index 87364213d..3a88f6297 100644 --- a/blog/long-blog-post/index.html +++ b/blog/long-blog-post/index.html @@ -5,13 +5,13 @@ Long Blog Post | Framework as a Building Block for Kubernetes - +

Long Blog Post

· 3 min read
Endilie Yacop Sucipto

This is the summary of a very long blog post,

Use a <!-- truncate --> comment to limit blog post size in the list view.

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/mdx-blog-post/index.html b/blog/mdx-blog-post/index.html index b49ff3a5c..e427cea23 100644 --- a/blog/mdx-blog-post/index.html +++ b/blog/mdx-blog-post/index.html @@ -5,13 +5,13 @@ MDX Blog Post | Framework as a Building Block for Kubernetes - +

MDX Blog Post

· One min read
Sébastien Lorber

Blog posts support Docusaurus Markdown features, such as MDX.

Tip

Use the power of React to create interactive blog posts.

<button onClick={() => alert('button clicked!')}>Click me!</button>
- + \ No newline at end of file diff --git a/blog/tags/docusaurus/index.html b/blog/tags/docusaurus/index.html index 6047bdae8..a6fc81d83 100644 --- a/blog/tags/docusaurus/index.html +++ b/blog/tags/docusaurus/index.html @@ -5,13 +5,13 @@ 4 篇博文 含有标签「docusaurus」 | Framework as a Building Block for Kubernetes - +

4 posts tagged with "docusaurus"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/facebook/index.html b/blog/tags/facebook/index.html index 2a67dd727..74056ff31 100644 --- a/blog/tags/facebook/index.html +++ b/blog/tags/facebook/index.html @@ -5,13 +5,13 @@ 1 篇博文 含有标签「facebook」 | Framework as a Building Block for Kubernetes - +

1 post tagged with "facebook"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hello/index.html b/blog/tags/hello/index.html index d102e622b..e9cb89d31 100644 --- a/blog/tags/hello/index.html +++ b/blog/tags/hello/index.html @@ -5,13 +5,13 @@ 2 篇博文 含有标签「hello」 | Framework as a Building Block for Kubernetes - +

2 posts tagged with "hello"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hola/index.html b/blog/tags/hola/index.html index a9509c509..eb7ee0268 100644 --- a/blog/tags/hola/index.html +++ b/blog/tags/hola/index.html @@ -5,13 +5,13 @@ 1 篇博文 含有标签「hola」 | Framework as a Building Block for Kubernetes - +

1 post tagged with "hola"

View All Tags

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/index.html b/blog/tags/index.html index 6d67b101f..b3383b383 100644 --- a/blog/tags/index.html +++ b/blog/tags/index.html @@ -5,13 +5,13 @@ 标签 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/blog/welcome/index.html b/blog/welcome/index.html index e981c8162..c30cc90e1 100644 --- a/blog/welcome/index.html +++ b/blog/welcome/index.html @@ -5,13 +5,13 @@ Welcome | Framework as a Building Block for Kubernetes - +

Welcome

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/docs/FAQ/index.html b/docs/FAQ/index.html index f0e74d000..2e874e3b3 100644 --- a/docs/FAQ/index.html +++ b/docs/FAQ/index.html @@ -5,13 +5,13 @@ 常见问题 | Framework as a Building Block for Kubernetes - +

Frequently Asked Questions

BuildingBase

Deployment issues

1. The tenant-management controller fails its liveness probe and cannot start

  • Environment
    • Azure VM (Ubuntu 22.04)

Reproduction

After running helm install --wait -n u4a-system u4a-component ., the tenant-management controller stays in CrashLoopBackOff:

➜  ~ k get pods -nu4a-system
NAME READY STATUS RESTARTS AGE
bff-server-9cc54cbc5-gcp6x 1/1 Running 0 17m
capsule-controller-manager-5b9864f9bf-7mkhb 0/1 CrashLoopBackOff 8 (27s ago) 17m
cert-manager-79d7998d9-c7q8n 1/1 Running 0 33m
cert-manager-cainjector-57bb7f44dd-c9sj6 1/1 Running 0 33m
cert-manager-webhook-65b494ccf4-4blzx 1/1 Running 0 33m
cluster-component-ingress-nginx-controller-86d6bfdbf6-qj6hf 1/1 Running 0 33m
kube-oidc-proxy-fc6b54b8c-ddc2s 1/1 Running 0 17m
oidc-server-84cbfcc9f5-bmmf9 2/2 Running 0 17m
resource-view-controller-94645667-ttvst 1/1 Running 0 17m

The pod events show the following errors:

Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 4m35s default-scheduler Successfully assigned u4a-system/capsule-controller-manager-5b9864f9bf-7mkhb to kubebb-core-worker
Normal Pulling 4m34s kubelet Pulling image "hub.tenxcloud.com/u4a-component/capsule:v0.1.2-20221122"
Normal Pulled 3m57s kubelet Successfully pulled image "hub.tenxcloud.com/u4a-component/capsule:v0.1.2-20221122" in 36.616134797s
Warning Unhealthy 3m53s kubelet Liveness probe failed: Get "http://10.244.1.6:10080/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
Warning Unhealthy 3m44s kubelet Readiness probe failed: Get "http://10.244.1.6:10080/readyz": dial tcp 10.244.1.6:10080: connect: connection refused
Warning Unhealthy 3m44s kubelet Liveness probe failed: Get "http://10.244.1.6:10080/healthz": dial tcp 10.244.1.6:10080: connect: connection refused
Normal Created 3m34s (x3 over 3m57s) kubelet Created container manager
Normal Started 3m34s (x3 over 3m57s) kubelet Started container manager
Warning Unhealthy 3m31s (x7 over 3m55s) kubelet Readiness probe failed: Get "http://10.244.1.6:10080/readyz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
Warning BackOff 3m13s (x4 over 3m42s) kubelet Back-off restarting failed container
Normal Pulled 3m1s (x3 over 3m50s) kubelet Container image "hub.tenxcloud.com/u4a-component/capsule:v0.1.2-20221122" already present on machine

Root cause

Thanks to @0xff-dev for tracking this down.

After removing the liveness probe, the container error became visible: during initialization capsule exceeded the maximum number of open files allowed by the operating system, so it could not start.

Solution

Thanks to @0xff-dev for the fix: set fs.inotify.max_user_instances=81920.

The following explanation was provided by Microsoft's New Bing:

fs.inotify.max_user_instances is a kernel parameter that limits how many inotify instances each user may create. inotify is a mechanism for watching file-system changes. There are two ways to change this parameter:

  • Temporary change: use sysctl -w, for example:
sudo sysctl -w fs.inotify.max_user_instances=81920

This sets fs.inotify.max_user_instances to 81920 and writes it to /proc/sys/fs/inotify/max_user_instances. The change only lasts until the next reboot, after which the default value is restored.

  • Permanent change: add the following line to /etc/sysctl.conf:
fs.inotify.max_user_instances=81920

Then run sudo sysctl -p to load the settings from that file, so the change persists across reboots.
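A quick way to confirm the new limit is active before restarting the failing pod (a minimal check, assuming shell access on the affected node):

# both commands should now report 81920
sysctl fs.inotify.max_user_instances
cat /proc/sys/fs/inotify/max_user_instances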

To learn more about sysctl and fs.inotify.max_user_instances, see the following links:

- + \ No newline at end of file diff --git a/docs/building-base/add-cluster/index.html b/docs/building-base/add-cluster/index.html index 823b47ad8..b00194038 100644 --- a/docs/building-base/add-cluster/index.html +++ b/docs/building-base/add-cluster/index.html @@ -5,13 +5,13 @@ 添加集群 | Framework as a Building Block for Kubernetes - +

Add a Cluster

  1. Create a namespace for cluster management (cluster-system works well) to store the cluster information:
kubectl create ns cluster-system
  2. Get the token used to add the cluster:
export TOKENNAME=$(kubectl get serviceaccount/host-cluster-reader -n u4a-system -o jsonpath='{.secrets[0].name}')
kubectl get secret $TOKENNAME -n u4a-system -o jsonpath='{.data.token}' | base64 -d
  3. Log in to the management portal, go to "Cluster Management" (see Install the building base), and click "Add Cluster".

  4. Enter a cluster name and adjust the cluster suffix as needed; here the cluster is joined via "API Token".

  • API Host: a K8s API address that supports the OIDC protocol. Run kubectl get ingress -nu4a-system and use the host of kube-oidc-proxy-server-ingress, e.g. https://k8s.172.22.96.136.nip.io (note: no trailing /)
  • API Token: the token obtained in step 2; a quick way to validate the pair is sketched below
  5. Once the cluster is added, it appears in the list with its status; under "Tenant Management" you will see a system tenant named "system-tenant"
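Before registering, the API Host/token pair can be smoke-tested directly (a hypothetical check, not part of the original steps; the host value and the TLS flag are assumptions to adapt):

# list namespaces through kube-oidc-proxy using the token from step 2
TOKEN=$(kubectl get secret $TOKENNAME -n u4a-system -o jsonpath='{.data.token}' | base64 -d)
kubectl --server=https://k8s.172.22.96.136.nip.io --token="$TOKEN" --insecure-skip-tls-verify=true get namespaces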
- + \ No newline at end of file diff --git a/docs/building-base/configuration/3rd-party-account/index.html b/docs/building-base/configuration/3rd-party-account/index.html index bbc2b02ea..8aad6da6c 100644 --- a/docs/building-base/configuration/3rd-party-account/index.html +++ b/docs/building-base/configuration/3rd-party-account/index.html @@ -5,7 +5,7 @@ 使用第三方系统登录 | Framework as a Building Block for Kubernetes - + @@ -14,7 +14,7 @@ 图 1

  • In the left navigation choose "Applications", then fill in the required fields for the new application:

    • Name: fill in as needed
    • Callback URL: <u4a-component address>/oidc/callback
    • Scope: check read_user and openid (Figure 2)
    2. Save the application and click the application just saved on the page, which yields Figure 3.

    Record the Application ID (clientid), the Secret (clientsecret), and the Callback URL (redirecturl); they are needed when configuring oidc-server.

    3. Edit the oidc-server configuration to add a new connector (kubectl edit cm oidc-server -n u4a-system), following this example:
    connectors:
    - type: k8scrd
      ...
    - type: gitlab ## fixed value: gitlab
      name: gitlab ## display name
      id: gitlab ## fixed value: gitlab
      config:
        baseURL: http://gitlab.172.22.50.155.nip.io ## externally reachable GitLab address
        clientID: ef2b579e5b4c1cf9ae5b0b2acb166271ebff5892e84aa113689d4646ffcb29e7 ## application ID of the GitLab app created in the "configure gitlab" step above
        clientSecret: 3a9e79368a70bcdf1e4ac1df64e4220e7af798876333c9642a8edb782e6eb558 ## secret of the GitLab app created in the "configure gitlab" step above
        redirectURI: https://portal.172.22.96.209.nip.io/oidc/callback ## callback URL configured in the "configure gitlab" step above
    4. Enable the third-party integration; gitlab and github are currently supported. Simply set the corresponding enabled field to true:
    • kubectl edit connector3rd connector3rd
    kind: Connector3rd
    metadata:
      annotations:
        helm.sh/hook: post-install,post-upgrade
        helm.sh/hook-weight: "-5"
      name: connector3rd
    spec:
      connectors:
      - description: gitlab description
        enabled: false # change to true
        icon: <keep unchanged>
        id: gitlab
        name: gitlab
      - description: github description
        enabled: false
        icon: <keep unchanged>
        id: github
        name: github
    5. After the configuration is done, restart the oidc-server service for it to take effect; the corresponding login icon then appears on the login page (one way to restart is sketched below).
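    A minimal restart sketch (assuming oidc-server runs as a Deployment in u4a-system):

    kubectl rollout restart deployment/oidc-server -n u4a-system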

    Figure 4

    - + \ No newline at end of file diff --git a/docs/building-base/configuration/audit-config/index.html b/docs/building-base/configuration/audit-config/index.html index a1d1fb943..751bb0a71 100644 --- a/docs/building-base/configuration/audit-config/index.html +++ b/docs/building-base/configuration/audit-config/index.html @@ -5,14 +5,14 @@ 配置审计能力 | Framework as a Building Block for Kubernetes - +

Configure Auditing

Tip

Note: auditing depends on collection of the audit logs. Configure the ElasticSearch log-service address in the cluster settings; for log-service configuration see the logging component.

1. Edit audit-policy.yaml to configure auditing: kubectl edit cm audit-policy-conf -n u4a-system

Define the policy following this pattern:

apiVersion: audit.k8s.io/v1beta1
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Don't audit namespaces: kube-system/cluster-system/system-bigdata
  - level: None
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]
    namespaces: ["kube-system", "cluster-system", "system-bigdata"]
  # Only enable 'write' verbs audit log for secret and configmap
  - level: Metadata
    verbs: ["create","delete","deletecollection","patch","update"]
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

Each component should add its own resource types to the audit rules; auditing write operations is the sensible default.

2. Configure kube-apiserver flags (the same configuration can be applied on kube-oidc-proxy when the underlying K8S cluster cannot be modified). Add:

# path of the audit policy file
- --audit-policy-file=/etc/kubernetes/pki/audit-policy.yaml
# path of the audit log file
- --audit-log-path=/var/log/apiserver/audit/audit.log
# log retention policy
- --audit-log-maxage=7
- --audit-log-maxbackup=10
- --audit-log-maxsize=10

When modifying K8S directly, add these flags to /etc/kubernetes/manifests/kube-apiserver.yaml and confirm that the kube-apiserver container restarts afterwards; only then does the configuration take effect. A quick check is sketched below.
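One way to confirm the restart picked up the flags (assuming a standard kubeadm layout where the apiserver static pod carries the component=kube-apiserver label):

kubectl get pod -n kube-system -l component=kube-apiserver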

3. Configure fluentd to collect the logs. The index template conflicts with the existing fluentd, so a dedicated fluentd process is needed to collect the audit logs on the master nodes. The currently tested approach: copy the existing fluentd daemonset, rename the new ds, and mount the following configuration:

apiVersion: v1
data:
  fluent.conf: |2
    # for audit log
    <source>
      @type tail
      @id in_tail_kube_apiserver_audit
      multiline_flush_interval 5s
      path /var/log/apiserver/audit/audit.log
      pos_file /var/log/kube-apiserver-audit.log.pos
      tag kube-apiserver-audit
      <parse>
        @type json
        keep_time_key true
        time_key timestamp
        time_format %Y-%m-%dT%T.%L%Z
      </parse>
    </source>

    ## Used for health check
    <source>
      @type http
      port 9880
      bind 0.0.0.0
    </source>

    ## sink all log to elasticsearch directly
    <match **>
      @type elasticsearch
      @log_level debug
      include_tag_key true
      host elasticsearch-logging
      port 9200
      user "#{ENV['ES_USERNAME']}"
      password "#{ENV['ES_PASSWORD']}"
      scheme "#{ENV['ES_SCHEME']}"
      ca_file /etc/fluent/certs/ca.crt
      logstash_prefix audit-k8s
      logstash_format true
      # Set the chunk limit the same as for fluentd-gcp.
      reload_on_failure true
      reconnect_on_error true
      request_timeout 120s
      <buffer>
        @type file
        path /var/log/td-agent/buffer/elasticsearch
        chunk_limit_size 15MB
        total_limit_size 20GB
        flush_interval 3s
        flush_thread_count 8
        flush_mode interval
        # Never wait longer than 5 minutes between retries.
        retry_timeout 300
        retry_forever true
      </buffer>
    </match>
kind: ConfigMap
metadata:
  labels:
    component: fluentd
    k8s-app: fluentd
  # the new fluentd uses this configmap
  name: fluentd-audit
  namespace: kube-system

Notes:

• The new fluentd daemonset only needs to run on the nodes where kube-oidc-proxy is deployed (use a node-affinity configuration)
• Point the daemonset at the configmap above, named fluentd-audit

4. Audit records are stored in ES in the following format and can be retrieved by querying ES (an example query is given at the end of this section):

{
  "_index": "logstash-2022.08.20",
  "_type": "fluentd",
  "_id": "iRzLu4IBqmHyli33qpUa",
  "_version": 1,
  "_score": null,
  "_source": {
    "kind": "Event",
    "apiVersion": "audit.k8s.io/v1",
    "level": "Metadata",
    "auditID": "7c0072aa-b48a-4772-bc52-42e50c0e65ce",
    "stage": "ResponseComplete",
    "requestURI": "/api/v1/namespaces/addon-system/configmaps/42c733ea.clastix.capsule.io",
    "verb": "update",
    "user": {
      "username": "system:serviceaccount:addon-system:default",
      "uid": "c8cb442d-853c-4a53-9c83-c7a1520095c4",
      "groups": [
        "system:serviceaccounts",
        "system:serviceaccounts:addon-system",
        "system:authenticated"
      ]
    },
    "sourceIPs": [
      "172.22.96.146"
    ],
    "userAgent": "manager/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election",
    "objectRef": {
      "resource": "configmaps",
      "namespace": "addon-system",
      "name": "42c733ea.clastix.capsule.io",
      "uid": "c4542d46-0e07-41be-8420-f912a2918e51",
      "apiVersion": "v1",
      "resourceVersion": "236579314"
    },
    "responseStatus": {
      "metadata": {},
      "code": 200
    },
    "requestReceivedTimestamp": "2022-08-20T15:07:41.991582Z",
    "stageTimestamp": "2022-08-20T15:07:42.000098Z",
    "annotations": {
      "authentication.k8s.io/legacy-token": "system:serviceaccount:addon-system:default",
      "authorization.k8s.io/decision": "allow",
      "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"t7d.io.capsule-manager-rolebinding\" of ClusterRole \"cluster-admin\" to ServiceAccount \"default/addon-system\""
    },
    "@timestamp": "2022-08-20T15:07:42.000871648+00:00",
    "tag": "kube-apiserver-audit"
  },
  "fields": {
    "requestReceivedTimestamp": [
      "2022-08-20T15:07:41.991Z"
    ],
    "stageTimestamp": [
      "2022-08-20T15:07:42.000Z"
    ],
    "@timestamp": [
      "2022-08-20T15:07:42.000Z"
    ]
  },
  "sort": [
    1661008062000
  ]
}

5. For audits that are not standard K8S resource operations, an application/service can write its own audit entries into the /var/log/apiserver/audit directory (named e.g. service-audit.log) as long as they follow the K8S audit message format; a single record looks like this:

    {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"8d8d8163-54e8-457d-94f6-7851e887f3cf","stage":"ResponseComplete","requestURI":"/api/v1/namespaces/u4a-system/secrets/sh.helm.release.v1.u4a-system.v1","verb":"delete","user":{"username":"admin","groups":["system:nodes","iam.tenxcloud.com"]},"sourceIPs":["172.16.31.254"],"userAgent":"helm/v0.0.0 (darwin/amd64) kubernetes/$Format","objectRef":{"resource":"secrets","namespace":"u4a-system","name":"sh.helm.release.v1.u4a-system.v1","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2022-08-24T05:36:36.524760Z","stageTimestamp":"2022-08-24T05:36:36.529890Z"}

The fields that matter most:

1) kind: always "Event"; audit queries only look for Event records

2) stage: currently always "ResponseComplete"; only the response-completion time is recorded, not the request-received time

3) verb: the operation (create/delete/update/query)

4) user.username: the operator

5) sourceIPs: the client IP

6) objectRef.resource: the resource being operated on

7) objectRef.namespace: the project/namespace being operated on

8) responseStatus.code: the response code

9) requestReceivedTimestamp: the time the request arrived
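An example ES query (a sketch, not from the original docs: the audit-k8s-* index pattern follows the logstash_prefix configured in fluentd above; credentials and CA path are assumptions):

# fetch the 20 most recent delete operations recorded in the last hour
curl -u "$ES_USERNAME:$ES_PASSWORD" --cacert ca.crt \
  "https://elasticsearch-logging:9200/audit-k8s-*/_search?pretty" \
  -H 'Content-Type: application/json' -d '
{
  "query": { "bool": { "filter": [
    { "term": { "verb": "delete" } },
    { "range": { "requestReceivedTimestamp": { "gte": "now-1h" } } }
  ] } },
  "sort": [ { "requestReceivedTimestamp": "desc" } ],
  "size": 20
}'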
    - + \ No newline at end of file diff --git a/docs/building-base/configuration/customize-menu/index.html b/docs/building-base/configuration/customize-menu/index.html index 546d5db87..91bae4b15 100644 --- a/docs/building-base/configuration/customize-menu/index.html +++ b/docs/building-base/configuration/customize-menu/index.html @@ -5,14 +5,14 @@ 自定义菜单 | Framework as a Building Block for Kubernetes - +

Custom Menus

All kubebb menus are defined by the menu CRD. To add your own menus, use the following menu examples as a reference:

# Main menu
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu
spec:
  column: 1
  isRenderSelectCurrent: false
  parentOwnerReferences:
    apiVersion: ""
    kind: ""
    name: ""
    uid: ""
  rankingInColumn: 100
  tenant: true
  text: 测试菜单
  textEn: "Test Menu"
---
# Index menu for the test menu
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu-index
spec:
  getTitleForReplaceSider: {}
  parentOwnerReferences:
    apiVersion: component.t7d.io/v1beta1
    blockOwnerDeletion: false
    controller: false
    kind: Menu
    name: demo-menu
    uid: ""
  rankingInColumn: 100
  tenant: true
  text: 菜单索引项
  textEn: "Menu Index Item"
---
# Submenu that carries the actual link
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu-submenu1
spec:
  getTitleForReplaceSider: {}
  isRenderSelectCurrent: false
  parentOwnerReferences:
    apiVersion: component.t7d.io/v1beta1
    blockOwnerDeletion: false
    controller: false
    kind: Menu
    name: demo-menu-index
    uid: ""
  pathname: /demo-feature1
  rankingInColumn: 200
  text: 测试子菜单
  textEn: "Test Submenu"

Deploy the menu items into the environment with kubectl apply -f, as shown below: Figure 1 (a quick verification is sketched next)
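A minimal apply-and-check sketch (the file name is hypothetical, and the resource plural "menus" is an assumption derived from kind Menu):

kubectl apply -f demo-menu.yaml
kubectl get menus | grep demo-menu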

    - + \ No newline at end of file diff --git a/docs/building-base/configuration/customize-portal/index.html b/docs/building-base/configuration/customize-portal/index.html index 2e9e39d1f..e25eefa8d 100644 --- a/docs/building-base/configuration/customize-portal/index.html +++ b/docs/building-base/configuration/customize-portal/index.html @@ -5,13 +5,13 @@ 自定义门户 | Framework as a Building Block for Kubernetes - +

Custom Portal

1. Prepare the replacement logos, e.g. logo-white.png and favicon.png, and create the corresponding configmap:
kubectl create configmap portal-logos -n u4a-system \
--from-file=logo-white.png=./logo-white.png \
--from-file=favicon.ico=./favicon.png
2. Edit the bff-server deployment to mount the configmap:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bff-server
  namespace: u4a-system
spec:
  template:
    spec:
      volumes:
      - hostPath:
          path: /etc/localtime
          type: ""
        name: time-localtime
      # add the portal-logos configmap created above as a volume
      - configMap:
          name: portal-logos
        name: logos
      containers:
        volumeMounts:
        - mountPath: /etc/localtime
          name: time-localtime
          readOnly: true
        # mount the logos volume above at the logo directory
        - mountPath: /usr/src/app/public/profile/img
          name: logos

Customize the primary color

Create a portal-global-configs configmap to customize the portal's primary color, for example:

apiVersion: v1
kind: ConfigMap
metadata:
  name: portal-global-configs
  namespace: u4a-system
data:
  global-configs: |
    {"theme": {"primaryColor": "#008F35"}}

Create the configMap; the portal picks up the change after a refresh:

kubectl create cm portal-global-configs -n u4a-system
Then edit the bff-server deployment to mount the configmap:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bff-server
  namespace: u4a-system
spec:
  template:
    spec:
      volumes:
      - hostPath:
          path: /etc/localtime
          type: ""
        name: time-localtime
      # add the portal-global-configs configmap created above as a volume
      - configMap:
          name: portal-global-configs
        name: portal-global-configs
      containers:
        volumeMounts:
        - mountPath: /etc/localtime
          name: time-localtime
          readOnly: true
        # mount the portal-global-configs volume above at the configs directory
        - mountPath: /usr/src/app/configs
          name: portal-global-configs
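To make the bff-server pods pick up the new mounts, a restart can be triggered (a convenience sketch, not part of the original steps):

kubectl rollout restart deployment/bff-server -n u4a-system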
    - + \ No newline at end of file diff --git a/docs/building-base/configuration/issue-oidc-proxy-certs/index.html b/docs/building-base/configuration/issue-oidc-proxy-certs/index.html index f34de9afc..2e2671177 100644 --- a/docs/building-base/configuration/issue-oidc-proxy-certs/index.html +++ b/docs/building-base/configuration/issue-oidc-proxy-certs/index.html @@ -5,7 +5,7 @@ 生成 oidc-proxy 的证书 | Framework as a Building Block for Kubernetes - + @@ -28,7 +28,7 @@ -reqexts req_ext \ -config openssl.cnf \ -out server.csr

4. Generate the base64 content of server.csr:

    cat server.csr | base64 | tr -d "\n"

5. Create a CertificateSigningRequest in the kubernetes cluster for oidc-proxy.

Replace the request value with the output generated in step 4:

cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: myuser
spec:
  request: <replace with the content generated in step 4>
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
EOF

6. Approve the CertificateSigningRequest:

    kubectl certificate approve myuser

7. Get the certificate of oidc-proxy issued by the kubernetes cluster:

    kubectl get csr myuser -oyaml | grep certificate: | awk '{print $2}' |base64 -d > ./server.cert

8. After the steps above you have server.key and server.cert (rename them as you like). Create the secret for oidc-proxy with:

kubectl create secret
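The original command is truncated; one plausible completion (the secret name, type, and namespace are assumptions):

kubectl create secret tls oidc-proxy-cert --key=server.key --cert=server.cert -n u4a-system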

    - + \ No newline at end of file diff --git a/docs/building-base/configuration/oidc-integration/index.html b/docs/building-base/configuration/oidc-integration/index.html index d17ed475f..6566611ec 100644 --- a/docs/building-base/configuration/oidc-integration/index.html +++ b/docs/building-base/configuration/oidc-integration/index.html @@ -5,13 +5,13 @@ 集成单点登录 | Framework as a Building Block for Kubernetes - +

Integrate Single Sign-On

Use the OIDC protocol to share kubebb's unified accounts, authentication, and single sign-on.

1. Register an OIDC client on the kubebb server

Add the client by editing the ConfigMap used by dex-server:

kubectl edit cm oidc-server -n u4a-system

Add a new client under staticClients, for example:

  staticClients:
  - id: my-oidc-client
    name: my-oidc-client
    secret: ZXhhbXBsZS1hcHAtc2VjcmV0 # a random secret; take care not to leak it
    redirectURIs: # multiple callback addresses can be configured
    - "<my-oidc-client-callback-address>" # callback after a successful login, e.g. "http://192.168.1.32:8022"

    重启 oidc-server

    kubectl delete <oidc-server-pod> -n u4a-system

2. Implement OIDC login

1. Example environment information

2. Application registration information

3. User login

• On the application side, an OIDC SDK is usually used to handle the OIDC flow and simplify the interaction for developers; for golang you can use: https://github.com/coreos/go-oidc

The following shows how to implement the basic OIDC single sign-on flow through direct API calls:

1) When a user visits the application, it should automatically redirect to the kubebb authentication service login URL, for example:

https://192.168.2.216/oidc/auth?client_id=my-oidc-client&redirect_uri=http://192.168.1.32:8022/auth/callback&response_type=code&scope=openid+profile+email+offline_access

• client_id: application ID, provided by kubebb at registration, e.g. my-oidc-client
• redirect_uri: application callback address, provided by the application at registration, e.g. http://192.168.1.32:8022/auth/callback
• response_type: fixed value: code
• scope: fixed value: openid profile email

2) On the kubebb login page, the user logs in with username/password (this step is skipped automatically if the user is already logged in)

3) After a successful login, the browser is redirected back to the application's registered callback address, for example:

http://192.168.1.32:8022/auth/callback?code=kf7dmmvhdipdcjczydklwi6pu&state=<state-info>

• code: authorization code generated by the kubebb authentication service, used later to obtain tokens
• state: the same state value the application passed when redirecting to the kubebb login URL

4) The application backend calls the kubebb token API to obtain the tokens, for example:

curl -XPOST 'https://192.168.2.216/oidc/token' \
-H 'Authorization: Basic c2FtcGxlLWFwcC0yMTg6WlhoaGJYQnNaUzFoY0hBdGMyVmpjbVYwJw==' \
-H 'Content-Type: application/x-www-form-urlencoded' \
--data-urlencode 'code=kf7dmmvhdipdcjczydklwi6pu' \
--data-urlencode 'grant_type=authorization_code' \
--data-urlencode 'redirect_uri=http://192.168.1.32:8022/auth/callback'

• Authorization: format Basic XXXX, where XXXX is the base64 encoding of client_id:client_secret
• Content-Type: fixed value: application/x-www-form-urlencoded
• code: authorization code
• grant_type: fixed value: authorization_code
• redirect_uri: application callback address; the kubebb authentication service verifies that it matches the registered callback address

This request returns the token information:

    ID Token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImQxY2Y2MzA3YmQ5Yzk3OTJiMzdmMGJiM2M1Njk1ZDQ0MWJlZTMzNjcifQ.eyJpc3MiOiJodHRwczovLzE5Mi4xNjguMi4yMTYiLCJzdWIiOiJDZ0V4RWdsMFpXNTRZMnh2ZFdRIiwiYXVkIjoic2FtcGxlLWFwcC0yMTgiLCJleHAiOjE2MzAwMzA0ODEsImlhdCI6MTYyOTk0NDA4MSwiYXRfaGFzaCI6Ik1PUjk0enktTUZNcU5zZUZTM1ZzRXciLCJjX2hhc2giOiJpbkoteDVKUEFCRXhaaEpRaEx3T3pBIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJuYW1lIjoiYWRtaW4iLCJwaG9uZSI6IjE3MzQzMTM1MDUxIiwidXNlcmlkIjoiMSJ9.FrC6oKRsManuP9opqugknJmOE78uKmxX6uteM1flCDVRqRv-riG0C5AOX4K9BTnT9GIlu3H24jydT4ybSissz_wL_mLzoTQWoQ9uMMmd4w1aiGqgO6mIaEh3XvTqtoQv1ltONSkp49bykpdIXbDJxy0PScU0k-0XFNJIMSBwn8SEubgH7NO3xwFzsjaLqBfolxC5YXBuWS8n-FEOqNTg-mx-n_Fu2oemJCT-8qWMqY6FNjRSC3D-2ABkCbl4g76vPLgJ-I6dU6eaJvaBW6S4BzhCX0SitxYrxcXjOGviX1HKOXXSUC1n1HfQpOpNW-FA2G3F-kON94rYr1AEdIwSVw

    Access Token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImQxY2Y2MzA3YmQ5Yzk3OTJiMzdmMGJiM2M1Njk1ZDQ0MWJlZTMzNjcifQ.eyJpc3MiOiJodHRwczovLzE5Mi4xNjguMi4yMTYiLCJzdWIiOiJDZ0V4RWdsMFpXNTRZMnh2ZFdRIiwiYXVkIjoic2FtcGxlLWFwcC0yMTgiLCJleHAiOjE2MzAwMzA0ODEsImlhdCI6MTYyOTk0NDA4MSwiYXRfaGFzaCI6IlhYS3RzUkhZS043WnZGOUFxcXVSd3ciLCJlbWFpbCI6ImFkbWluQGV4YW1wbGUuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsIm5hbWUiOiJhZG1pbiIsInBob25lIjoiMTczNDMxMzUwNTEiLCJ1c2VyaWQiOiIxIn0.VXUxba9cy4S3ZUtyfbF6A3Xg5fZYL-m6nqR09qiJgJmNUCEkHr_b4xKXXvDJwsgoC0zdUxShd1BpOgN4vi8A7zZ676-PybB2dVT6x3EPpwCVC8_NVYwXijeELWJJ0nU9aAq6p_m-XdXOuPzutbmMLSWVfrY-CS2WSdQISuKxb28slTptzCF4OY3dZugOHZ_v10KAxsMo0Aul4d3C_EXOaiUKzw0OTD7xyYOm8MmJvhzQEYSodHAkbJWnOusKEBtFKJ5hhAqPLCymY2VleN-7Jbqr-DYrDSDtd7FF1vCbDL0-rTwQ5_79FIAu_fusOcVc26M7GlvOtGcniaweNtm1dg

The returned ID Token is a standard JWT, from which the application can parse the user information, for example:

{
  "iss": "https://192.168.2.216/oidc",
  "sub": "CgExEgl0ZW54Y2xvdWQ",
  "aud": "sample-app",
  "exp": 1630030481,
  "iat": 1629944081,
  "at_hash": "MOR94zy-MFMqNseFS3VsEw",
  "c_hash": "inJ-x5JPABExZhJQhLwOzA",
  "email": "admin@example.com",
  "email_verified": true,
  "name": "admin",
  "phone": "17343135051",
  "userid": "1"
}
• Based on this JWT, the application can sync basic user information into its own system and, on top of the unified account and authentication, integrate with its own account and permission model.
• The Access Token is generally used to fetch detailed user information from the kubebb authentication service; use it as needed.
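As a quick local check, the ID Token payload can be decoded without any SDK. A minimal sketch (this only base64-decodes the payload and does not verify the signature; the payload may need base64 padding appended):

# inspect the ID Token payload; for debugging only, no signature verification
echo "<id-token>" | cut -d '.' -f 2 | base64 -d 2>/dev/null | jq .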

Introduction

Technical components

The platform adopts a front-end/back-end separated, K8S-centric development framework that follows the K8S extension mechanisms and API conventions. The basic logic of the overall development architecture is shown below: Figure 2

1. Authentication for the development and extension of all components goes through the unified authentication center
2. Authentication is handled uniformly by DockApp, the main micro-frontend framework; other micro-frontend extensions do not need to integrate with the authentication center separately
3. The development architecture can be viewed as three layers
• First layer: the front end uses a micro-frontend architecture, developed with low-code where possible to raise the proportion of automatically generated code
• Second layer: OpenAPIs are added per business requirements, forming a unified BFF layer that aggregates APIs and provides the business-scenario data the front end needs
• Third layer: the back end is developed in the Operator pattern (CRD + controller), forming a data-driven development workflow
4. The external APIs consist of two parts:
• OpenAPIs provided by the BFF layer
• Resource APIs provided by the K8S layer

Access via domain names

On the proxy server / load balancer, nip.io is used to support binding http/https domain names, so that configuration can uniformly default to domain names.

• Access via nip.io as http://<ip-address>.nip.io, e.g. http://192.168.1.123.nip.io

Blockchain - Consortium chain

The blockchain component has not yet been packaged as a kubebb component, but it can still be deployed on top of the building-base Kit; see the bestchains platform installation for details

• Note: modify the ingressNode and kubeProxyNode settings in scripts/e2e.sh
• After deployment, the blockchain menus and features appear in the management platform

For detailed usage of bestchains, please refer to the help documentation


kubedashboard management tool

This section describes how to deploy Kubernetes Dashboard on top of the kubebb building base, using unified OIDC authentication, single sign-on, and native Kubernetes RBAC.

• Tool source code: kubernetes dashboard

• Prerequisite

  • Deploy kubebb's building-base component u4a-component, which provides basic account, authentication, authorization, and audit capabilities built on K8S.
  • Fetch the deployment component
  $ git clone https://github.com/kubebb/addon-components.git
  # enter the corresponding directory
  $ cd kube-dashboard

Install Kubernetes dashboard

1. Edit values.yaml: replace the image addresses as needed and replace the property values starting with <replaced-; the OIDC information can be found in the ConfigMap.
# get the OIDC client configuration
kubectl edit cm oidc-server -n u4a-system
# note down issuer and the id/secret under staticClients

Modify values.yaml:

dashboard:
  dashboardImage: hub.tenxcloud.com/addon_system/kube-dashboard:v2.7.0
  proxyImage: hub.tenxcloud.com/addon_system/keycloak-gatekeeper:latest
  metricsImage: hub.tenxcloud.com/addon_system/kube-dashboard-metrics-scraper:v1.0.8

ingress:
  class: portal-ingress
  host: kubedashboard.<replaced-ingress-nginx-ip>.nip.io

# You must check and update the value of each variable below
kubeOidcProxy:
  issuerUrl: <replaced-issuer-url> # https://portal.172.22.96.209.nip.io/oidc
  clientId: <replaced-client-id>
  clientSecret: <replaced-client-secret>
  usernameClaim: preferred_username
  groupClaim: groups
hostConfig:
  enabled: true
  hostAliases:
  - hostnames:
    # MUST update this value
    - portal.<replaced-ingress-nginx-ip>.nip.io
    ip: <replaced-ingress-nginx-ip>
2. Run helm to install the plugin:
# if needed, create a separate namespace for this plugin, e.g. addon-system
kubectl create ns addon-system
# deploy the kube-dashboard plugin
helm install kube-dashboard -n addon-system .
• Note: at this point the dashboard pod stays in ContainerCreating because a required configuration file is missing; we prepare that file next.
3. Create the kubeconfig file needed by kube-dashboard so it can authenticate through the unified kube-oidc-proxy; the config must also use the correct certificate and connection token
# copy the kubeconfig template
$ cp sample-kubeconfig kubeconfig
# edit kubeconfig file to use the correct cluster.certificate-authority-data, cluster.server, user.token

# Step 1
$ export CLUSTER_CA=$(kubectl get secret -n u4a-system oidc-server-root-secret -o jsonpath='{.data.ca\.crt}')
# use the value from $CLUSTER_CA to replace cluster.certificate-authority-data (<certificate-authority-data>) in the kubeconfig file

# Step 2
$ export USER_TOKEN_NAME=$(kubectl -n addon-system get serviceaccount kubernetes-dashboard -o=jsonpath='{.secrets[0].name}')
$ export USER_TOKEN_VALUE=$(kubectl -n addon-system get secret/${USER_TOKEN_NAME} -o=go-template='{{.data.token}}' | base64 --decode)
# use the value from $USER_TOKEN_VALUE to replace user.token (<user-token>) in the kubeconfig file

# Step 3: replace cluster.server (<cluster-server>) with the address of kube-oidc-proxy

# Step 4: create the configmap
$ kubectl create cm dashboard-kubeconfig --from-file=kubeconfig -n addon-system
4. Restart kube-dashboard
$ kubectl delete pod -n addon-system $(kubectl  get pod -n addon-system | grep kubernetes-dashboard | awk '{print $1}')
5. Add the kube-dashboard callback address in the OIDC service.
$ kubectl edit cm oidc-server -n u4a-system
# find redirectURIs and add a new redirect url 'https://<kubedashboard-host-name>/oauth/callback'
6. Access kube-dashboard at kubedashboard.<replaced-ingress-nginx-ip>.nip.io. If you are not logged in, you are redirected to the unified authentication service; after a successful login you return to kube-dashboard with an authorized token and can use it normally.

7. You can grant the logged-in user different RBAC policies to verify that only authorized resources are accessible to that user, as sketched below.
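For example, a minimal sketch of granting a user cluster-wide read-only access with the built-in view ClusterRole (the user name is illustrative; use the name your OIDC proxy reports for the user):

# grant read-only access to a logged-in OIDC user
kubectl create clusterrolebinding dashboard-view-example \
  --clusterrole=view \
  --user=admin@example.com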

Uninstall

Uninstall the plugin via helm uninstall:

helm uninstall kube-dashboard -n addon-system

Using the kubelogin tool

Here are the steps to install kubelogin and integrate it with the OIDC server for the kubectl tool, so you can authenticate with Kubernetes.

• Refer to kubelogin for details.
• Prerequisite: install u4a-component, which provides the account, authentication, authorization, and audit functionality built on Kubernetes.

Install kubelogin

Get the binary from the download page and pick the one matching your OS.

Then put the kubelogin binary on your path under the name kubectl-oidc_login so that the kubectl plugin mechanism can find it when you invoke kubectl oidc-login.

Prepare kubeconfig file

1. Back up your original config file under ~/.kube/config and create a new one.
    $ cd ~/.kube
    $ cp config config_backup
    $ kubectl config set-credentials oidc \
    --exec-api-version=client.authentication.k8s.io/v1beta1 \
    --exec-command=kubectl \
    --exec-arg=oidc-login \
    --exec-arg=get-token \
    --exec-arg=--oidc-extra-scope=email \
    --exec-arg=--oidc-extra-scope=profile \
    --exec-arg=--oidc-issuer-url=https://portal.172.22.96.209.nip.io/oidc \
    --exec-arg=--oidc-client-id=bff-client \
    --exec-arg=--oidc-client-secret=61324af0-1234-4f61-b110-ef57013267d6 \
    --exec-arg=--insecure-skip-tls-verify
2. Point the cluster to kube-oidc-proxy, or to k8s-apiserver if OIDC is enabled.
    - cluster:
    certificate-authority-data: ....
    server: https://172.22.96.133 # Update this value
    name: cluster-name
3. Add http://localhost:8000 as a valid redirect URL of your OIDC server, so it can redirect to the local server after a successful login.

4. Switch the current context to oidc

$ kubectl config set-context --current --user=oidc

Run kubectl get nodes; kubectl executes kubelogin before calling the Kubernetes APIs. Kubelogin automatically opens the browser, and you can log in to the provider.

After a successful login, you'll get an Authenticated response.

5. If you get an Unable to connect to the server: x509: certificate signed by unknown authority error after kubectl get nodes, remove certificate-authority-data and set insecure-skip-tls-verify to true.
    - cluster:
    # certificate-authority-data: ....
    server: https://172.22.96.133
    insecure-skip-tls-verify: true # Add it here
    name: cluster-name

You can also use valid certificate data, for example:

    export CLUSTER_CA=$(kubectl get secret -n u4a-system oidc-proxy-cert-tls -o jsonpath='{.data.ca\.crt}')
    # Use the data from CLUSTER_CA and set to certificate-authority-data

Then you can run any kubectl command as the logged-in user; Kubernetes RBAC and audit will take effect for that user.

Get id token from cached file

The id_token is cached in ~/.kube/cache/oidc-login/<cached-file>; you can cat the file to get its content and the token. For example:

    {"id_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6IjBkMzEyM2U1MWIxN2IzZTNlNDYzNjgxZTMzZTFkOTNkM2RiY2IwZDkifQ.eyJpc3MiOiJodHRwczovL3BvcnRhbC4xNzIuMjIuOTYuMjA5Lm5pcC5pby9vaWRjIiwic3ViIjoiQ2dWaFpHMXBiaElHYXpoelkzSmsiLCJhdWQiOiJiZmYtY2xpZW50IiwiZXhwIjoxNjc0MzU3OTU0LCJpYXQiOjE2NzQyNzE1NTQsIm5vbmNlIjoiVHhJVlE4VlFINW9PTGtLeGV1ekk3VWp3VVU0WUYyOEQ1N18xLWVpVWEtVSIsImF0X2hhc2giOiJOamZKZWJ1Ry1uUlVlWDJNY2dfZzVRIiwiY19oYXNoIjoiQWVQdUtsTmN5RjgyTy1xWFFqUzEwdyIsImVtYWlsIjoiYWRtaW5AdGVueGNsb3VkLmNvbSIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJuYW1lIjoiYWRtaW4iLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJhZG1pbiIsInBob25lIjoiIiwidXNlcmlkIjoiYWRtaW4ifQ.YtmRZbS7-B0s0vVh9myH1FYcWuKoKNNYkPZQ5asbrQE2n8eC7w74n8D7pvM6v44kvBnp27hNOeo06EK4leNR2Inb2UJtd2QBS1L9i4A3V_vm06o4DkvqfyNcbD7-hL6ES0XkzIKimG3WMMJIznvuA71W_88t77U7jC7wvtKbT7k1KZWgOV6VappWlz7uecuBSQahoCku5AO-s25H1O-FbodOYtL8-ju0sqiHrgmbNaV-f6Wuvvk9XkquAe_dztqWCJ0axfUW7u4J-M947mlR1JlWwbhm-nQXgvugyMVh3FjFOjwi7jR3BA3Me-iuS_XPNSWx-DB0dfsCfErCJ9DvBA"}

    Get id token using username/password

    1. Enable passwordConnector in the oidc-server configuration
    # kubectl edit cm oidc-server -n u4a-system
    oauth2:
    # Enable this one
    passwordConnector: k8scrd
    skipApprovalScreen: true
2. Get an id token using kubelogin or curl
    • kubelogin
    kubelogin get-token --oidc-issuer-url=https://portal.172.22.96.209.nip.io/oidc --oidc-client-id=bff-client --oidc-client-secret=61324af0-1234-4f61-b110-ef57013267d6 --insecure-skip-tls-verify --grant-type=password --username=admin --password=admiN\$123

    # here is the response, get the token from the json
    {"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{"interactive":false},"status":{"expirationTimestamp":"2023-02-11T04:37:32Z","token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImY2ZjFjMjFkNzFhOGEyYmU3ZTg2YjQyYWIwOTYwY2MxNzU3NjdiM2MifQ.eyJpc3MiOiJodHRwczovL3BvcnRhbC4xNzIuMjIuOTYuMjA5Lm5pcC5pby9vaWRjIiwic3ViIjoiQ2dWaFpHMXBiaElHYXpoelkzSmsiLCJhdWQiOiJiZmYtY2xpZW50IiwiZXhwIjoxNjc2MDkwMjUyLCJpYXQiOjE2NzYwMDM4NTIsImF0X2hhc2giOiJyLWtsUnBQcEd3U0I5TFQyelVQSWtRIiwicGhvbmUiOiIifQ.tFOmGN1w79I_s5pWZZK4zEEHwCyuJRwkNtacmxVcCY-Jms-JOzXUJTxnNm8XzIBC3cZqt5U6oNXMuk68MHq0v3g2tQKJeAwV1aojJrIIp5QHefXMUjl_hTaFe1tRgwsvZqBWhExLi1yaTSUfjmP_SZEb23A0R_AWvc7ClO7sbuKQlkPG_gi2TPCBOeTx0UmlQ14w6U3dIJhR57hXfttdSr2nRqKma8bp_jAiUiWaKLSWSyc3tQsxWl7LeAAbRM3URx-3winVIEPEpUgwIBXnrr-ba9BZwZoD5NGytOGw4xA80eGDmmMIG8U2QarJKsZErpzS7EWbiPBLgS2_Wg1eBA"}}
    • curl
    curl -k -XPOST https://portal.172.22.96.209.nip.io/oidc/token -H "Authorization: Basic <base64 of oidc-client-id:oidc-client-secret>" -H "Content-Type: application/x-www-form-urlencoded"  --data 'grant_type=password&password=password&scope=openid&username=username'

    # here is the response, and get id_token from the json
    {"access_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImY2ZjFjMjFkNzFhOGEyYmU3ZTg2YjQyYWIwOTYwY2MxNzU3NjdiM2MifQ.eyJpc3MiOiJodHRwczovL3BvcnRhbC4xNzIuMjIuOTYuMjA5Lm5pcC5pby9vaWRjIiwic3ViIjoiQ2dWaFpHMXBiaElHYXpoelkzSmsiLCJhdWQiOiJiZmYtY2xpZW50IiwiZXhwIjoxNjc2MDkyOTk0LCJpYXQiOjE2NzYwMDY1OTQsImF0X2hhc2giOiJtM2xsQWprUXdlQnhJbUJlQkgxRG1RIiwicGhvbmUiOiIifQ.iel5l_mzlVf2LjbMqzqXb3sqb7L195a-fY4Aaon2_CVn1lBMzOf2qDYbtVF3KhGHxNlaKRxig63uCDfyts84BMD5-Uaz_x4_mq5QaMVYVYEUw9NWsLP-jQ0bTSZE7MZKlxz_a3AGW_fXwW0Y02dqemugBfC3IagBhroYI2PSTKcNCCQz2aao-ZSQ5-rysKSyo0VPDtcY_K8ikpDChLM9GhUKzbdIvctO6mGBOOKHRkiCAbRegOCFhJ6-0O4k6b-m3rXyJkQAIBfesOPIAFxhQQhg3y9wDEVxbBTZ99fwfvfIuSxN_vsITKCsqpRr7t-30jqReIKsYktyzZ15jiJhKg","token_type":"bearer","expires_in":86399,"id_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImY2ZjFjMjFkNzFhOGEyYmU3ZTg2YjQyYWIwOTYwY2MxNzU3NjdiM2MifQ.eyJpc3MiOiJodHRwczovL3BvcnRhbC4xNzIuMjIuOTYuMjA5Lm5pcC5pby9vaWRjIiwic3ViIjoiQ2dWaFpHMXBiaElHYXpoelkzSmsiLCJhdWQiOiJiZmYtY2xpZW50IiwiZXhwIjoxNjc2MDkyOTk0LCJpYXQiOjE2NzYwMDY1OTQsImF0X2hhc2giOiJRT3NNWGdSeDRYaUJJTVZwSElXeUlRIiwicGhvbmUiOiIifQ.ZDU7AouftfpLAs2SDE3Kb86ggVyDEwrgA3jtUxitKUQwKqosjWiaEEGc3w824FAC3eDZhFr1w5uXT6R30O2s0DPzPb0nesDN8wa2ZscU9ESjZrKAAgpgM7uE1vU41mi7GfdZEUHabx83XFvu69KvmA9OKnqaSdyi3-aPYHyBP5GfNYoQ-mteCBsAbRF8l6fe1VREIYV3sQrBC8b9s1Ony4F8YFWgFE4G_1gxV-0qz8IxgzhLGUgehuwsHTUjMLvyGgTiFrFvrPsftEuEGtOQbKswngWQGlYWSsUIWb79Fdk_-wD08fyM9YUGJyb0Bg_HO2M95CFsSASB4HDO4QHOXw"}

Logout

You can remove the local cache files under ~/.kube/cache/oidc-login/<cached-file> to log out the current user.
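A one-line sketch of clearing the whole cache directory (this removes cached tokens for all providers, not just the current one):

# log out by deleting all cached kubelogin tokens
rm -rf ~/.kube/cache/oidc-login/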

The logging component uses the following images:

hub.tenxcloud.com/system_containers/elasticsearch 7.10.1-ik
hub.tenxcloud.com/system_containers/kubectl v1.20.8
hub.tenxcloud.com/system_containers/fluentd-elk v5.0-kfk

Adjust values.yaml

• .Values.rbacSidecar.enabled: false
• .Values.elasticsearch.secure: false
• .Values.ingress.enabled: true (a values.yaml excerpt is sketched below)
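A minimal sketch of the corresponding values.yaml excerpt, assuming the keys nest exactly as their dotted paths above suggest:

# values.yaml (excerpt)
rbacSidecar:
  enabled: false
elasticsearch:
  secure: false
ingress:
  enabled: true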

Access elasticsearch

1. Get the ingress information
ES_HOST=$(kubectl get ingress ingress-es -n addon-system | grep ingress-es | awk '{print $3}')
INGRESS_IP=$(kubectl get ingress ingress-es -n addon-system | grep ingress-es | awk '{print $4}')
2. Access elasticsearch
curl http://$INGRESS_IP -H "Host: $ES_HOST"

A response like the following indicates that the logging service started normally:

{
  "name": "es-allinone-es-0",
  "cluster_name": "es",
  "cluster_uuid": "ATBDAzVHQeSDb7gaKdgNUw",
  "version": {
    "number": "7.10.1",
    "build_flavor": "default",
    "build_type": "tar",
    "build_hash": "7a15d2a",
    "build_date": "2020-08-12T07:27:20.804867Z",
    "build_snapshot": false,
    "lucene_version": "7.7.3",
    "minimum_wire_compatibility_version": "5.6.0",
    "minimum_index_compatibility_version": "5.0.0"
  },
  "tagline": "You Know, for Search"
}
The role of each component is as follows:

• node-exporter: collects host metrics such as CPU, memory, and disk;
• victoriametrics: a fast, efficient, economical, and scalable monitoring solution and time-series database; it scrapes, stores, and queries monitoring data and can fire alerts based on alerting rules;
• metrics-server: the aggregator for core Kubernetes cluster monitoring data; it periodically collects metrics from the Kubelet Summary API and exposes them through the Metrics API;
• kube-state-metrics: collects monitoring data for k8s resource objects such as deployments, Pods, daemonsets, and cronjobs, and exposes metrics;
• grafana: a visualization tool offering a powerful and elegant way to create, share, and browse data, with many nice templates; install it when you want to view monitoring data directly;
• monitoring-operator: manages the monitoring components above.

Installation steps

Prerequisites

• If the vmselect monitoring component has the sidecar enabled and kube-rbac-proxy supports OIDC, the OIDC-related parts must be deployed in advance; you can run

  kubectl  get pod -n u4a-system

  to check whether oidc-server exists and the related components are installed;

• If you need ingress, deploy an ingress-controller in advance;

• vmstorage needs data persistence, so prepare a StorageClass in advance;

• Create the Group observability, which has permission to access monitoring data;

1. Prepare the images and push them to the harbor registry of the target environment

• The following images are required
# main entry operator
hub.tenxcloud.com/kubebb/monitoring-operator:v0.1.2

# images from the vm community
hub.tenxcloud.com/kubebb/vm-operator:v0.35.1
hub.tenxcloud.com/kubebb/vminsert:v1.91.3-cluster
hub.tenxcloud.com/kubebb/vmstorage:v1.91.3-cluster
hub.tenxcloud.com/kubebb/vmselect:v1.91.3-cluster
hub.tenxcloud.com/kubebb/vmagent:v1.91.3
hub.tenxcloud.com/kubebb/vmalert:v1.91.3

# other dependent images
hub.tenxcloud.com/kubebb/kube-rbac-proxy:v0.13.0-32f11472
hub.tenxcloud.com/kubebb/node-exporter:v2.5.0
hub.tenxcloud.com/kubebb/configmap-reload:v0.3.0
hub.tenxcloud.com/kubebb/prometheus-config-reloader:v0.58.0
hub.tenxcloud.com/kubebb/prom-rule-reloader:v0.1.2
hub.tenxcloud.com/kubebb/alertmanager:v0.20.0
hub.tenxcloud.com/kubebb/kube-state-metrics:v1.9.7 (optional)
hub.tenxcloud.com/kubebb/metrics-server:v0.4.1 (optional)
hub.tenxcloud.com/kubebb/grafana:10.0.2 (optional)

2. Fetch the helm package and unpack it

tar zxvf monitoring-operator-0.1.0.tgz
cd monitoring-operator

3. Modify values.yaml of the chart

Following the comments in values.yaml, the main changes are:

• Adjust the image addresses for the actual environment;
• Entries with enabled control whether that component is installed: false skips it, true installs it; entries without an enabled parameter are installed by default;
• If nodePort is enabled, first check whether the port is occupied; if unused, set it to 0;
• If ingress is enabled, set the annotation on the ingress resource; the annotation key is kubernetes.io/ingress.class. Its value can be found in the args of the ingress-controller deploy, e.g.
  kubectl edit deploy -n kube-system ingress-urygcdmyts
  take the value from the args entry - --ingress-class=nginx-ingress-urygcdmyts, so nginx-ingress-urygcdmyts is the value to put into the annotation (see the sketch after this list);
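A sketch of the resulting annotation on the Ingress resource, using the example class name above:

metadata:
  annotations:
    kubernetes.io/ingress.class: nginx-ingress-urygcdmyts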

4. Create the namespace

kubectl --as=admin  --as-group=iam.tenxcloud.com create -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
  labels:
    capsule.clastix.io/tenant: system-tenant
  name: addon-system
EOF
• If the ns already exists before creation, monitoring may have been deployed before; to make sure nothing breaks later, first clean up the old resources under addon-system and delete the vm-related CRDs; the command to find the vm CRDs is kubectl get crd | grep victoriametrics.com

5. Generate the CA certificate (only needed when vmselect has the sidecar enabled and supports the oidc parameters)

When kube-rbac-proxy supports OIDC, its args need the parameters oidc-issuer, oidc-clientID, and oidc-ca-file. If oidc-server is deployed in u4a-system, the parameters can be obtained as follows, for reference:

• Generate the certificate:
kubectl get secret -n u4a-system  oidc-server-root-secret  -oyaml > oidc-sidecar-secret.yaml

Change the namespace in the yaml to addon-system and create a new secret:

kubectl create -f oidc-sidecar-secret.yaml
• Obtaining the oidcIssuer and oidcClientID parameters:
kubectl  get cm -n u4a-system   oidc-server -o yaml

oidcIssuer is the issuer value in it, e.g. https://oidc.192.168.90.217.nip.io

oidcClientID is the id under staticClients in it, e.g. bff-client

6. helm install

• Run the helm command; monitoring-operator is the release name, change it as needed
helm install monitoring-operator -n addon-system ./

7. Check whether the components are running

kubectl get po -n addon-system

Check whether the Pods are running normally;

8. Verify functionality

• After a successful deployment, the monitoring data can be accessed via the ingress address; the command to view the ingress hosts is:

  kubectl  -n addon-system get ingress

  If vmselect has nodePort enabled, the monitoring data can also be reached via host IP:nodePort

• Add a user to the group observability, which has permission to access the monitoring data; get the user's token and send it with the request to verify the permission. Without permission you get Unauthorized. Reference request:

  curl -k "monitoring.192.168.90.217.nip.io/select/0/prometheus/api/v1/query" -d "query=up" -H"Authorization: bearer eyJhbGciOi..."
…on the left-hand side of the repository home page. Then you can see the repository under your GitHub username.

  • Clone your own repository for local development. Use git clone https://github.com/<your-username>/<your-project>.git to clone the repository to your local machine. Then you can create new branches for the changes you want to make.

  • Set the remote upstream to https://github.com/kubebb/<project>.git, for example:

    git remote add upstream https://github.com/kubebb/core.git
    git remote set-url --push upstream no-pushing

    With upstream added, we can easily sync local branches with the upstream branch.

  • Create a branch to add a new feature or fix an issue. Update the local working directory:

    cd <project>
    git fetch upstream
    git checkout main
    git rebase upstream/main

    Create a new branch:

    git checkout -b <new-branch>

    After making changes on the new branch, you can build and test your code.

  • PR conventions

    Creating a PR is the only way to change files in the KubeBB project.

    git commit --signoff -m "description of this PR"

    To help reviewers understand the purpose of your PR, the PR description must follow this convention:

    <type>: <description>

    [optional body]

    where type is one of:

    • feat - introduces a new feature
    • fix - fixes a bug
    • chore - changes unrelated to a fix or feature that do not modify source or test files (e.g. updating dependencies)
    • refactor - refactored code that neither fixes a bug nor adds a feature
    • docs - documentation updates, such as README or other markdown files
    • style - changes that do not affect the meaning of the code, usually formatting-related: whitespace, missing semicolons, etc.
    • test - adds new tests or corrects existing ones
    • perf - performance improvements
    • ci - related to continuous integration
    • build - changes affecting the build system or external dependencies
    • revert - reverts a previous commit

    If the PR resolves a specific issue, you must add Fix: #1 #2 in the PR, as in the example below and the following figure:

    pr_example
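For instance, a commit message following the convention above might look like this (the description and issue number are illustrative):

git commit --signoff -m "fix: correct image override path matching

Fix: #1"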

    Code conventions

    Help with anything

    We chose GitHub as the primary place for KubeBB collaboration, so the latest KubeBB updates are always there. Although contributing via PRs is an explicit way to help, we also welcome any other way:

    • answering other people's questions in issues
    • helping solve other people's problems
    • helping review other people's PRs
    • participating in discussions
    • writing technical blogs
    • and so on

    Join the community

    If you want to become a member of the KubeBB GitHub organization, see below:

    Joining the KubeBB GitHub organization

    Before asking to join the community, we ask that you first make a few contributions to demonstrate your intent to keep contributing to KubeBB.

    • Note: anyone can contribute to KubeBB; joining the KubeBB GitHub organization is not a mandatory step.

    There are many ways to contribute to KubeBB:

    • submit PRs
    • report bugs or provide feedback
    • answer questions on GitHub

    Submit your application

    • Create an issue in the KubeBB repository and list as much of your work as possible.
    • AT 2 existing reviewers to get their approval.
    • Once the request is approved, an admin will send you an invitation.
      • This is a manual process that usually runs a few times a week.
      • If a week passes without an invitation, contact us by email or DingTalk.

Extended resources (building base)

The building-base extended resources adapt the building base's portal service and are not involved in core component lifecycle management. There are currently two kinds:

• Menu: portal menu
• Portal: portal route

A menu is a building-base portal resource that combines with micro-frontend pages to implement customizable portal menus.

Definition

The code definition is located in Menus; the details are as follows:

Tip

Note: for the yaml below, to access the bar field we write spec.foo.bar

spec:
  foo:
    bar: xx
• spec.id: menu group ID
• spec.text: Chinese name of the menu
• spec.textEn: English name of the menu
• spec.column: column index of the menu group
• spec.rankingInColumn: ordering of the menu within the current group; smaller numbers sort first
• spec.icon: menu icon
• spec.replaceSiderBackNextPathnamePattern: used by the back button of a replacement menu; set this when the new pathname is a replacement menu and its back button should return to the current pathname
• spec.pathname: menu route
• spec.redirect: redirect route; takes precedence over pathname, and when set, clicking the menu jumps to the redirect route
• spec.target: same as the target attribute of an a tag
• spec.requiredRoles: roles required for the menu to be visible
• spec.requiredModuleBits: module bits the menu maps to (visible if any one is satisfied)
• spec.tenant: whether the menu's route allows switching tenants
• spec.project: whether the menu's route allows switching projects
• spec.cluster: whether the menu's route allows switching clusters
• spec.isRenderSelectCurrent: whether to render the project/cluster selector
• spec.useChildrenReplaceSider: whether to replace the sider when entering a child page
• spec.getTitleForReplaceSider: function to get the title
• spec.parent: parent menu ID
• spec.parentOwnerReferences: parent menu dependency
• spec.disabled: controls whether the menu is shown (a sketch of a full Menu resource follows this list)
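A minimal sketch of a Menu resource assembled from the fields above. The apiVersion is a placeholder (this document does not state the building-base CRD group); check the CRD in your cluster, e.g. with kubectl get crd | grep -i menu:

apiVersion: <menu-crd-group>/<version>  # placeholder, not confirmed by this document
kind: Menu
metadata:
  name: example-menu
spec:
  id: example
  text: 示例菜单
  textEn: Example Menu
  column: 1
  rankingInColumn: 100
  pathname: /example
  disabled: false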

Portal route

A portal route is a building-base portal resource used to configure access paths.

Definition

The code definition is located in Menus; the details are as follows:

Tip

Note: for the yaml below, to access the bar field we write spec.foo.bar

spec:
  foo:
    bar: xx
• spec.path: request access path
• spec.entry: access path of the static assets

Component

A component is the concept that maps a chart package to a cluster resource; it defines the chart package's basic description, version information, and so on. Components are generally created by a repository and need not be created manually.

Definition

The CRD code definition is located in ComponentTypes. A component's information is all recorded in status; each field's meaning and purpose is described below.

• status.name

  Holds the name of the chart package; must conform to the kubernetes naming rules.

• status.displayName

  Holds the display name of the chart package, taken from the latest version's annotation core.kubebb.k8s.com.cn/displayname; may be empty.

• status.versions

  An array holding the chart package's versions. Each version contains the following information

  • status.versions[index].appVersion: version of the application inside the chart package.
  • status.versions[index].annotations: annotations of this version, such as the component's display name.
  • status.versions[index].createdAt: creation time
  • status.versions[index].updatedAt: update time
  • status.versions[index].deprecated: whether this version is deprecated
  • status.versions[index].version: version of the chart package
  • status.versions[index].digest: digest
• status.description

  Description of the chart package

• status.maintainers

  An array; each entry is a maintainer of the chart package and contains the following information

  • status.maintainers[index].name: maintainer name
  • status.maintainers[index].email: maintainer email
  • status.maintainers[index].url: maintainer website
• status.home

  The component's home page.

• status.sources

  A string array defining the component's code repositories.

• status.keywords

  A string array defining keywords associated with the component.

• status.icon

  The component's icon

• status.deprecated

  Whether the component is deprecated

How it works

The repository Watcher syncs the component list from the repository service and creates/updates components. Therefore:

• Components should not be created manually; they should all be obtained automatically through the Watcher of the component repository
• Components of the same repository can be retrieved via kubebb.component.repository=<repository-name>, as shown below
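For example (the label selector is documented above; the namespace is assumed to be kubebb-system, where the core normally runs):

kubectl get components -n kubebb-system -l kubebb.component.repository=<repository-name>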
…field, similar to first running helm install/upgrade --dry-run and then performing a kubectl diff on the generated manifest,
  • Only when spec.approved in the ComponentPlan is true will the corresponding helm release actually be installed.
  • The image replacement rules of a single ComponentPlan (the spec.override.images field) follow the kustomize:ImageTagTransformer spec; the implementation directly calls that part of the kustomize code, lowering the learning cost and guaranteeing compatibility and correctness.
  • Both the image replacement of a single ComponentPlan and that of a whole Repository are implemented via Helm:post-rendering.
  • Image override policy

    image-changed

    Relationship between ComponentPlan and Helm release

    componentplan

1. ClusterRole

The ClusterRole defines all the permissions Tasks need at runtime; when new permissions are required, simply update this ClusterRole.

2. ServiceAccount

Named kubebb-system.kubebb-rating. This serviceaccount is created in every Repository's namespace.

3. ClusterRoleBinding

Named kubebb-system.kubebb-rating; it binds the serviceaccount kubebb-system.kubebb-rating to the clusterrole kubebb-system.kubebb-rating.

kubectl get clusterrole,clusterrolebinding kubebb-system.kubebb-rating
NAME CREATED AT
clusterrole.rbac.authorization.k8s.io/kubebb-system.kubebb-rating 2023-08-21T09:24:12Z

NAME ROLE AGE
clusterrolebinding.rbac.authorization.k8s.io/kubebb-system.kubebb-rating ClusterRole/kubebb-system.kubebb-rating 8m8s
4. Pipeline, Task

A pipeline named kubebb and Tasks named kubebb-rback and kubebb-helm-lint. A Task defines the concrete actions of a job to execute, while the Pipeline defines which Tasks run and in what order.

kubectl get pipeline -nkubebb-system
NAME AGE
kubebb 4m19s

kubectl get task -nkubebb-system
NAME AGE
kubebb-helm-lint 4m25s
kubebb-rback 4m25s

Users may define their own Tasks and Pipelines, but these resources must be placed in the same namespace as the operator. The ClusterRole, ClusterRoleBinding, and ServiceAccount are used by the pipelinerun when executing Tasks, so that Tasks do not fail from insufficient permissions.

Core logic

1. When a Rating is created

Two labels are added to the Rating, rating.component=<component-name> and rating.repository=<repository-name>, recording the names of the component and repository associated with the current Rating.

2. When a Rating is updated

Updates to spec and status are currently not processed; the handling logic only runs when metadata changes.

According to the pipeline list defined in spec, PipelineRuns are created; the controller also watches PipelineRun changes and syncs their status, along with the Task and TaskRun information, into the Rating.

3. When a Rating is deleted

When a Rating is deleted, the PipelineRuns it created are deleted as well.

Usage

An example of a rating CR:

# rating.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Rating
metadata:
  name: rating
  namespace: kubebb-system
spec:
  componentName: kubebb.kubebb-core
  pipelineParams:
  - pipelineName: kubebb
    params:
    - name: URL
      value:
        type: string
        stringVal: https://github.com/kubebb/components/releases/download/kubebb-core-v0.1.10/kubebb-core-v0.1.10.tgz
        arrayVal: []
        objectVal: {}
    - name: COMPONENT_NAME
      value:
        stringVal: kubebb-core
        type: string
    - name: VERSION
      value:
        stringVal: v0.1.10
        type: string
    - name: REPOSITORY_NAME
      value:
        stringVal: kubebb
        type: string

The Rating defined by the yaml above contains one default pipeline and passes in the parameters the pipeline needs. Apply the yaml above:

kubectl apply -f rating.yaml

You can continuously watch the Rating's changes:

kubectl -nkubebb-system get rating -oyaml -w

If the run fails, the reason is given in status. On success you get a status like the following:

status:
  pipelineRuns:
    rating.kubebb:
      actualWeight: 2
      conditions:
      - lastTransitionTime: "2023-08-23T05:39:53Z"
        message: 'Tasks Completed: 2 (Failed: 0, Cancelled 0), Skipped: 0'
        reason: Succeeded
        status: "True"
        type: Succeeded
      expectWeight: 2
      pipelineName: kubebb
      tasks:
      - conditions:
        - lastTransitionTime: "2023-08-23T05:39:53Z"
          message: All Steps have completed executing
          reason: Succeeded
          status: "True"
          type: Succeeded
        name: kubebb-rback
        taskRunName: rating.kubebb-kubebb-rback
      - conditions:
        - lastTransitionTime: "2023-08-23T05:39:53Z"
          message: All Steps have completed executing
          reason: Succeeded
          status: "True"
          type: Succeeded
        name: kubebb-helm-lint
        taskRunName: rating.kubebb-kubebb-helm-lint

To see the output logs of each task, first list the pods and then view the pod logs.

    kubectl get po -nkubebb-system
    NAME READY STATUS RESTARTS AGE
    kubebb-5dbf45964c-26jpp 1/1 Running 0 3m53s
    rating.kubebb-kubebb-helm-lint-pod 0/1 Completed 0 2m9s
    rating.kubebb-kubebb-rback-pod 0/1 Completed 0 2m9s
Defines filter conditions for the wordpress versions, exactly matching the two versions 16.1.14 and 16.1.13. For all images in the repository coming from docker.io, the registry is replaced with 192.168.1.1, and images whose path is library have it replaced with system-container; for example, the image docker.io/library/nginx:v1.2.3 in the repository becomes 192.168.1.1/system-container/nginx:v1.2.3

Additional notes on OCI repositories

Supported addresses

Repositories can use OCI image registry addresses; the following are currently supported:

• Native harbor 2.x and above:
  • Project level: tries to fetch all tags of all images under the project, e.g. oci://demo.goharbor.io/helm-test (demo.goharbor.io is harbor's online test server; its data is wiped every 2 days, see the relevant notes in the docker documentation)
  • Image level: tries to fetch all tags of the image, e.g. oci://demo.goharbor.io/helm-test/nginx
• dockerhub:
  • Project level: tries to fetch all tags of all images under the project, e.g. oci://registry-1.docker.io/bitnamicharts
  • Image level: tries to fetch all tags of the image, e.g. oci://registry-1.docker.io/bitnamicharts/wordpress
• github package
  • github package at the organization level:
    • The whole organization's address: tries to fetch all tags of all images under the organization, e.g. oci://ghcr.io/oci-helm-example
    • An image uploaded directly under the organization: tries to fetch all tags of all images under the organization, e.g. oci://ghcr.io/oci-helm-example/redis
    • A repository address within the organization: tries to fetch all tags of all images under the repository, e.g. oci://ghcr.io/oci-helm-example/helm-oci-example
    • An image address of a repository in the organization: tries to fetch all tags of the image, e.g. oci://ghcr.io/oci-helm-example/helm-oci-example/nginx
  • github package at the user level:
    • The user's address: tries to fetch all tags of all images under the user, e.g. oci://ghcr.io/abirdcfly
    • An image uploaded directly by the user: tries to fetch all tags of all images under the user, e.g. oci://ghcr.io/abirdcfly/redis
    • A user repository address: tries to fetch all tags of all images under the repository, e.g. oci://ghcr.io/abirdcfly/helm-oci-example
    • An image address of a user repository: tries to fetch all tags of the image, e.g. oci://ghcr.io/abirdcfly/helm-oci-example/nginx

Limitations

1. Due to limitations of the github package API:

To manage GitHub Packages with the REST API, you must authenticate with a personal access token (classic). To access package metadata, the token must include the read:packages scope.

When using github package as the OCI storage address, a personal access token (classic) must be provided; users can use the environment variable GITHUB_PAT_TOKEN to override the token provided by the system by default.

2. Because native harbor and github package do not state the image type in their API responses, it is impossible to tell from the API whether an OCI image stores a Helm package or an ordinary docker image. Both are therefore tried, and you may see errors in the logs or resource status. We recommend dedicating an OCI repository to Helm package storage.

3. Private repositories are not supported yet; support will come in a later release.

4. Because they work differently (a chartmuseum-type repository provides an index file, while an OCI repository can only pull the actual packages and parse their contents), fetching from OCI repositories is slower than from chartmuseum repositories, and each storage backend has its own API rate limits. The default parsing concurrency for OCI repositories is 5 and can be overridden via the environment variable OCI_PULL_WORKER; the larger the number, the more concurrent parsing, and the likelier you hit 429 Too Many Requests errors.

How it works

A repository is implemented as a Kubernetes Operator. It periodically fetches the chart repository's data and updates or creates components in the cluster; components are generally not deleted, but components that no longer exist in the chart repository are marked deprecated.

1. When a Repository is created or updated

On create or update, the controller checks whether finalizers were added to the resource and whether the URL change history was updated correctly.

Once all updates are handled, the chartmuseum watcher starts; each time a batch of chart packages is fetched, they are compared with the Components already in the cluster, and new Components are created or existing ones updated.

We do not delete Components that were already created; if a Component exists in the cluster but not in the current chart repository, it is marked deprecated.

2. When a Repository is deleted

Components created by a Repository carry OwnerReferences, so deleting the Repository automatically deletes its associated Components.

3. Image override policy

image-changed


• spec.componentPlanInstallMethod: the install method of the component plan; defaults to auto, options are auto and manual

  • spec.schedule 可选字段 组件安装计划的安装时间,默认为空,只有 spec.componentPlanInstallMethodauto,且上游发布了新版本时才有生效,Cron 格式,例如 45 20 * * *,代表每天 20:45 后再进行新版本安装。

    # ┌───────────── 分 (0 - 59)
    # │ ┌───────────── 时 (0 - 23)
    # │ │ ┌───────────── 月的第几天 (1 - 31)
    # │ │ │ ┌───────────── 月份 (1 - 12)
    # │ │ │ │ ┌───────────── 周的第几天 (0 - 6) (周日到周六)
    # │ │ │ │ │
    # │ │ │ │ │
    # │ │ │ │ │
    # * * * * *
  • spec.其他 订阅中完整的包含了组件安装计划中的自定义配置字段。详细内容见组件安装计划的文档。

  • 工作原理

    订阅以 Kubernetes Operator 方式实现。当订阅控制器监视发现集群中订阅对应的组件创建或更新时,判断订阅未处理该更新事件时,用订阅中的组件安装计划配置创建一个名为 sub-<订阅名>-<安装版本>ComponentPlan,触发后续的组件安装步骤。
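A minimal sketch of a Subscription using the fields above. The spec.component reference and spec.name are assumptions modeled on the ComponentPlan convention in these docs; verify against the CRD:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Subscription
metadata:
  name: nginx-sub
  namespace: default
spec:
  component:               # assumed component reference, as in ComponentPlan
    name: bitnami.nginx
    namespace: default
  name: nginx              # assumed helm release name, as in ComponentPlan
  componentPlanInstallMethod: auto
  schedule: "45 20 * * *"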


Introduction

The core is developed in the kubernetes operator pattern. It provides full component lifecycle management, component subscription, and automated deployment, and adds component auto-rating and pre-install validation through tekton extensions.

Overall architecture

KubeBB Core architecture diagram

Core strengths

Declarative component lifecycle management

Component lifecycle

A component's lifecycle falls into three phases:

component_lifecycle

1. Development phase

Developers use the low-code platform (optional) to develop a component, choose a packaging method according to the component type, and publish the packaged resources to the repository service.

Component resources generally fall into two kinds:

• Image resources: a component is usually built into an image and pushed to an image registry
• Install package: the resource configuration for installing and deploying the component (usually Helm charts), pushed to a charts repository

Image resources are managed by public (Dockerhub) or private image registries and receive no special handling; the component repository service mainly stores the install packages (Charts).

2. Deployment phase

The system administrator manually browses the component list stored in the component repository service, obtains the available component information (publisher, version, install configuration, etc.), completes the component configuration according to the actual situation, and installs it into the system. After installation, version updates must be checked manually and upgrades completed with care.

3. Usage phase

After a component is installed, ordinary users access the component's services through the unified entry point and unified user authentication provided by the building-base Kit.

Advantages of the declarative approach

Declarative component lifecycle management has the following advantages:

• Readability: component definitions are easier to understand and read because they express the desired result rather than the steps to achieve it.
• Maintainability: components are easier to maintain because they are easier to understand, easier to modify, and less error-prone.
• Reusability: components are easier to reuse because they are usually context-independent and usable in different environments.
• Extensibility: components are easier to extend because they are usually component- and module-based and can simply be composed into more complex systems.
• Reliability: more reliable because they are usually based on static configuration rather than runtime state, which means fewer runtime errors and unexpected behaviors.

Multi-dimensional component rating

By integrating the Tekton pipeline component, component rating is automated, and the CRD Rating summarizes and analyzes the evaluation data.

Component rating currently covers three dimensions:

• Security: evaluates the security of the component and of its software supply chain.
• Reliability: evaluates whether the component itself has been well tested for functionality and performance
• Availability: evaluates whether the component has sufficient documentation and examples to guide users

Full compatibility with the Helm ecosystem

Helm is a mature package management tool offering a simple way to manage the deployment and upgrade of Kubernetes applications; it has a large community and many excellent projects. The core was therefore designed from the start to be fully compatible with the Helm ecosystem

This shows in every aspect of the core's design; we support:

Extending and adapting the building-base services

Tip
1. The low-code development platform defines building-base resources such as Menus and Routes and packages them into component templates
2. After the core fetches the building-base custom resources, it automatically parses, configures, and creates the corresponding resources

The building-base service supports extending the platform through custom menus and routes. To support this capability we:

• ported the Menu resource type
• ported the Route configuration

thereby chaining the 云梯 (Yunti) low-code development platform and the building-base services together through the core


Component rating

The goal of component rating is to evaluate a component along multiple dimensions with as much automated testing as possible. Component rating therefore consists of three parts:

• define and run the component's automated tests
• collect and process the test data
• evaluate and obtain the component's grade

To realize the three parts above, we chose to:

• use Tekton to define a variety of tasks along the security, reliability, and availability dimensions for automated component testing
• define the CRD Rating and its controller, which watches Tekton resources in real time and collects the test data
• use KubeAGI/arcadia to let AI define the component evaluation rules and update component grades in real time based on the test data

Core flow

rating_workflow

Task list

Tip

All task weights are currently 1; later, tasks will be weighted by their actual importance, and the final score derived from that

• rating-security-rback (security): obtains the component's complete RBAC via dryrun; weight 1; supported
• rating-security-slsa (security): verifies the SLSA supply-chain security level; weight 1; in development
• rating-reliability-linting (reliability): verifies the component conforms to the spec via helm lint; weight 1; supported
• rating-reliability-testing (reliability): via helm testing; weight 1; in development
• rating-reliability-deployment-testing (reliability): verifies that the deployed component's functionality and performance meet expectations; weight 1; in development
• rating-availability-sample (availability): verifies that the component package contains a ComponentPlan example; weight 1; in development

Security tasks

rating-security-rback derives the complete install manifests from the chart package's contents and generates a permission relationship graph from them; users can judge from the graph whether the permissions are excessive and decide whether to install.

This is done in the following 4 steps:

1. Download the chart package

Download the specified chart package via helm pull

2. Generate the .dot file

Use helm template to fully enumerate what the chart package will install, and convert it with the yq and jq commands into the following format:

{
  "kind": "List",
  "apiVersion": "v1",
  "items": [
    {
      "kind": "ServiceAccount",
      "apiVersion": "v1",
      "metadata": {
        "name": "sa"
      }
    }
  ]
}

Convert the json content above into a .dot file with the rback command, roughly as sketched below.
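A shell sketch of these two steps under stated assumptions: the yq/jq flags and the rback stdin/stdout usage are assumptions, and the actual commands live in the kubebb Task definitions:

# enumerate the chart's manifests and wrap them into a v1 List
helm pull <repo>/<chart> --version <version>
helm template <chart>.tgz \
  | yq -o=json \
  | jq -s '{kind: "List", apiVersion: "v1", items: .}' > resources.json

# render the RBAC relationship graph as a .dot file
rback < resources.json > rbac.dot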

3. Store it in a ConfigMap

Store the resulting .dot file in a ConfigMap.

4. Write the ConfigMap name to the output

A Pipeline can output some run results; the ConfigMap name is written into the output for users' later convenience.

Reliability tasks

rating-reliability-linting checks whether the chart package is written in the correct format, ensuring the platform can display its information completely to users.

This is done in the following 2 steps:

1. Download the chart package

Download the chart package via helm pull.

2. Lint

Check the package's format via helm lint; the output goes to the log.

Availability tasks

(to be added)



Roadmap

v0.1.0

• Support managing component repositories (Repository)
  • Support Repository Servers compatible with Helm repositories
  • Watcher watches the Repository
• Implement Component management
  • Watcher implements CRUD operations on Components
• Support ComponentPlan and Subscription
  • Allow users to subscribe to the latest version changes of a Component
  • Plan component deployments with ComponentPlan, compatible with Helm Charts

v0.2.0

• Support Events recording for all core controllers
• Adapt to the Kubebb building base
• Rate Components (Rating) along the security, reliability, and availability dimensions based on Tekton Pipelines
• Pre-install validation (Check) of ComponentPlan deployments based on Tekton Pipelines
• Enable auth and OCI in component repositories (Repository)
• Integrate with the low-code platform

Component development

Kubebb component install packages use the Helm model and follow the Helm charts development rules. On top of that, we additionally define a few special fields to cover the special needs of some components.

Component types

From a functional perspective, we divide components into two categories:

• System components, such as U4A and TMF; running them requires system-admin privileges

• Ordinary functional components, such as minio and weaviate; they can run in any tenant-project without special restrictions

General configuration

See the official Helm documentation

Advanced component configuration

To let different components control where they are installed and with what permissions, several extra configuration fields are agreed upon

Chart.yaml

Chart.yaml contains the component's core definition, version, maintainers, and so on, all predefined by Helm. To support the extra special needs, we chose to define them freely through annotations. For example:

annotations:
  core.kubebb.k8s.com.cn/displayname: "内核"
  core.kubebb.k8s.com.cn/restricted-tenants: "system-tenant"
  core.kubebb.k8s.com.cn/restricted-namespaces: "msa-system"
• core.kubebb.k8s.com.cn/displayname: fills in the component's display name; Chinese and English are supported
• core.kubebb.k8s.com.cn/restricted-tenants: restricts the tenants the component may be installed into; separate multiple tenants with ,
• core.kubebb.k8s.com.cn/restricted-namespaces: restricts the projects/namespaces the component may be installed into; separate multiple namespaces with ,

Enabling Rating

The Rating component-rating feature is off by default and must be enabled manually. We recommend enabling Rating as follows:

Tip

For more details on Rating, see the component rating design and the Rating CRD definition

Rating depends on two components:

• Tekton provides the pipeline capability to run the automated component tests
• KubeAGI Arcadia provides the AI data analysis capability to complete the AI evaluation of components

Therefore, Tekton and Arcadia must be installed before the Rating feature can be used.

1. Install kubebb core

Follow Install the core to install the core (with Rating disabled).

2. Install the Tekton pipeline

The official component repository provides the Tekton component package and a Tekton install example. The core can be used to install tekton quickly.

Before running the commands below, make sure the component kubebb.tekton-operator has finished syncing

The commands are:

# installs into the default namespace by default
kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/examples/tekton-operator/componentplan.yaml

Check the install status:

kubectl get pods --watch

When the installation completes, the output looks like:

❯ kubectl get pods
NAME READY STATUS RESTARTS AGE
my-tekton-tekton-operator-68bdffc888-8dtfx 2/2 Running 0 25m
my-tekton-tekton-operator-webhook-78bdfcbc77-6k6cx 1/1 Running 0 25m

If the installation does not complete for a long time, check the status of the corresponding Componentplan resource.

3. Install the Arcadia AI component

The Arcadia component lives in a separate component repository

1. Add the arcadia component repository
kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/repos/repository_arcadia.yaml

Once it succeeds, you can list the components in the repository with:

kubectl get components -nkubebb-system -l kubebb.component.repository=arcadia

If everything is fine, the output looks like:

❯ kubectl get components -nkubebb-system -l kubebb.component.repository=arcadia
NAME AGE
arcadia.arcadia 32s
arcadia.jupyterlab 32s
arcadia.llms 32s
2. Install the Arcadia AI component

Here we recommend the automatic install mode via component subscription; it installs into the default namespace by default

kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/examples/arcadia/subscription.yaml
3. Check the install status
❯ kubectl get pods --watch
NAME READY STATUS RESTARTS AGE
my-tekton-tekton-operator-68bdffc888-8dtfx 2/2 Running 0 48m
my-tekton-tekton-operator-webhook-78bdfcbc77-6k6cx 1/1 Running 0 48m
arcadia-5cb86f8787-jvd7j 0/1 Pending 0 0s
arcadia-5cb86f8787-jvd7j 0/1 Pending 0 0s
arcadia-5cb86f8787-jvd7j 0/1 ContainerCreating 0 0s
arcadia-5cb86f8787-jvd7j 0/1 Running 0 20s
arcadia-5cb86f8787-jvd7j 1/1 Running 0 30s

4. Update the core

Enable Rating by setting the parameter deployment.rating_enable=true:

helm upgrade  -nkubebb-system kubebb-core kubebb/kubebb-core  --set deployment.rating_enable=true

Check the core Pod status:

❯ kubectl get pods -nkubebb-system --watch
NAME READY STATUS RESTARTS AGE
kubebb-core-65ddc99994-25k49 0/1 Running 0 7s
kubebb-core-6d78d7d8fd-vxbc6 1/1 Running 0 119s
kubebb-core-65ddc99994-25k49 1/1 Running 0 10s
kubebb-core-6d78d7d8fd-vxbc6 1/1 Terminating 0 2m2s
kubebb-core-6d78d7d8fd-vxbc6 0/1 Terminating 0 2m3s
kubebb-core-6d78d7d8fd-vxbc6 0/1 Terminating 0 2m3s
kubebb-core-6d78d7d8fd-vxbc6 0/1 Terminating 0 2m3s

If the upgrade succeeds, you can see logs like the following in the core Pod:

1.6935407235060694e+09 INFO Starting EventSource {"controller": "rating", "controllerGroup": "core.kubebb.k8s.com.cn", "controllerKind": "Rating", "source": "kind source: *v1alpha1.Rating"}
1.6935407235063274e+09 INFO Starting EventSource {"controller": "rating", "controllerGroup": "core.kubebb.k8s.com.cn", "controllerKind": "Rating", "source": "kind source: *v1beta1.PipelineRun"}

Migrating from Helm commands

The core is designed to be as compatible with Helm commands as possible; for a developer or user familiar with Helm commands, using the core is easy.

helm repo add: add a repository

helm repo add bitnami https://charts.bitnami.com/bitnami

Adding a repository corresponds to creating a Repository resource:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: bitnami
spec:
  url: https://charts.bitnami.com/bitnami

helm install: install a chart

cat << EOF > values.yaml
replicaCount: 2
EOF

helm install nginx bitnami/nginx --version 15.0.2 -f values.yaml --set image.registry=ddd.ccc

Installing a chart corresponds to creating a ComponentPlan resource.

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: nginx
spec:
  approved: true
  component:
    name: bitnami.nginx
    namespace: default
  name: nginx
  override:
    valuesFrom:
    - kind: ConfigMap
      name: nginx
      valuesKey: values.yaml
    set:
    - image.registry=ddd.ccc
  version: 15.0.2
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx
data:
  values.yaml: |
    replicaCount: 2

helm upgrade: upgrade a release

helm upgrade nginx bitnami/nginx --set image.registry=docker.io

There are 2 ways to upgrade a version:

1. Create a new ComponentPlan with the new configuration; as long as metadata.namespace (the helm release namespace) and spec.name (the helm release name) match, the corresponding helm release is upgraded.
2. Modify the original ComponentPlan. Compared to the former, this approach keeps no history and is more flexible; the former approach allows rollbacks.

helm uninstall: delete a release

helm uninstall nginx

Deleting a release corresponds to deleting the ComponentPlan. Because the relationship between ComponentPlan and helm release can be many-to-one, note that you need to delete the Componentplan whose status.latest is True (meaning it corresponds to the latest revision of the helm release) or whose status.installedRevision matches the current helm release revision.

helm rollback: roll back a release

helm rollback nginx 1

To roll back, just add the label core.kubebb.k8s.com.cn/rollback: true to the ComponentPlan you want to roll back to.
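For example (a sketch; the ComponentPlan name nginx is illustrative, and the label key is the one documented above):

kubectl label componentplan nginx core.kubebb.k8s.com.cn/rollback=true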


Full description of the "image override" feature

Introduction

Image override means replacing the images in a helm chart package with specified image names so that it installs normally. It is typically used in air-gapped kubernetes environments that cannot reach the original image registries.

In that case we usually do 2 things: first find an environment that can reach the original registry, download the images, and upload them to our own registry following some rule; then modify the image addresses at install time. This feature is designed to make the 2nd step easy.

It has the following advantages:

1. No changes to the helm chart package.
2. Change only what you need, at fine granularity.
3. Split into repository-level and component-level replacement, configured separately, decoupled and mutually independent.

image-changed

Taking the figure above as an example: the image in the helm chart package is docker.com/library/nginx:1.25.1, but when we actually install the chart package the image address we need is 192.168.1.1/system-container/nginx:latest; the image override feature ensures the latter address is used at install time.

Image override is actually implemented via Helm:post-rendering.

Parameter description

Docker image format

We again use docker.com/library/nginx:1.25.1 to explain:

According to the official docker documentation, the basic format is [HOST[:PORT_NUMBER]/]PATH:TAG, where:

• HOST: optional; the hostname specifies where the image is located. It must comply with standard DNS rules but may not contain underscores. If no hostname is specified, Docker defaults to the public registry at registry-1.docker.io.
• PORT_NUMBER: if a hostname is present, it may optionally be followed by a registry port number in the form :8080.
• PATH: the path consists of slash-separated parts. Each part may contain lowercase letters, digits, and separators; a separator is defined as one period, one or two underscores, or one or more hyphens. A part may not begin or end with a separator. While the OCI spec supports more than two slash-separated parts, most registries support only two. For Docker's public registry the path format is:
  • [NAMESPACE/]REPOSITORY: the first, optional part is usually a user's or organization's namespace; the second, mandatory part is the repository name. When the namespace is absent, Docker uses library as the default namespace.
• After the image name, the optional TAG is a custom, human-readable manifest identifier, usually a specific version or variant of the image. The tag must be valid ASCII and may contain lowercase and uppercase letters, digits, underscores, periods, and hyphens; it may not begin with a period or hyphen and may be at most 128 characters. If no tag is specified, Docker commands default to latest.

Image override configuration

Repository part

An example repository:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: repository-bitnami-special-version
  namespace: kubebb-system
spec:
  # other parts omitted
  imageOverride:
  - registry: docker.io
    newRegistry: 192.168.1.1
    pathOverride:
      path: library
      newPath: system-container

Details of each parameter:

spec.imageOverride optional; an array defining a series of repository-level image override policies.

Each entry contains:

• spec.imageOverride[].registry: the registry domain of the image, optionally with a port, e.g. docker.io or 192.168.1.1:5000
• spec.imageOverride[].newRegistry: the registry domain (optionally with a port) that registry is replaced with.
• spec.imageOverride[].pathOverride: optional, array.
  • spec.imageOverride[].pathOverride.path: the old image path; e.g. in the image address docker.io/library/nginx:latest the path is library
  • spec.imageOverride[].pathOverride.newPath: the new image path that path is replaced with; may be empty.

Component install part

A ComponentPlan example:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: nginx-15.0.2
  namespace: kubebb-system
spec:
  # other parts omitted
  override:
    images:
    - name: docker.io/bitnami/nginx
      newTag: latest

Details of each parameter:

spec.override.images array; image customization parameters similar to kustomize's

• spec.override.images[].name

  The original image name; the tag is optional. If a tag is included, the match must be exact down to the tag before replacement happens: e.g. if this field is docker.io/bitnami/nginx:v1, only nginx images tagged v1 match, and docker.io/bitnami/nginx:v2 is not replaced.

• spec.override.images[].newName

  The name replacing the original image name

• spec.override.images[].newTag

  The new tag replacing the original tag

• spec.override.images[].digest

  The new digest replacing the original tag; if digest has a value, newTag is ignored.

Concrete Examples

Bulk-mirroring a repository

Suppose we want to deploy the Helm chart bitnami nginx chart version 15.0.2 into an air-gapped Kubernetes environment. The images involved are:

Original image | Purpose | Required
docker.io/bitnami/nginx:1.25.1-debian-11-r0 | used by the nginx deployment | required
docker.io/bitnami/git:2.41.0-debian-11-r4 | imports configuration for the nginx deployment | optional
docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | exposes metrics for the nginx deployment | optional

Suppose we bulk-copied the docker.io/bitnami/ registry into our own registry 192.168.1.1:5000, so the image addresses change as follows:

Original image | Local image
docker.io/bitnami/nginx:1.25.1-debian-11-r0 | 192.168.1.1:5000/nginx:1.25.1-debian-11-r0
docker.io/bitnami/git:2.41.0-debian-11-r4 | 192.168.1.1:5000/git:2.41.0-debian-11-r4
docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | 192.168.1.1:5000/nginx-exporter:0.11.0-debian-11-r91

Then we only need to modify the Repository configuration as follows:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: repository-bitnami-special-version
  namespace: kubebb-system
spec:
  # other fields omitted
  imageOverride:
  - registry: docker.io
    newRegistry: 192.168.1.1:5000
    pathOverride:
      path: bitnami
      newPath: ""

Using a custom image

Suppose we want to deploy the Helm chart bitnami nginx chart version 15.0.2 into a Kubernetes environment that can reach docker.io. The images involved are:

Original image | Purpose | Required
docker.io/bitnami/nginx:1.25.1-debian-11-r0 | used by the nginx deployment | required
docker.io/bitnami/git:2.41.0-debian-11-r4 | imports configuration for the nginx deployment | optional
docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | exposes metrics for the nginx deployment | optional

Suppose we want to use our own locally built nginx image 192.168.1.1:5000/tmp/nginx:2023, so the image addresses change as follows:

Original image | Local image
docker.io/bitnami/nginx:1.25.1-debian-11-r0 | 192.168.1.1:5000/tmp/nginx:2023
docker.io/bitnami/git:2.41.0-debian-11-r4 | docker.io/bitnami/git:2.41.0-debian-11-r4
docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91

Then we only need the following configuration in the ComponentPlan at install time:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: nginx-15.0.2
  namespace: kubebb-system
spec:
  # other fields omitted
  override:
    images:
    - name: docker.io/bitnami/nginx
      newName: 192.168.1.1:5000/tmp/nginx
      newTag: "2023"

Bulk-mirroring a repository while also using a custom image

Suppose we want to deploy the Helm chart bitnami nginx chart version 15.0.2 into an air-gapped Kubernetes environment. The images involved are:

Original image | Purpose | Required
docker.io/bitnami/nginx:1.25.1-debian-11-r0 | used by the nginx deployment | required
docker.io/bitnami/git:2.41.0-debian-11-r4 | imports configuration for the nginx deployment | optional
docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | exposes metrics for the nginx deployment | optional

Suppose we bulk-copied the docker.io/bitnami/ registry into our own registry at 192.168.1.1:5000/bitnami-mirror/, and we also want to use our own locally built nginx image 192.168.1.1:5000/tmp/nginx:2023. The image addresses then change as follows:

Original image | Local image
docker.io/bitnami/nginx:1.25.1-debian-11-r0 | 192.168.1.1:5000/tmp/nginx:2023
docker.io/bitnami/git:2.41.0-debian-11-r4 | 192.168.1.1:5000/bitnami-mirror/git:2.41.0-debian-11-r4
docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | 192.168.1.1:5000/bitnami-mirror/nginx-exporter:0.11.0-debian-11-r91

Then we first configure the Repository as follows:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: repository-bitnami-special-version
  namespace: kubebb-system
spec:
  # other fields omitted
  imageOverride:
  - registry: docker.io
    newRegistry: 192.168.1.1:5000
    pathOverride:
      path: bitnami
      newPath: bitnami-mirror

and then configure the ComponentPlan at install time as follows:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: nginx-15.0.2
  namespace: kubebb-system
spec:
  # other fields omitted
  override:
    images:
    - name: docker.io/bitnami/nginx
      newName: 192.168.1.1:5000/tmp/nginx
      newTag: "2023"

Comparison

Comparison with --set image=xxx in helm install commands

Many Helm charts expose variables in values.yaml to hold image addresses.

Some more carefully designed charts even split the image into registry, repository, and tag. (For example, the bitnami nginx chart version 15.0.2 mentioned above provides four variables: image.registry (default docker.io), image.repository (default bitnami/nginx), image.tag (default 1.25.1-debian-11-r0), and image.digest (default empty).)

Using these variables to change image addresses does work.

However, not every chart follows these practices, and there is no unified convention for which variable a chart uses for its images.

The image override feature sidesteps these problems and provides uniform image replacement.

Another scenario: when we relocate an image registry (for example, changing Harbor's external address, or moving images offline into another environment), installing with helm --set image=xxx would require updating every single command with the new registry address. With image override, we only change the Repository configuration, and no component configuration needs to change.
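For contrast, here is the per-install alternative using the four chart variables listed above. It works only for charts that expose exactly these values, and the flags must be repeated in every install command:

helm install nginx bitnami/nginx \
  --set image.registry=192.168.1.1:5000 \
  --set image.repository=nginx \
  --set image.tag=1.25.1-debian-11-r0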

    - + \ No newline at end of file diff --git a/docs/core/userguide/privatecluster/index.html b/docs/core/userguide/privatecluster/index.html index 7aebe9d17..9fbdfaf18 100644 --- a/docs/core/userguide/privatecluster/index.html +++ b/docs/core/userguide/privatecluster/index.html @@ -5,7 +5,7 @@ 私有集群部署方案 | Framework as a Building Block for Kubernetes - + @@ -16,7 +16,7 @@ 这里还是以 chartmuseum 测试

cd components/charts/chartmuseum
helm package .

# On success this prints {"saved":true}
curl --data-binary "@chartmuseum-3.10.1.tgz" http://localhost:8080/api/charts

4.4 Check the result

# Inspect index.yaml
curl http://localhost:8080/index.yaml

4.5 Upload the other charts

For whichever other charts under components/charts you need, repeat step 4.3 to upload them.


5. Using kubebb-core

5.1 Deploy a Repository

# repo.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: kubebb
  namespace: kubebb-system
spec:
  url: http://chartmuseum.default.svc.cluster.local:8080
  pullStategy:
    intervalSeconds: 120
    retry: 5

Create the Repository:

# 1. Create the repository
kubectl apply -f repo.yaml

# 2. Check that the components were created
kubectl get components.core.kubebb.k8s.com.cn -n kubebb-system
NAME                 AGE
kubebb.chartmuseum   2s
kubebb.kubebb-core   2s

As you can see, once the Repository is created, the related Components are created automatically.

5.2 Deploy a Component

Deploying a component uses the ComponentPlan resource. Here we choose to deploy another chartmuseum: the chartmuseum installed earlier via Helm stores the system's charts, while this one is deployed purely to verify that the feature works (we have not uploaded any other chart packages).

# componentplan.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: chartmuseum-test
  namespace: default
spec:
  approved: true
  name: chartmuseum-test
  version: 3.10.1
  override:
    set:
    - image.repository=localhost:5001/chartmuseum
    - env.open.DISABLE_API=false
  component:
    name: kubebb.chartmuseum
    namespace: kubebb-system

Create the ComponentPlan:

kubectl apply -f componentplan.yaml

Once the deployment completes, the chartmuseum-test pod is up as well:

kubectl get po

NAME                                READY   STATUS    RESTARTS   AGE
chartmuseum-6c4bc46898-msp7r        1/1     Running   0          107s
chartmuseum-test-86d66fd5d7-lp2rn   1/1     Running   0          11s

5.3 Repository image override

This step tests the image rewrite policy; skip it if you don't need it. We keep using the Helm-deployed chartmuseum, which serves a chartmuseum chart that uses the image

ghcr.io/helm/chartmuseum:v0.16.0

With the override below, that reference is rewritten to localhost:5001/chartmuseum:v0.16.0.

# repo-override-image.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: repo-override-image
  namespace: kubebb-system
spec:
  url: http://chartmuseum.default.svc.cluster.local:8080
  pullStategy:
    intervalSeconds: 120
    retry: 5
  imageOverride:
  - registry: ghcr.io
    newRegistry: localhost:5001
    pathOverride:
      path: helm
      newPath: ""

Create the Repository:

kubectl apply -f repo-override-image.yaml

After the Repository is created, list the components:

kubectl get components -A
NAMESPACE       NAME                              AGE
kubebb-system   kubebb.chartmuseum                18m
kubebb-system   kubebb.kubebb-core                18m
kubebb-system   repo-override-image.chartmuseum   5s
kubebb-system   repo-override-image.kubebb-core   5s

Install chartmuseum once more. Note that this time we do not set the image chartmuseum uses.

# componentplan-default-override.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: chartmuseum-test233
  namespace: default
spec:
  approved: true
  name: chartmuseum-test233
  version: 3.10.1
  override:
    set:
    - env.open.DISABLE_API=false
  component:
    name: repo-override-image.chartmuseum
    namespace: kubebb-system

Create the ComponentPlan:

kubectl apply -f componentplan-default-override.yaml

Check that the pods are running:

kubectl get po

NAME                                   READY   STATUS    RESTARTS   AGE
chartmuseum-6c4bc46898-msp7r           1/1     Running   0          32m
chartmuseum-test-86d66fd5d7-lp2rn      1/1     Running   0          31m
chartmuseum-test233-544cbfb87c-b6pdd   1/1     Running   0          12s
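As an optional check, list the image each pod actually runs; with the override above, chartmuseum-test233 should report localhost:5001/chartmuseum:v0.16.0:

kubectl get pods -n default \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'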

Deployment complete ✅!

    - + \ No newline at end of file diff --git a/docs/intro/index.html b/docs/intro/index.html index 8ba7ce11e..2a3cae3c2 100644 --- a/docs/intro/index.html +++ b/docs/intro/index.html @@ -5,14 +5,14 @@ 总览 | Framework as a Building Block for Kubernetes - +

Overview

KubeBB (Kubernetes Building Blocks) is a kernel-driven component lifecycle management platform. It integrates an out-of-the-box cloud-native building base with low-code component development capabilities, together realizing a cloud-native three-tier component model.

overview

Kubebb provides three kits:

• Kernel Kit: provides declarative component lifecycle management and a component marketplace, and uses Tekton pipelines to strengthen the integration between low-code platform components and building-base services.
• Building-base Kit: provides an out-of-the-box cloud-native service portal, including basic components such as users, OIDC authentication, permissions, auditing, tenant management, and portal services, as well as cluster components such as certificate management and Nginx Ingress.
• Low-code Kit: built on the Low-Code Engine and Dolt, a relational database with Git-like semantics; with the building base's menu and routing resources and the Kernel Kit's component management capabilities, it covers the whole path from component development and testing to release.

The relationship between the three core kits can be compared to an operating system:

• Kubernetes ~ the OS kernel
• Core ~ the software installer
• Building-base Kit ~ the OS's system software, such as the GUI, user management, and networking
• Low-code Kit ~ the OS's software development tools

Kernel Kit

The Kernel Kit is the project we currently focus on and develop, fully following open-source project governance practices. Our current goals:

1. Declarative full-lifecycle component management

Built on the Operator Pattern, it implements declarative management of the full component lifecycle.

component-lifecycle

Four core CRDs are provided:

• Repository: defines a component repository's access information, polling policy, and filtering options, so that the repository service is polled periodically for the latest component list.
• Component: records a component's basic description, version list, deprecation status, and other information.
• ComponentPlan: defines manual approval for component installation, the component reference, version selection, and Helm-like configuration override policies, enabling traceable deployment, upgrade, and rollback of components.
• Subscription: defines a user's subscription to component version updates.

One extension CRD, Rating, integrates Tekton Pipeline.

2. An open component marketplace

The component marketplace is the productization of the kernel's capabilities. It is published to the official component repository as a component adapted to the building-base services, extending the KubeBB ecosystem.

Building-base Kit

The Building-base Kit integrates the following components to provide a unified authentication center and portal entry:

portal

Low-code Kit

The Low-code Kit provides the three-tier component development model, front-end module development, and code-generation capabilities, and relies on the Kernel Kit for standardized packaging, testing, and publishing.

lowcode_development

Technical architecture

The platform is developed with a front-end/back-end separation, using K8S as the core development framework and following K8S extension mechanisms and API specifications. The basic logic of the overall development architecture is shown in the figure below: Figure 2

1. Authentication for all component development and extensions goes through the unified authentication center.
2. Authentication is handled centrally by DockApp, the micro-frontend main framework; other micro-frontend extensions do not need to integrate with the authentication center separately.
3. The development architecture can be viewed as three tiers:
   • Tier 1: the front end uses a micro-frontend architecture and is developed with low-code as much as possible, raising the share of auto-generated code.
   • Tier 2: OpenAPIs are added per business requirement, forming a unified BFF layer that aggregates APIs and serves the business-scenario data the front end needs.
   • Tier 3: the back end is developed in the Operator pattern (CRD + controller), forming a data-driven development workflow.
4. External APIs consist of two parts:
   • OpenAPIs provided by the BFF layer
   • resource APIs provided by the K8S layer

Get more components

Browse the component marketplace and install the service components you need into the portal, for example:

    - + \ No newline at end of file diff --git a/docs/lowcode-development/development/bff-apis/index.html b/docs/lowcode-development/development/bff-apis/index.html index a1ce474cc..7fa8e3406 100644 --- a/docs/lowcode-development/development/bff-apis/index.html +++ b/docs/lowcode-development/development/bff-apis/index.html @@ -5,13 +5,13 @@ BFF层API开发 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/docs/lowcode-development/development/develop-hello-world/index.html b/docs/lowcode-development/development/develop-hello-world/index.html index bd739e03f..bde8e498d 100644 --- a/docs/lowcode-development/development/develop-hello-world/index.html +++ b/docs/lowcode-development/development/develop-hello-world/index.html @@ -5,13 +5,13 @@ 发布一个 Hello World 组件 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/docs/lowcode-development/development/frontend/index.html b/docs/lowcode-development/development/frontend/index.html index 535d70e50..98dcc750e 100644 --- a/docs/lowcode-development/development/frontend/index.html +++ b/docs/lowcode-development/development/frontend/index.html @@ -5,13 +5,13 @@ 前端开发 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/docs/lowcode-development/development/low-code-engine/index.html b/docs/lowcode-development/development/low-code-engine/index.html index f00d90412..8308baff1 100644 --- a/docs/lowcode-development/development/low-code-engine/index.html +++ b/docs/lowcode-development/development/low-code-engine/index.html @@ -5,13 +5,13 @@ 前端基于低代码引擎的开发 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/docs/lowcode-development/development_pattern/index.html b/docs/lowcode-development/development_pattern/index.html index e6abb809e..b714c4f75 100644 --- a/docs/lowcode-development/development_pattern/index.html +++ b/docs/lowcode-development/development_pattern/index.html @@ -5,14 +5,14 @@ 开发模式 | Framework as a Building Block for Kubernetes - +

Development Pattern

KubeBB components are developed with a front-end/back-end separation, using K8S as the core development framework and following K8S extension mechanisms and API specifications. The basic logic of the overall development architecture is shown in the figure below: dev_arch

1. Authentication for all component development and extensions goes through the unified authentication center.
2. Authentication is handled centrally by DockApp, the micro-frontend main framework; other micro-frontend extensions do not need to integrate with the authentication center separately.

Three-tier model

Components built with low-code development follow this three-tier development model:

• Tier 1: the front end uses a micro-frontend architecture and is developed with low-code, raising the share of auto-generated code.
• Tier 2: OpenAPIs are added per business requirement, forming a unified BFF layer that aggregates APIs and serves the business-scenario data the front end needs.
• Tier 3: the back end is developed in the Operator pattern (CRD + controller), forming a data-driven development workflow.
    - + \ No newline at end of file diff --git a/docs/lowcode-development/intro/index.html b/docs/lowcode-development/intro/index.html index 7ca03d35d..9496f3d04 100644 --- a/docs/lowcode-development/intro/index.html +++ b/docs/lowcode-development/intro/index.html @@ -5,13 +5,13 @@ 介绍 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/docs/quick-start/buildingbase_quickstart/index.html b/docs/quick-start/buildingbase_quickstart/index.html index e8528a808..a134e74da 100644 --- a/docs/quick-start/buildingbase_quickstart/index.html +++ b/docs/quick-start/buildingbase_quickstart/index.html @@ -5,13 +5,13 @@ 安装底座 | Framework as a Building Block for Kubernetes - +

Installing the Building Base

This chapter walks through deploying the building-base components, covering the related open-source components, prerequisites, and a quick deployment, and then adds the deployed cluster to the service portal.

Tip

Complete Installing the Kernel before installing.

Deployment

1. Create the official component repository

See Using the official component repository.

2. Create the namespace for the building-base components

Tip

Currently only the namespace u4a-system is supported.

    kubectl create namespace u4a-system

3. Deploy the Cluster Component

The component deployment manifest cluster_componentplan.yaml is as follows (see the component documentation for details):

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: cluster-component
  namespace: u4a-system
spec:
  approved: true
  name: cluster-component
  version: 0.1.3
  override:
    set:
    - ingress-nginx.controller.nodeSelector.kubernetes\.io/hostname=kubebb-core-control-plane
  component:
    name: kubebb.cluster-component
    namespace: kubebb-system

Parameter to adjust:

• override.set.ingress-nginx.controller.nodeSelector.kubernetes\.io/hostname: the node that will serve ingress-nginx.

Here it is the kubebb-core-control-plane node of the kind development cluster.

Deploy with the following command:

    kubectl apply -nu4a-system -f cluster_componentplan.yaml

Once the Cluster Component deployment finishes, check the component's deployment status with:

    kubectl get componentplan -nu4a-system cluster-component -oyaml

The component deployed successfully when its status looks like this:

status:
  conditions:
  - lastTransitionTime: "2023-07-25T08:15:41Z"
    reason: ""
    status: "True"
    type: Approved
  - lastTransitionTime: "2023-07-25T08:15:44Z"
    reason: InstallSuccess
    status: "True"
    type: Actioned
  - lastTransitionTime: "2023-07-25T08:15:44Z"
    reason: ""
    status: "True"
    type: Succeeded
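Rather than polling the YAML by hand, you can block until the condition appears; a small sketch using the Succeeded condition type shown above:

kubectl wait componentplan/cluster-component -nu4a-system \
  --for=condition=Succeeded --timeout=300s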

4. Deploy the U4A Component

The component deployment manifest u4a_componentplan.yaml is as follows:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: u4a-component
  namespace: u4a-system
spec:
  approved: true
  name: u4a-component
  version: 0.1.5
  wait: true
  override:
    valuesFrom:
    - kind: ConfigMap
      name: u4acm
      valuesKey: "values.yaml"
  component:
    name: kubebb.u4a-component
    namespace: kubebb-system

The U4A-Component's parameters are injected through a ConfigMap; see the U4A component deployment guide for how to create the ConfigMap.

Deploy with the following command:

    kubectl apply -nu4a-system -f u4a_componentplan.yaml

Once the U4A Component deployment finishes, check the component's deployment status with:

    kubectl get componentplan -nu4a-system u4a-component -oyaml

5. Access the building-base service portal

Get the portal's access address with the following command:

(base) ➜  ~ kubectl get ingress -nu4a-system
NAME                             CLASS    HOSTS                      ADDRESS   PORTS     AGE
bff-server-ingress               <none>   portal.172.18.0.2.nip.io             80, 443   4h55m
bff-server-ingress-socket        <none>   portal.172.18.0.2.nip.io             80, 443   4h55m
kube-oidc-proxy-server-ingress   <none>   k8s.172.18.0.2.nip.io                80, 443   4h55m

Open https://portal.172.18.0.2.nip.io in a browser to reach the service portal. The default credentials are:

• Username: admin
• Password: kubebb-admin

Note: since nip.io is used as the DNS resolution service, the domain names under HOSTS need to resolve to the IP address shown under ADDRESS.

Uninstall

1. Uninstall the U4A Component

    kubectl delete componentplan -nu4a-system u4a-component

2. Uninstall the Cluster Component

    kubectl delete componentplan -nu4a-system cluster-component
    - + \ No newline at end of file diff --git a/docs/quick-start/core_quickstart/index.html b/docs/quick-start/core_quickstart/index.html index 1a32c1116..ed07efc1d 100644 --- a/docs/quick-start/core_quickstart/index.html +++ b/docs/quick-start/core_quickstart/index.html @@ -5,13 +5,13 @@ 安装内核 | Framework as a Building Block for Kubernetes - +

Installing the Kernel

Tip

Complete the prerequisites before installing.

Installation

Tip

Kubebb provides an official Helm repository for easy installation: https://kubebb.github.io/components/

1. Add the Helm repository

helm repo add kubebb https://kubebb.github.io/components/
helm repo update

2. Create the namespace

Adjust the namespace name to your environment as needed.

kubectl create namespace kubebb-system

3. Install

helm install -nkubebb-system kubebb-core kubebb/kubebb-core

4. Check the installation status

kubectl get pods -nkubebb-system

If everything is fine, the output looks like:

NAME                           READY   STATUS    RESTARTS   AGE
kubebb-core-6bd7c5f679-742mq   1/1     Running   0          21h

Quick Tour

Once the kernel is installed, you can experience component-based deployment through the official component repository:

Tip

The official kubebb component repository is added by default during kernel installation and provides a number of certified repositories, components, and component applications.

1. List the repositories with:

kubectl get repository -nkubebb-system

By default it contains at least the kubebb repository:

(base) ➜  charts git:(dev) kubectl get repository -nkubebb-system
NAME     AGE
kubebb   14m

If kubebb is missing, add it manually:

kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/repos/repository_kubebb.yaml
2. Get the components in the official repository

kubectl get components -nkubebb-system  -l kubebb.component.repository=kubebb

If everything is fine, the output looks like:

NAME                       AGE
kubebb.bc-apis             135m
kubebb.bc-depository       135m
kubebb.bc-explorer         135m
kubebb.cluster-component   135m
kubebb.fabric-operator     135m
kubebb.ingress-nginx       135m
kubebb.kubebb              135m
kubebb.kubebb-core         135m
kubebb.minio               135m
kubebb.tekton-operator     135m
kubebb.u4a-component       135m
kubebb.weaviate            135m
3. Deploy a component

Using kubebb.minio as an example:

kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/examples/minio/componentplan.yaml

Check the component's deployment status:

kubectl get componentplan my-minio -oyaml

Check the component's pods:

kubectl get pods -l core.kubebb.k8s.com.cn/componentplan=my-minio

If everything is fine, the output looks like:

NAME         READY   STATUS    RESTARTS   AGE
my-minio-0   1/1     Running   0          42h
my-minio-1   1/1     Running   0          42h
my-minio-2   1/1     Running   0          42h

Deploying a private repository

1. Deploy chartmuseum from the official repository

kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/examples/chartmuseum/componentplan.yaml

2. Add the repository

# repository_chartmuseum.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: chartmuseum
  namespace: kubebb-system
spec:
  url: http://chartmuseum.kubebb-system.svc.cluster.local:8080
  pullStategy:
    intervalSeconds: 120
    retry: 5

Create the repository:

kubectl apply -f repository_chartmuseum.yaml

Result:

kubectl get repository -nkubebb-system
NAME          AGE
chartmuseum   4m41s
kubebb        15h

Expose the port:

kubectl port-forward service/chartmuseum 8080:8080 -nkubebb-system

Upload a custom chart:

helm create mychart
cd mychart
helm package .
curl --data-binary "@mychart-0.1.0.tgz" http://localhost:8080/api/charts

Check it from the private repository:

kubectl get component -l kubebb.component.repository=chartmuseum -nkubebb-system
NAME                  AGE
chartmuseum.mychart   4m27s
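To install the uploaded chart through the kernel, a ComponentPlan along the lines of the earlier examples should work. The version 0.1.0 comes from the helm create scaffold; the plan name and namespace are free choices:

kubectl apply -f - <<'EOF'
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: my-mychart
  namespace: default
spec:
  approved: true
  name: my-mychart
  version: 0.1.0
  component:
    name: chartmuseum.mychart
    namespace: kubebb-system
EOF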
    - + \ No newline at end of file diff --git a/docs/quick-start/prerequisite/index.html b/docs/quick-start/prerequisite/index.html index e4f22aa47..da154c9af 100644 --- a/docs/quick-start/prerequisite/index.html +++ b/docs/quick-start/prerequisite/index.html @@ -5,13 +5,13 @@ 预先准备 | Framework as a Building Block for Kubernetes - +

Prerequisites

Base environment

A Kubernetes cluster

Tip

If you don't have a Kubernetes cluster, you can follow the steps below to create a development cluster with kind. By default, to fit the building base, at least one node in the cluster must act as the Ingress Controller service node and expose ports 80 and 443.

Kind development cluster

1. Install kind

See: https://kind.sigs.k8s.io/docs/user/quick-start/#installation

Using Linux as an example:

# For AMD64 / x86_64
[ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64
# For ARM64
[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-arm64
chmod +x ./kind
sudo mv ./kind /usr/local/bin/kind

2. Prepare the single-node cluster configuration file kind-config.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: kubebb-core
nodes:
- role: control-plane
  image: kindest/node:v1.24.13
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
3. Create the cluster

kind create cluster --config=kind-config.yaml
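Optionally, confirm that the node carries the ingress-ready label set in the configuration above:

kubectl get nodes -l ingress-ready=true
# should list kubebb-core-control-plane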
4. Check the cluster status

kubectl cluster-info --context kind-kubebb-core

If everything is fine, the output looks like:

Kubernetes control plane is running at https://127.0.0.1:42607
CoreDNS is running at https://127.0.0.1:42607/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

5. List the cluster nodes

kubectl get nodes

If everything is fine, the output looks like:

NAME                        STATUS   ROLES           AGE   VERSION
kubebb-core-control-plane   Ready    control-plane   21m   v1.24.13

With docker ps you can see that this node exposes ports 80 and 443:

(base) ➜  building-base git:(azure) docker ps
CONTAINER ID   IMAGE                   COMMAND                  CREATED          STATUS          PORTS                                                                 NAMES
e4e3820cdb5a   kindest/node:v1.24.13   "/usr/local/bin/entr…"   22 minutes ago   Up 22 minutes   0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp, 127.0.0.1:33611->6443/tcp   kubebb-core-control-plane
    - + \ No newline at end of file diff --git a/docs/quick-start/try_customization/index.html b/docs/quick-start/try_customization/index.html index 89b92ca69..81825bb35 100644 --- a/docs/quick-start/try_customization/index.html +++ b/docs/quick-start/try_customization/index.html @@ -5,14 +5,14 @@ 体验自定义配置 | Framework as a Building Block for Kubernetes - +

Try Custom Configuration

1. Customize the portal's primary color

kubectl edit cm portal-global-configs -n u4a-system

Change primaryColor to customize the portal's primary color:

apiVersion: v1
kind: ConfigMap
metadata:
  name: portal-global-configs
data:
  global-configs: |
    {"theme": {"primaryColor": "#FE8F35"}}
2. Customize menus

All kubebb menus are defined by the Menu CRD. To add menus of your own, refer to the following Menu examples:

# Main menu
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu
spec:
  column: 1
  isRenderSelectCurrent: false
  parentOwnerReferences:
    apiVersion: ""
    kind: ""
    name: ""
    uid: ""
  rankingInColumn: 100
  tenant: true
  text: 测试菜单
  textEn: "Test Menu"
---
# Index menu under the test menu
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu-index
spec:
  getTitleForReplaceSider: {}
  parentOwnerReferences:
    apiVersion: component.t7d.io/v1beta1
    blockOwnerDeletion: false
    controller: false
    kind: Menu
    name: demo-menu
    uid: ""
  rankingInColumn: 100
  tenant: true
  text: 菜单索引项
  textEn: "Menu Index Item"
---
# Submenu that actually links to a page
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu-submenu1
spec:
  getTitleForReplaceSider: {}
  isRenderSelectCurrent: false
  parentOwnerReferences:
    apiVersion: component.t7d.io/v1beta1
    blockOwnerDeletion: false
    controller: false
    kind: Menu
    name: demo-menu-index
    uid: ""
  pathname: /demo-feature1
  rankingInColumn: 200
  text: 测试子菜单
  textEn: "Test Submenu"

Use kubectl apply -f to deploy the menu items into the environment, as shown below: Figure 1

3. Multi-language & light/dark mode

1) Use the language switcher at the top right to switch languages; Chinese and English are currently supported.

2) Use the button at the top right to toggle light/dark mode.

    - + \ No newline at end of file diff --git a/en/404.html b/en/404.html index e497966f4..f2877520f 100644 --- a/en/404.html +++ b/en/404.html @@ -5,13 +5,13 @@ Page Not Found | Framework as a Building Block for Kubernetes - +

    Page Not Found

    We could not find what you were looking for.

    Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

    - + \ No newline at end of file diff --git a/en/assets/js/f665e660.23c86fe1.js b/en/assets/js/f665e660.fd257e44.js similarity index 85% rename from en/assets/js/f665e660.23c86fe1.js rename to en/assets/js/f665e660.fd257e44.js index 14aaa7aa8..eddb1de34 100644 --- a/en/assets/js/f665e660.23c86fe1.js +++ b/en/assets/js/f665e660.fd257e44.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2437],{3905:(e,t,n)=>{n.d(t,{Zo:()=>s,kt:()=>b});var r=n(7294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var c=r.createContext({}),p=function(e){var t=r.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=p(e.components);return r.createElement(c.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),u=p(n),d=a,b=u["".concat(c,".").concat(d)]||u[d]||m[d]||o;return n?r.createElement(b,i(i({ref:t},s),{},{components:n})):r.createElement(b,i({ref:t},s))}));function b(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=d;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l[u]="string"==typeof e?e:a,i[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>l,toc:()=>p});var r=n(7462),a=(n(7294),n(3905));const o={sidebar_position:5},i="\u7ec4\u4ef6\u5f00\u53d1",l={unversionedId:"core/userguide/component-dev",id:"core/userguide/component-dev",title:"\u7ec4\u4ef6\u5f00\u53d1",description:"Kubebb\u7684\u7ec4\u4ef6\u5b89\u88c5\u5305\u91c7\u7528Helm\u6a21\u5f0f\uff0c\u9075\u5faaHelm charts\u5f00\u53d1\u89c4\u5219\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u6211\u4eec\u989d\u5916\u5b9a\u4e49\u6dfb\u52a0\u4e86\u4e00\u4e9b\u7279\u6b8a\u5b57\u6bb5\u6765\u6ee1\u8db3\u4e00\u4e9b\u7ec4\u4ef6\u7684\u7279\u6b8a\u6027\u3002",source:"@site/docs/core/userguide/component-dev.md",sourceDirName:"core/userguide",slug:"/core/userguide/component-dev",permalink:"/website/en/docs/core/userguide/component-dev",draft:!1,tags:[],version:"current",sidebarPosition:5,frontMatter:{sidebar_position:5},sidebar:"tutorialSidebar",previous:{title:"\u4ece Helm \u547d\u4ee4\u8fc1\u79fb",permalink:"/website/en/docs/core/userguide/helmtofuture"},next:{title:"\u7ec4\u4ef6\u8bc4\u7ea7",permalink:"/website/en/docs/core/rating"}},c={},p=[{value:"\u7ec4\u4ef6\u7c7b\u578b",id:"\u7ec4\u4ef6\u7c7b\u578b",level:2},{value:"\u901a\u7528\u914d\u7f6e",id:"\u901a\u7528\u914d\u7f6e",level:2},{value:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e",id:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e",level:2},{value:"Chart.yaml",id:"chartyaml",level:3}],s={toc:p},u="wrapper";function 
m(e){let{components:t,...n}=e;return(0,a.kt)(u,(0,r.Z)({},s,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"\u7ec4\u4ef6\u5f00\u53d1"},"\u7ec4\u4ef6\u5f00\u53d1"),(0,a.kt)("p",null,"Kubebb\u7684\u7ec4\u4ef6\u5b89\u88c5\u5305\u91c7\u7528",(0,a.kt)("inlineCode",{parentName:"p"},"Helm"),"\u6a21\u5f0f\uff0c\u9075\u5faaHelm charts\u5f00\u53d1\u89c4\u5219\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u6211\u4eec\u989d\u5916\u5b9a\u4e49\u6dfb\u52a0\u4e86\u4e00\u4e9b\u7279\u6b8a\u5b57\u6bb5\u6765\u6ee1\u8db3\u4e00\u4e9b\u7ec4\u4ef6\u7684\u7279\u6b8a\u6027\u3002"),(0,a.kt)("h2",{id:"\u7ec4\u4ef6\u7c7b\u578b"},"\u7ec4\u4ef6\u7c7b\u578b"),(0,a.kt)("p",null,"\u4ece\u529f\u80fd\u89d2\u5ea6\uff0c\u6211\u4eec\u5c06\u7ec4\u4ef6\u5212\u5206\u4e3a\u4e24\u7c7b:"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"\u7cfb\u7edf\u7ec4\u4ef6,\u5982U4A\u3001TMF\u7b49,\u7ec4\u4ef6\u7684\u8fd0\u884c\u9700\u8981\u7cfb\u7edf\u7ba1\u7406\u6743\u9650")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"\u666e\u901a\u529f\u80fd\u7ec4\u4ef6\uff0c\u5982minio\u3001weaviate\u7b49\uff0c\u7ec4\u4ef6\u53ef\u8fd0\u884c\u5728\u4efb\u4f55",(0,a.kt)("inlineCode",{parentName:"p"},"\u79df\u6237-\u9879\u76ee"),"\u4e2d\uff0c\u6ca1\u6709\u7279\u6b8a\u9650\u5236"))),(0,a.kt)("h2",{id:"\u901a\u7528\u914d\u7f6e"},"\u901a\u7528\u914d\u7f6e"),(0,a.kt)("p",null,"\u53c2\u8003",(0,a.kt)("a",{parentName:"p",href:"https://helm.sh/docs/"},"Helm\u5b98\u65b9\u6587\u6863")),(0,a.kt)("h2",{id:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e"},"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e"),(0,a.kt)("p",null,"\u4e3a\u652f\u6301\u4e0d\u540c\u7ec4\u4ef6\u5bf9\u5b89\u88c5\u4f4d\u7f6e\u3001\u6743\u9650\u7684\u53ef\u63a7\uff0c\u7279\u6b64\u989d\u5916\u7ea6\u5b9a\u4e86\u591a\u4e2a\u914d\u7f6e\u5b57\u6bb5"),(0,a.kt)("h3",{id:"chartyaml"},"Chart.yaml"),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"Chart.yaml"),"\u4e2d\u5305\u542b\u7ec4\u4ef6\u7684\u6838\u5fc3\u5b9a\u4e49\u3001\u7248\u672c\u3001\u7ef4\u62a4\u8005\u7b49\u4fe1\u606f\uff0c\u5c5e\u4e8e",(0,a.kt)("inlineCode",{parentName:"p"},"Helm"),"\u9884\u5b9a\u4e49\u7684\u5185\u5bb9\u3002\u4e3a\u4e86\u652f\u6301\u989d\u5916\u7684\u7279\u6b8a\u9700\u6c42\uff0c\u6211\u4eec\u51b3\u5b9a\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"p"},"annotations"),"\u6765\u81ea\u7531\u5b9a\u4e49\u3002\u5982\u4e0b\u6240\u793a:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},'annotations:\n core.kubebb.k8s.com.cn/displayname: "\u5185\u6838"\n core.kubebb.k8s.com.cn/restrict-tenants: "system-tenant"\n core.kubebb.k8s.com.cn/restricted-namespaces: "msa-system"\n')),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"core.kubebb.k8s.com.cn/displayname"),": \u7528\u4e8e\u586b\u5145\u7ec4\u4ef6\u7684\u5c55\u793a\u540d\uff0c\u652f\u6301\u4e2d\u82f1\u6587"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"core.kubebb.k8s.com.cn/restrict-tenants"),": \u7528\u4e8e\u8bbe\u7f6e\u7ec4\u4ef6\u5b89\u88c5\u4f4d\u7f6e\u7684\u9650\u5236\u79df\u6237\uff0c\u591a\u4e2a\u79df\u6237\u9700\u8981\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"li"},","),"\u9694\u5f00"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"ore.kubebb.k8s.com.cn/restricted-namespaces"),": 
\u7528\u4e8e\u8bbe\u7f6e\u7ec4\u4ef6\u5b89\u88c5\u4f4d\u7f6e\u7684\u9650\u5236\u9879\u76ee/\u547d\u540d\u7a7a\u95f4\uff0c\u591a\u4e2a\u547d\u540d\u7a7a\u95f4\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"li"},","),"\u9694\u5f00")))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2437],{3905:(e,t,n)=>{n.d(t,{Zo:()=>s,kt:()=>b});var r=n(7294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var c=r.createContext({}),p=function(e){var t=r.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=p(e.components);return r.createElement(c.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),u=p(n),d=a,b=u["".concat(c,".").concat(d)]||u[d]||m[d]||o;return n?r.createElement(b,i(i({ref:t},s),{},{components:n})):r.createElement(b,i({ref:t},s))}));function b(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=d;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l[u]="string"==typeof e?e:a,i[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>m,frontMatter:()=>o,metadata:()=>l,toc:()=>p});var r=n(7462),a=(n(7294),n(3905));const o={sidebar_position:5},i="\u7ec4\u4ef6\u5f00\u53d1",l={unversionedId:"core/userguide/component-dev",id:"core/userguide/component-dev",title:"\u7ec4\u4ef6\u5f00\u53d1",description:"Kubebb\u7684\u7ec4\u4ef6\u5b89\u88c5\u5305\u91c7\u7528Helm\u6a21\u5f0f\uff0c\u9075\u5faaHelm charts\u5f00\u53d1\u89c4\u5219\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u6211\u4eec\u989d\u5916\u5b9a\u4e49\u6dfb\u52a0\u4e86\u4e00\u4e9b\u7279\u6b8a\u5b57\u6bb5\u6765\u6ee1\u8db3\u4e00\u4e9b\u7ec4\u4ef6\u7684\u7279\u6b8a\u6027\u3002",source:"@site/docs/core/userguide/component-dev.md",sourceDirName:"core/userguide",slug:"/core/userguide/component-dev",permalink:"/website/en/docs/core/userguide/component-dev",draft:!1,tags:[],version:"current",sidebarPosition:5,frontMatter:{sidebar_position:5},sidebar:"tutorialSidebar",previous:{title:"\u4ece Helm \u547d\u4ee4\u8fc1\u79fb",permalink:"/website/en/docs/core/userguide/helmtofuture"},next:{title:"\u7ec4\u4ef6\u8bc4\u7ea7",permalink:"/website/en/docs/core/rating"}},c={},p=[{value:"\u7ec4\u4ef6\u7c7b\u578b",id:"\u7ec4\u4ef6\u7c7b\u578b",level:2},{value:"\u901a\u7528\u914d\u7f6e",id:"\u901a\u7528\u914d\u7f6e",level:2},{value:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e",id:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e",level:2},{value:"Chart.yaml",id:"chartyaml",level:3}],s={toc:p},u="wrapper";function 
m(e){let{components:t,...n}=e;return(0,a.kt)(u,(0,r.Z)({},s,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"\u7ec4\u4ef6\u5f00\u53d1"},"\u7ec4\u4ef6\u5f00\u53d1"),(0,a.kt)("p",null,"Kubebb\u7684\u7ec4\u4ef6\u5b89\u88c5\u5305\u91c7\u7528",(0,a.kt)("inlineCode",{parentName:"p"},"Helm"),"\u6a21\u5f0f\uff0c\u9075\u5faaHelm charts\u5f00\u53d1\u89c4\u5219\u3002\u9664\u6b64\u4e4b\u5916\uff0c\u6211\u4eec\u989d\u5916\u5b9a\u4e49\u6dfb\u52a0\u4e86\u4e00\u4e9b\u7279\u6b8a\u5b57\u6bb5\u6765\u6ee1\u8db3\u4e00\u4e9b\u7ec4\u4ef6\u7684\u7279\u6b8a\u6027\u3002"),(0,a.kt)("h2",{id:"\u7ec4\u4ef6\u7c7b\u578b"},"\u7ec4\u4ef6\u7c7b\u578b"),(0,a.kt)("p",null,"\u4ece\u529f\u80fd\u89d2\u5ea6\uff0c\u6211\u4eec\u5c06\u7ec4\u4ef6\u5212\u5206\u4e3a\u4e24\u7c7b:"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"\u7cfb\u7edf\u7ec4\u4ef6,\u5982U4A\u3001TMF\u7b49,\u7ec4\u4ef6\u7684\u8fd0\u884c\u9700\u8981\u7cfb\u7edf\u7ba1\u7406\u6743\u9650")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"\u666e\u901a\u529f\u80fd\u7ec4\u4ef6\uff0c\u5982minio\u3001weaviate\u7b49\uff0c\u7ec4\u4ef6\u53ef\u8fd0\u884c\u5728\u4efb\u4f55",(0,a.kt)("inlineCode",{parentName:"p"},"\u79df\u6237-\u9879\u76ee"),"\u4e2d\uff0c\u6ca1\u6709\u7279\u6b8a\u9650\u5236"))),(0,a.kt)("h2",{id:"\u901a\u7528\u914d\u7f6e"},"\u901a\u7528\u914d\u7f6e"),(0,a.kt)("p",null,"\u53c2\u8003",(0,a.kt)("a",{parentName:"p",href:"https://helm.sh/docs/"},"Helm\u5b98\u65b9\u6587\u6863")),(0,a.kt)("h2",{id:"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e"},"\u7ec4\u4ef6\u9ad8\u7ea7\u914d\u7f6e"),(0,a.kt)("p",null,"\u4e3a\u652f\u6301\u4e0d\u540c\u7ec4\u4ef6\u5bf9\u5b89\u88c5\u4f4d\u7f6e\u3001\u6743\u9650\u7684\u53ef\u63a7\uff0c\u7279\u6b64\u989d\u5916\u7ea6\u5b9a\u4e86\u591a\u4e2a\u914d\u7f6e\u5b57\u6bb5"),(0,a.kt)("h3",{id:"chartyaml"},"Chart.yaml"),(0,a.kt)("p",null,(0,a.kt)("inlineCode",{parentName:"p"},"Chart.yaml"),"\u4e2d\u5305\u542b\u7ec4\u4ef6\u7684\u6838\u5fc3\u5b9a\u4e49\u3001\u7248\u672c\u3001\u7ef4\u62a4\u8005\u7b49\u4fe1\u606f\uff0c\u5c5e\u4e8e",(0,a.kt)("inlineCode",{parentName:"p"},"Helm"),"\u9884\u5b9a\u4e49\u7684\u5185\u5bb9\u3002\u4e3a\u4e86\u652f\u6301\u989d\u5916\u7684\u7279\u6b8a\u9700\u6c42\uff0c\u6211\u4eec\u51b3\u5b9a\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"p"},"annotations"),"\u6765\u81ea\u7531\u5b9a\u4e49\u3002\u5982\u4e0b\u6240\u793a:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-yaml"},'annotations:\n core.kubebb.k8s.com.cn/displayname: "\u5185\u6838"\n core.kubebb.k8s.com.cn/restricted-tenants: "system-tenant"\n core.kubebb.k8s.com.cn/restricted-namespaces: "msa-system"\n')),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"core.kubebb.k8s.com.cn/displayname"),": \u7528\u4e8e\u586b\u5145\u7ec4\u4ef6\u7684\u5c55\u793a\u540d\uff0c\u652f\u6301\u4e2d\u82f1\u6587"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"core.kubebb.k8s.com.cn/restrict-tenants"),": \u7528\u4e8e\u8bbe\u7f6e\u7ec4\u4ef6\u5b89\u88c5\u4f4d\u7f6e\u7684\u9650\u5236\u79df\u6237\uff0c\u591a\u4e2a\u79df\u6237\u9700\u8981\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"li"},","),"\u9694\u5f00"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"ore.kubebb.k8s.com.cn/restricted-namespaces"),": 
\u7528\u4e8e\u8bbe\u7f6e\u7ec4\u4ef6\u5b89\u88c5\u4f4d\u7f6e\u7684\u9650\u5236\u9879\u76ee/\u547d\u540d\u7a7a\u95f4\uff0c\u591a\u4e2a\u547d\u540d\u7a7a\u95f4\u901a\u8fc7",(0,a.kt)("inlineCode",{parentName:"li"},","),"\u9694\u5f00")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/en/assets/js/runtime~main.b916e1e7.js b/en/assets/js/runtime~main.8803d87f.js similarity index 99% rename from en/assets/js/runtime~main.b916e1e7.js rename to en/assets/js/runtime~main.8803d87f.js index 226731c0b..abb640cc8 100644 --- a/en/assets/js/runtime~main.b916e1e7.js +++ b/en/assets/js/runtime~main.8803d87f.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,f,d,c,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var f=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(f.exports,f,f.exports,r),f.loaded=!0,f.exports}r.m=b,r.c=t,e=[],r.O=(a,f,d,c)=>{if(!f){var b=1/0;for(i=0;i=c)&&Object.keys(r.O).every((e=>r.O[e](f[o])))?f.splice(o--,1):(t=!1,c0&&e[i-1][2]>c;i--)e[i]=e[i-1];e[i]=[f,d,c]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,d){if(1&d&&(e=this(e)),8&d)return e;if("object"==typeof e&&e){if(4&d&&e.__esModule)return e;if(16&d&&"function"==typeof e.then)return e}var c=Object.create(null);r.r(c);var b={};a=a||[null,f({}),f([]),f(f)];for(var t=2&d&&e;"object"==typeof t&&!~a.indexOf(t);t=f(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(c,b),c},r.d=(e,a)=>{for(var f in a)r.o(a,f)&&!r.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,f)=>(r.f[f](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",99:"3d36d8f8",277:"c4348237",406:"ead696ea",813:"5d9ef359",948:"8717b14a",1285:"5c4c6e60",1352:"1a244cdc",1383:"449dbafe",1505:"7c2ff145",1599:"6dc07dca",1649:"8d745e6e",1808:"2aa8b0d3",1914:"d9f32620",1915:"a9a78b31",2217:"8aa54216",2237:"acf368fd",2267:"59362658",2352:"4a2fc361",2362:"e273c56f",2437:"f665e660",2535:"814f3328",2774:"22646bfb",3085:"1f391b9e",3089:"a6aa9e1f",3494:"58c97f69",3497:"c650a001",3514:"73664a40",3608:"9e4087bc",3998:"e83fc973",4013:"01a85c17",4193:"c4f5d8e4",4195:"59db27b9",4288:"ad895e75",4776:"552535a5",4823:"cd7e3398",5062:"69369ae2",5070:"62fa1b0b",5093:"b0dde2ea",5513:"6f9ac6d0",5604:"4c747d7c",5662:"3c7910a2",5680:"ff43d3e1",5819:"809ee581",6004:"a76bfff0",6070:"27a2546e",6103:"ccc49370",6210:"180ea7ad",6287:"e90a2c2a",6306:"1d408bef",6320:"647f233e",6513:"1bba06cb",6741:"791926a5",6890:"41ebaef0",7133:"f03e9ca9",7331:"e6809710",7414:"393be207",7492:"81fe2174",7494:"da46f5e6",7527:"f345e2d0",7530:"a95e9274",7745:"085a15b4",7918:"17896441",7960:"ef6edb73",8288:"3fabc7c4",8610:"6875c492",8636:"f4f34a3a",8841:"b46b210f",8916:"e0f3ca4f",8994:"9546be45",9003:"925b3f96",9067:"c4afd168",9243:"9115b1fc",9449:"3724ddc1",9514:"1be78505",9575:"a5ddeb9f",9594:"a650ca47",9642:"7661071f",9671:"0e384e19",9723:"9684cbb9",9735:"4ba7e5a3",9748:"22167790",9817:"14eb3368",9889:"f1f0d3d7"}[e]||e)+"."+{53:"ad4e0915",99:"6347763c",210:"4d2f5804",277:"4c140266",406:"94d463bb",813:"ad02cc8b",948:"269a85c1",1285:"50fe42eb",1352:"73b27d4e",1383:"c313d4a9",1505:"d5ffaf36",1599:"3a4880a7",1649:"8eda03d8",1808:"f5bf5a84",1914:"2f150157",1915:"a4df916c",2217:"91246ac1",2237:"c271512a",2267:"d3cf6c5a",2352:"4b2855a7",2362:"04a690f5",2437:"23c86fe1",2529:"da2bcb01",2535:"bee19867",2774:"42e0ad33",3085:"f8464388",3089:"1e1af270",3494:"e3e64ba6",3497:"dad3a104",3514:"5501e765",3608:"9a81
5895",3998:"522054a4",4013:"5653d10a",4193:"0db3bdb7",4195:"0d352cf7",4288:"2b05fa55",4776:"aa34f3f9",4823:"9ee2921e",4972:"9374abde",5062:"f229d6b9",5070:"868a5eef",5093:"767fbd9f",5513:"919f99df",5604:"322f0451",5662:"7a5c2910",5680:"ff8c6bf7",5819:"e1a0e29e",6004:"d74d0a39",6070:"1e485dfc",6103:"5cfe080a",6210:"003bea24",6287:"c67a8e85",6306:"107a45a6",6320:"468a3349",6513:"e1ef867f",6741:"967dafe3",6890:"c7361bf9",7133:"aee63f33",7331:"3bd3b7c3",7414:"c124ecc0",7492:"6bbee95d",7494:"c2895288",7527:"dc981ed5",7530:"2bbc7b5a",7745:"9d13ed3e",7918:"bacd5894",7960:"b2e20e23",8288:"2065d837",8610:"da158881",8636:"23a4200e",8841:"3de754e4",8916:"122704cc",8994:"1bf24f03",9003:"c4db91ca",9067:"9e952f7b",9243:"42ec6c7b",9449:"b9ba6373",9514:"82b3557a",9575:"125052c9",9594:"f08c628d",9642:"3fa71cce",9671:"fa1c515d",9723:"2d2b8230",9735:"9e98a33d",9748:"9d78aa76",9817:"3bb53ce2",9889:"9a314588"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),d={},c="website:",r.l=(e,a,f,b)=>{if(d[e])d[e].push(a);else{var t,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var c=d[e];if(delete d[e],t.parentNode&&t.parentNode.removeChild(t),c&&c.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/website/en/",r.gca=function(e){return e={17896441:"7918",22167790:"9748",59362658:"2267","935f2afb":"53","3d36d8f8":"99",c4348237:"277",ead696ea:"406","5d9ef359":"813","8717b14a":"948","5c4c6e60":"1285","1a244cdc":"1352","449dbafe":"1383","7c2ff145":"1505","6dc07dca":"1599","8d745e6e":"1649","2aa8b0d3":"1808",d9f32620:"1914",a9a78b31:"1915","8aa54216":"2217",acf368fd:"2237","4a2fc361":"2352",e273c56f:"2362",f665e660:"2437","814f3328":"2535","22646bfb":"2774","1f391b9e":"3085",a6aa9e1f:"3089","58c97f69":"3494",c650a001:"3497","73664a40":"3514","9e4087bc":"3608",e83fc973:"3998","01a85c17":"4013",c4f5d8e4:"4193","59db27b9":"4195",ad895e75:"4288","552535a5":"4776",cd7e3398:"4823","69369ae2":"5062","62fa1b0b":"5070",b0dde2ea:"5093","6f9ac6d0":"5513","4c747d7c":"5604","3c7910a2":"5662",ff43d3e1:"5680","809ee581":"5819",a76bfff0:"6004","27a2546e":"6070",ccc49370:"6103","180ea7ad":"6210",e90a2c2a:"6287","1d408bef":"6306","647f233e":"6320","1bba06cb":"6513","791926a5":"6741","41ebaef0":"6890",f03e9ca9:"7133",e6809710:"7331","393be207":"7414","81fe2174":"7492",da46f5e6:"7494",f345e2d0:"7527",a95e9274:"7530","085a15b4":"7745",ef6edb73:"7960","3fabc7c4":"8288","6875c492":"8610",f4f34a3a:"8636",b46b210f:"8841",e0f3ca4f:"8916","9546be45":"8994","925b3f96":"9003",c4afd168:"9067","9115b1fc":"9243","3724ddc1":"9449","1be78505":"9514",a5ddeb9f:"9575",a650ca47:"9594","7661071f":"9642","0e384e19":"9671","9684cbb9":"9723","4ba7e5a3":"9735","14eb3368":"9817",f1f0d3d7:"9889"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,f)=>{var d=r.o(e,a)?e[a]:void 0;if(0!==d)if(d)f.push(d[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var c=new Promise(((f,c)=>d=e[a]=[f,c]));f.push(d[2]=c);var b=r.p+r.u(a),t=new Error;r.l(b,(f=>{if(r.o(e,a)&&(0!==(d=e[a])&&(e[a]=void 0),d)){var 
c=f&&("load"===f.type?"missing":f.type),b=f&&f.target&&f.target.src;t.message="Loading chunk "+a+" failed.\n("+c+": "+b+")",t.name="ChunkLoadError",t.type=c,t.request=b,d[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,f)=>{var d,c,b=f[0],t=f[1],o=f[2],n=0;if(b.some((a=>0!==e[a]))){for(d in t)r.o(t,d)&&(r.m[d]=t[d]);if(o)var i=o(r)}for(a&&a(f);n{"use strict";var e,a,f,d,c,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var f=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(f.exports,f,f.exports,r),f.loaded=!0,f.exports}r.m=b,r.c=t,e=[],r.O=(a,f,d,c)=>{if(!f){var b=1/0;for(i=0;i=c)&&Object.keys(r.O).every((e=>r.O[e](f[o])))?f.splice(o--,1):(t=!1,c0&&e[i-1][2]>c;i--)e[i]=e[i-1];e[i]=[f,d,c]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,d){if(1&d&&(e=this(e)),8&d)return e;if("object"==typeof e&&e){if(4&d&&e.__esModule)return e;if(16&d&&"function"==typeof e.then)return e}var c=Object.create(null);r.r(c);var b={};a=a||[null,f({}),f([]),f(f)];for(var t=2&d&&e;"object"==typeof t&&!~a.indexOf(t);t=f(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(c,b),c},r.d=(e,a)=>{for(var f in a)r.o(a,f)&&!r.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,f)=>(r.f[f](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",99:"3d36d8f8",277:"c4348237",406:"ead696ea",813:"5d9ef359",948:"8717b14a",1285:"5c4c6e60",1352:"1a244cdc",1383:"449dbafe",1505:"7c2ff145",1599:"6dc07dca",1649:"8d745e6e",1808:"2aa8b0d3",1914:"d9f32620",1915:"a9a78b31",2217:"8aa54216",2237:"acf368fd",2267:"59362658",2352:"4a2fc361",2362:"e273c56f",2437:"f665e660",2535:"814f3328",2774:"22646bfb",3085:"1f391b9e",3089:"a6aa9e1f",3494:"58c97f69",3497:"c650a001",3514:"73664a40",3608:"9e4087bc",3998:"e83fc973",4013:"01a85c17",4193:"c4f5d8e4",4195:"59db27b9",4288:"ad895e75",4776:"552535a5",4823:"cd7e3398",5062:"69369ae2",5070:"62fa1b0b",5093:"b0dde2ea",5513:"6f9ac6d0",5604:"4c747d7c",5662:"3c7910a2",5680:"ff43d3e1",5819:"809ee581",6004:"a76bfff0",6070:"27a2546e",6103:"ccc49370",6210:"180ea7ad",6287:"e90a2c2a",6306:"1d408bef",6320:"647f233e",6513:"1bba06cb",6741:"791926a5",6890:"41ebaef0",7133:"f03e9ca9",7331:"e6809710",7414:"393be207",7492:"81fe2174",7494:"da46f5e6",7527:"f345e2d0",7530:"a95e9274",7745:"085a15b4",7918:"17896441",7960:"ef6edb73",8288:"3fabc7c4",8610:"6875c492",8636:"f4f34a3a",8841:"b46b210f",8916:"e0f3ca4f",8994:"9546be45",9003:"925b3f96",9067:"c4afd168",9243:"9115b1fc",9449:"3724ddc1",9514:"1be78505",9575:"a5ddeb9f",9594:"a650ca47",9642:"7661071f",9671:"0e384e19",9723:"9684cbb9",9735:"4ba7e5a3",9748:"22167790",9817:"14eb3368",9889:"f1f0d3d7"}[e]||e)+"."+{53:"ad4e0915",99:"6347763c",210:"4d2f5804",277:"4c140266",406:"94d463bb",813:"ad02cc8b",948:"269a85c1",1285:"50fe42eb",1352:"73b27d4e",1383:"c313d4a9",1505:"d5ffaf36",1599:"3a4880a7",1649:"8eda03d8",1808:"f5bf5a84",1914:"2f150157",1915:"a4df916c",2217:"91246ac1",2237:"c271512a",2267:"d3cf6c5a",2352:"4b2855a7",2362:"04a690f5",2437:"fd257e44",2529:"da2bcb01",2535:"bee19867",2774:"42e0ad33",3085:"f8464388",3089:"1e1af270",3494:"e3e64ba6",3497:"dad3a104",3514:"5501e765",3608:"9a815895",3998:"522054a4",4013:"5653d10a",4193:"0db3bdb7",4195:"0d352cf7",4288:"2b05fa55",4776:"aa34f3f9",4823:"9ee2921e",4972:"9374abde",5062:"f229d6b9",5070:"868a5eef",5093:"767fbd9f",5513:"919f99df",5604:"322f0451",5662:"7a5c2910",5680:"ff8c6bf7",5819:"e1a0e29e",6004:"d74d0a39",6070:"1e
485dfc",6103:"5cfe080a",6210:"003bea24",6287:"c67a8e85",6306:"107a45a6",6320:"468a3349",6513:"e1ef867f",6741:"967dafe3",6890:"c7361bf9",7133:"aee63f33",7331:"3bd3b7c3",7414:"c124ecc0",7492:"6bbee95d",7494:"c2895288",7527:"dc981ed5",7530:"2bbc7b5a",7745:"9d13ed3e",7918:"bacd5894",7960:"b2e20e23",8288:"2065d837",8610:"da158881",8636:"23a4200e",8841:"3de754e4",8916:"122704cc",8994:"1bf24f03",9003:"c4db91ca",9067:"9e952f7b",9243:"42ec6c7b",9449:"b9ba6373",9514:"82b3557a",9575:"125052c9",9594:"f08c628d",9642:"3fa71cce",9671:"fa1c515d",9723:"2d2b8230",9735:"9e98a33d",9748:"9d78aa76",9817:"3bb53ce2",9889:"9a314588"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),d={},c="website:",r.l=(e,a,f,b)=>{if(d[e])d[e].push(a);else{var t,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var c=d[e];if(delete d[e],t.parentNode&&t.parentNode.removeChild(t),c&&c.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/website/en/",r.gca=function(e){return e={17896441:"7918",22167790:"9748",59362658:"2267","935f2afb":"53","3d36d8f8":"99",c4348237:"277",ead696ea:"406","5d9ef359":"813","8717b14a":"948","5c4c6e60":"1285","1a244cdc":"1352","449dbafe":"1383","7c2ff145":"1505","6dc07dca":"1599","8d745e6e":"1649","2aa8b0d3":"1808",d9f32620:"1914",a9a78b31:"1915","8aa54216":"2217",acf368fd:"2237","4a2fc361":"2352",e273c56f:"2362",f665e660:"2437","814f3328":"2535","22646bfb":"2774","1f391b9e":"3085",a6aa9e1f:"3089","58c97f69":"3494",c650a001:"3497","73664a40":"3514","9e4087bc":"3608",e83fc973:"3998","01a85c17":"4013",c4f5d8e4:"4193","59db27b9":"4195",ad895e75:"4288","552535a5":"4776",cd7e3398:"4823","69369ae2":"5062","62fa1b0b":"5070",b0dde2ea:"5093","6f9ac6d0":"5513","4c747d7c":"5604","3c7910a2":"5662",ff43d3e1:"5680","809ee581":"5819",a76bfff0:"6004","27a2546e":"6070",ccc49370:"6103","180ea7ad":"6210",e90a2c2a:"6287","1d408bef":"6306","647f233e":"6320","1bba06cb":"6513","791926a5":"6741","41ebaef0":"6890",f03e9ca9:"7133",e6809710:"7331","393be207":"7414","81fe2174":"7492",da46f5e6:"7494",f345e2d0:"7527",a95e9274:"7530","085a15b4":"7745",ef6edb73:"7960","3fabc7c4":"8288","6875c492":"8610",f4f34a3a:"8636",b46b210f:"8841",e0f3ca4f:"8916","9546be45":"8994","925b3f96":"9003",c4afd168:"9067","9115b1fc":"9243","3724ddc1":"9449","1be78505":"9514",a5ddeb9f:"9575",a650ca47:"9594","7661071f":"9642","0e384e19":"9671","9684cbb9":"9723","4ba7e5a3":"9735","14eb3368":"9817",f1f0d3d7:"9889"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,f)=>{var d=r.o(e,a)?e[a]:void 0;if(0!==d)if(d)f.push(d[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var c=new Promise(((f,c)=>d=e[a]=[f,c]));f.push(d[2]=c);var b=r.p+r.u(a),t=new Error;r.l(b,(f=>{if(r.o(e,a)&&(0!==(d=e[a])&&(e[a]=void 0),d)){var c=f&&("load"===f.type?"missing":f.type),b=f&&f.target&&f.target.src;t.message="Loading chunk "+a+" failed.\n("+c+": "+b+")",t.name="ChunkLoadError",t.type=c,t.request=b,d[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,f)=>{var d,c,b=f[0],t=f[1],o=f[2],n=0;if(b.some((a=>0!==e[a]))){for(d 
in t)r.o(t,d)&&(r.m[d]=t[d]);if(o)var i=o(r)}for(a&&a(f);n Archive | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/en/blog/first-blog-post/index.html b/en/blog/first-blog-post/index.html index cf1929454..4a9c9b636 100644 --- a/en/blog/first-blog-post/index.html +++ b/en/blog/first-blog-post/index.html @@ -5,13 +5,13 @@ First Blog Post | Framework as a Building Block for Kubernetes - +

    First Blog Post

    · One min read
    Gao Wei

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    - + \ No newline at end of file diff --git a/en/blog/index.html b/en/blog/index.html index d868be314..6b059b704 100644 --- a/en/blog/index.html +++ b/en/blog/index.html @@ -5,13 +5,13 @@ Blog | Framework as a Building Block for Kubernetes - +

    · One min read
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    · One min read
    Gao Wei

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    - + \ No newline at end of file diff --git a/en/blog/long-blog-post/index.html b/en/blog/long-blog-post/index.html index 38e9786dc..184606836 100644 --- a/en/blog/long-blog-post/index.html +++ b/en/blog/long-blog-post/index.html @@ -5,13 +5,13 @@ Long Blog Post | Framework as a Building Block for Kubernetes - +

    Long Blog Post

    · 3 min read
    Endilie Yacop Sucipto

This is the summary of a very long blog post.

    Use a <!-- truncate --> comment to limit blog post size in the list view.

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    - + \ No newline at end of file diff --git a/en/blog/mdx-blog-post/index.html b/en/blog/mdx-blog-post/index.html index 6028471d7..19fe6c1d3 100644 --- a/en/blog/mdx-blog-post/index.html +++ b/en/blog/mdx-blog-post/index.html @@ -5,13 +5,13 @@ MDX Blog Post | Framework as a Building Block for Kubernetes - +

    MDX Blog Post

    · One min read
    Sébastien Lorber

    Blog posts support Docusaurus Markdown features, such as MDX.

    tip

    Use the power of React to create interactive blog posts.

    <button onClick={() => alert('button clicked!')}>Click me!</button>
    - + \ No newline at end of file diff --git a/en/blog/tags/docusaurus/index.html b/en/blog/tags/docusaurus/index.html index f1526fc0a..04be0791b 100644 --- a/en/blog/tags/docusaurus/index.html +++ b/en/blog/tags/docusaurus/index.html @@ -5,13 +5,13 @@ 4 posts tagged with "docusaurus" | Framework as a Building Block for Kubernetes - +

    4 posts tagged with "docusaurus"

    View All Tags

    · One min read
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    · One min read
    Gao Wei

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    - + \ No newline at end of file diff --git a/en/blog/tags/facebook/index.html b/en/blog/tags/facebook/index.html index ed31a539b..9cb33d907 100644 --- a/en/blog/tags/facebook/index.html +++ b/en/blog/tags/facebook/index.html @@ -5,13 +5,13 @@ One post tagged with "facebook" | Framework as a Building Block for Kubernetes - +

    One post tagged with "facebook"

    View All Tags

    · One min read
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    - + \ No newline at end of file diff --git a/en/blog/tags/hello/index.html b/en/blog/tags/hello/index.html index fc084e989..18310f269 100644 --- a/en/blog/tags/hello/index.html +++ b/en/blog/tags/hello/index.html @@ -5,13 +5,13 @@ 2 posts tagged with "hello" | Framework as a Building Block for Kubernetes - +

    2 posts tagged with "hello"

    View All Tags

    · One min read
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    - + \ No newline at end of file diff --git a/en/blog/tags/hola/index.html b/en/blog/tags/hola/index.html index c67511f5e..4aa6acd72 100644 --- a/en/blog/tags/hola/index.html +++ b/en/blog/tags/hola/index.html @@ -5,13 +5,13 @@ One post tagged with "hola" | Framework as a Building Block for Kubernetes - +

    One post tagged with "hola"

    View All Tags

    · One min read
    Gao Wei

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    - + \ No newline at end of file diff --git a/en/blog/tags/index.html b/en/blog/tags/index.html index 3303369a8..de9e07b38 100644 --- a/en/blog/tags/index.html +++ b/en/blog/tags/index.html @@ -5,13 +5,13 @@ Tags | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/en/blog/welcome/index.html b/en/blog/welcome/index.html index 3438a003d..56de30209 100644 --- a/en/blog/welcome/index.html +++ b/en/blog/welcome/index.html @@ -5,13 +5,13 @@ Welcome | Framework as a Building Block for Kubernetes - +

    Welcome

    · One min read
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    - + \ No newline at end of file diff --git a/en/docs/FAQ/index.html b/en/docs/FAQ/index.html index 150f6b7f9..57ff4932d 100644 --- a/en/docs/FAQ/index.html +++ b/en/docs/FAQ/index.html @@ -5,13 +5,13 @@ 常见问题 | Framework as a Building Block for Kubernetes - +

FAQ

BuildingBase

Deployment issues

1. The tenant management controller fails its liveness probe and cannot start

• Environment
  • Azure VM (Ubuntu 22.04)

Reproducing the issue

After running helm install --wait -n u4a-system u4a-component ., the tenant management controller (capsule) stays in CrashLoopBackOff:

    ➜  ~ k get pods -nu4a-system
    NAME READY STATUS RESTARTS AGE
    bff-server-9cc54cbc5-gcp6x 1/1 Running 0 17m
    capsule-controller-manager-5b9864f9bf-7mkhb 0/1 CrashLoopBackOff 8 (27s ago) 17m
    cert-manager-79d7998d9-c7q8n 1/1 Running 0 33m
    cert-manager-cainjector-57bb7f44dd-c9sj6 1/1 Running 0 33m
    cert-manager-webhook-65b494ccf4-4blzx 1/1 Running 0 33m
    cluster-component-ingress-nginx-controller-86d6bfdbf6-qj6hf 1/1 Running 0 33m
    kube-oidc-proxy-fc6b54b8c-ddc2s 1/1 Running 0 17m
    oidc-server-84cbfcc9f5-bmmf9 2/2 Running 0 17m
    resource-view-controller-94645667-ttvst 1/1 Running 0 17m

Inspecting the pod events shows the following errors:

    Events:
    Type Reason Age From Message
    ---- ------ ---- ---- -------
    Normal Scheduled 4m35s default-scheduler Successfully assigned u4a-system/capsule-controller-manager-5b9864f9bf-7mkhb to kubebb-core-worker
    Normal Pulling 4m34s kubelet Pulling image "hub.tenxcloud.com/u4a-component/capsule:v0.1.2-20221122"
    Normal Pulled 3m57s kubelet Successfully pulled image "hub.tenxcloud.com/u4a-component/capsule:v0.1.2-20221122" in 36.616134797s
    Warning Unhealthy 3m53s kubelet Liveness probe failed: Get "http://10.244.1.6:10080/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
    Warning Unhealthy 3m44s kubelet Readiness probe failed: Get "http://10.244.1.6:10080/readyz": dial tcp 10.244.1.6:10080: connect: connection refused
    Warning Unhealthy 3m44s kubelet Liveness probe failed: Get "http://10.244.1.6:10080/healthz": dial tcp 10.244.1.6:10080: connect: connection refused
    Normal Created 3m34s (x3 over 3m57s) kubelet Created container manager
    Normal Started 3m34s (x3 over 3m57s) kubelet Started container manager
    Warning Unhealthy 3m31s (x7 over 3m55s) kubelet Readiness probe failed: Get "http://10.244.1.6:10080/readyz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
    Warning BackOff 3m13s (x4 over 3m42s) kubelet Back-off restarting failed container
    Normal Pulled 3m1s (x3 over 3m50s) kubelet Container image "hub.tenxcloud.com/u4a-component/capsule:v0.1.2-20221122" already present on machine

Root cause

Thanks to @0xff-dev for providing the solution.

After removing the liveness probe, the container's error output became visible: during initialization, capsule exceeded the operating system's limit on the maximum number of open files, so it could not start.
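You can check the current limit on the node before applying the fix (a quick sanity check; on many distributions the default is 128):

sysctl fs.inotify.max_user_instances
# equivalent to:
cat /proc/sys/fs/inotify/max_user_instances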

Solution

Thanks to @0xff-dev for the solution: set fs.inotify.max_user_instances=81920.

Answer obtained from Microsoft's New Bing:

fs.inotify.max_user_instances is a kernel parameter that sets the maximum number of inotify instances each user may create. inotify is a mechanism for watching file-system changes. There are two ways to modify this parameter:

• Temporary change: use the sysctl -w command to change it at runtime, for example:
    sudo sysctl -w fs.inotify.max_user_instances=81920

This command sets fs.inotify.max_user_instances to 81920 and writes it to the /proc/sys/fs/inotify/max_user_instances file. The change only lasts until the next reboot, after which the default value is restored.

• Permanent change: add the following line to the /etc/sysctl.conf file:
    fs.inotify.max_user_instances=81920

Then run sudo sysctl -p to load the settings from that file, so the change takes effect again after every reboot.

For more information about sysctl and fs.inotify.max_user_instances, consult their man pages and the kernel documentation.

    - + \ No newline at end of file diff --git a/en/docs/building-base/add-cluster/index.html b/en/docs/building-base/add-cluster/index.html index 8ab47bb0b..c112f2c87 100644 --- a/en/docs/building-base/add-cluster/index.html +++ b/en/docs/building-base/add-cluster/index.html @@ -5,13 +5,13 @@ 添加集群 | Framework as a Building Block for Kubernetes - +

Adding a Cluster

1. Create a namespace for cluster management (cluster-system works well); it stores the cluster information:
kubectl create ns cluster-system
2. Obtain the token used to add the cluster:
export TOKENNAME=$(kubectl get serviceaccount/host-cluster-reader -n u4a-system -o jsonpath='{.secrets[0].name}')
kubectl get secret $TOKENNAME -n u4a-system -o jsonpath='{.data.token}' | base64 -d
3. Log in to the management portal, open "Cluster Management", refer to Installing the Building Base, and click "Add Cluster".

4. Enter the cluster name and adjust the cluster suffix as needed; here the "API Token" access method is used.

• API Host: use a K8s API address that supports the OIDC protocol. Run kubectl get ingress -nu4a-system and take the Host of kube-oidc-proxy-server-ingress, e.g. https://k8s.172.22.96.136.nip.io (no trailing /)
• API Token: the token obtained in step 2
5. After the cluster is added, it appears in the list with its status; under "Tenant Management" you will see a system tenant named "system-tenant".
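Optionally, you can sanity-check the API Host and token before adding the cluster (an illustrative check; substitute your own API Host):

export TOKEN=$(kubectl get secret $TOKENNAME -n u4a-system -o jsonpath='{.data.token}' | base64 -d)
curl -k https://k8s.172.22.96.136.nip.io/version -H "Authorization: Bearer $TOKEN"
# a JSON version payload indicates the endpoint and the token are both valid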
    - + \ No newline at end of file diff --git a/en/docs/building-base/configuration/3rd-party-account/index.html b/en/docs/building-base/configuration/3rd-party-account/index.html index eb1220b22..b0557d990 100644 --- a/en/docs/building-base/configuration/3rd-party-account/index.html +++ b/en/docs/building-base/configuration/3rd-party-account/index.html @@ -5,7 +5,7 @@ 使用第三方系统登录 | Framework as a Building Block for Kubernetes - + @@ -14,7 +14,7 @@ 图 1

• In the left navigation, choose "Applications", then fill in the required fields when creating a new application:

    • Name: as desired
    • Callback URL: <u4a-component address>/oidc/callback
    • Scopes: check read_user and openid (Figure 2)
    2. Save the application, then click the application you just saved on the page to get the view below. (Figure 3)

    Record the Application ID (clientid), Secret (clientsecret), and Callback URL (redirecturl); they are needed for the oidc-server configuration.

3. Edit the oidc-server configuration file and add a new connector entry (kubectl edit cm oidc-server -n u4a-system), following this example:
connectors:
- type: k8scrd
  ...
- type: gitlab  # fixed value: gitlab
  name: gitlab  # display name
  id: gitlab    # fixed value: gitlab
  config:
    baseURL: http://gitlab.172.22.50.155.nip.io  # externally reachable GitLab address
    clientID: ef2b579e5b4c1cf9ae5b0b2acb166271ebff5892e84aa113689d4646ffcb29e7  # the application clientID from the "configure gitlab" step above
    clientSecret: 3a9e79368a70bcdf1e4ac1df64e4220e7af798876333c9642a8edb782e6eb558  # the application secret from the "configure gitlab" step above
    redirectURI: https://portal.172.22.96.209.nip.io/oidc/callback  # the callback URL from the "configure gitlab" step above
4. Enable the third-party integration. gitlab and github are currently supported; just set the corresponding enabled field to true.
• kubectl edit connector3rd connector3rd
kind: Connector3rd
metadata:
  annotations:
    helm.sh/hook: post-install,post-upgrade
    helm.sh/hook-weight: "-5"
  name: connector3rd
spec:
  connectors:
  - description: gitlab description
    enabled: false  # change to true
    icon: <keep unchanged>
    id: gitlab
    name: gitlab
  - description: github description
    enabled: false
    icon: <keep unchanged>
    id: github
    name: github
5. After the configuration is done, restart the oidc-server service for it to take effect; the corresponding login icon then appears on the login page.

Figure 4
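Restarting oidc-server can be done by deleting its pod and letting the Deployment recreate it (a sketch following the same pattern used elsewhere in these docs):

kubectl delete pod -n u4a-system $(kubectl get pod -n u4a-system | grep oidc-server | awk '{print $1}')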

    - + \ No newline at end of file diff --git a/en/docs/building-base/configuration/audit-config/index.html b/en/docs/building-base/configuration/audit-config/index.html index 95c16d399..3a4feaa5e 100644 --- a/en/docs/building-base/configuration/audit-config/index.html +++ b/en/docs/building-base/configuration/audit-config/index.html @@ -5,14 +5,14 @@ 配置审计能力 | Framework as a Building Block for Kubernetes - +

Configuring Auditing

tip

Note: auditing depends on collection of the audit logs, so the ElasticSearch log service address must be configured in the cluster settings; see the logging component for the log service configuration.

1. Edit the audit-policy.yaml file to configure auditing: kubectl edit cm audit-policy-conf -n u4a-system

Define the policy following this pattern:

apiVersion: audit.k8s.io/v1beta1
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Don't audit namespaces: kube-system/cluster-system/system-bigdata
  - level: None
    resources:
      - group: ""  # core API group
        resources: ["secrets", "configmaps"]
    namespaces: ["kube-system", "cluster-system", "system-bigdata"]
  # Only enable 'write' verbs audit log for secret and configmap
  - level: Metadata
    verbs: ["create","delete","deletecollection","patch","update"]
    resources:
      - group: ""  # core API group
        resources: ["secrets", "configmaps"]

Each component should add its own resource types to the audit rule list as needed; auditing write operations is sufficient by default. A rule sketch follows.
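For example, a component that ships its own CRDs might append a rule like the following (illustrative only; substitute your component's API group and resource names):

- level: Metadata
  verbs: ["create","delete","deletecollection","patch","update"]
  resources:
  - group: "core.kubebb.k8s.com.cn"
    resources: ["components", "componentplans"]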

2. Configure the kube-apiserver arguments (a similar configuration can be applied on kube-oidc-proxy when the original K8s cluster cannot be modified). Add:

# path of the audit policy file
- --audit-policy-file=/etc/kubernetes/pki/audit-policy.yaml
# path of the audit log file
- --audit-log-path=/var/log/apiserver/audit/audit.log
# log retention policy
- --audit-log-maxage=7
- --audit-log-maxbackup=10
- --audit-log-maxsize=10

If you modify K8s directly, edit /etc/kubernetes/manifests/kube-apiserver.yaml to add these arguments. After the change, confirm that the kube-apiserver container restarts; only then does the configuration take effect. A mount sketch follows.
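Note that the policy file and the log directory must also be visible inside the kube-apiserver container, which typically means hostPath mounts along these lines (a sketch under the paths used above; the volume names are illustrative):

volumeMounts:
- mountPath: /etc/kubernetes/pki/audit-policy.yaml
  name: audit-policy
  readOnly: true
- mountPath: /var/log/apiserver/audit
  name: audit-log
volumes:
- hostPath:
    path: /etc/kubernetes/pki/audit-policy.yaml
    type: File
  name: audit-policy
- hostPath:
    path: /var/log/apiserver/audit
    type: DirectoryOrCreate
  name: audit-log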

3. Configure fluentd for collection. (The index template conflicts with the existing fluentd, so a dedicated fluentd process is needed to collect the audit logs on the master nodes.) The currently tested approach: copy the existing fluentd DaemonSet, change the DaemonSet's name, and mount the following configuration file:

apiVersion: v1
data:
  fluent.conf: |2
    # for audit log
    <source>
      @type tail
      @id in_tail_kube_apiserver_audit
      multiline_flush_interval 5s
      path /var/log/apiserver/audit/audit.log
      pos_file /var/log/kube-apiserver-audit.log.pos
      tag kube-apiserver-audit
      <parse>
        @type json
        keep_time_key true
        time_key timestamp
        time_format %Y-%m-%dT%T.%L%Z
      </parse>
    </source>

    ## Used for health check
    <source>
      @type http
      port 9880
      bind 0.0.0.0
    </source>

    ## sink all log to elasticsearch directly
    <match **>
      @type elasticsearch
      @log_level debug
      include_tag_key true
      host elasticsearch-logging
      port 9200
      user "#{ENV['ES_USERNAME']}"
      password "#{ENV['ES_PASSWORD']}"
      scheme "#{ENV['ES_SCHEME']}"
      ca_file /etc/fluent/certs/ca.crt
      logstash_prefix audit-k8s
      logstash_format true
      # Set the chunk limit the same as for fluentd-gcp.
      reload_on_failure true
      reconnect_on_error true
      request_timeout 120s
      <buffer>
        @type file
        path /var/log/td-agent/buffer/elasticsearch
        chunk_limit_size 15MB
        total_limit_size 20GB
        flush_interval 3s
        flush_thread_count 8
        flush_mode interval
        # Never wait longer than 5 minutes between retries.
        retry_timeout 300
        retry_forever true
      </buffer>
    </match>
kind: ConfigMap
metadata:
  labels:
    component: fluentd
    k8s-app: fluentd
  # the new fluentd uses this configmap
  name: fluentd-audit
  namespace: kube-system

Notes:

• The new fluentd DaemonSet only needs to run on the nodes where kube-oidc-proxy is deployed, via node affinity (see the sketch below)
• Point the DaemonSet at the ConfigMap above, named fluentd-audit
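A node-affinity sketch for the copied DaemonSet (the label ingress-node=true is a hypothetical placeholder; use whatever label identifies the kube-oidc-proxy nodes in your cluster):

spec:
  template:
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: ingress-node
                operator: In
                values:
                - "true"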

4. The audit records finally stored in ES look as follows; query ES to retrieve the audit logs:

{
  "_index": "logstash-2022.08.20",
  "_type": "fluentd",
  "_id": "iRzLu4IBqmHyli33qpUa",
  "_version": 1,
  "_score": null,
  "_source": {
    "kind": "Event",
    "apiVersion": "audit.k8s.io/v1",
    "level": "Metadata",
    "auditID": "7c0072aa-b48a-4772-bc52-42e50c0e65ce",
    "stage": "ResponseComplete",
    "requestURI": "/api/v1/namespaces/addon-system/configmaps/42c733ea.clastix.capsule.io",
    "verb": "update",
    "user": {
      "username": "system:serviceaccount:addon-system:default",
      "uid": "c8cb442d-853c-4a53-9c83-c7a1520095c4",
      "groups": [
        "system:serviceaccounts",
        "system:serviceaccounts:addon-system",
        "system:authenticated"
      ]
    },
    "sourceIPs": [
      "172.22.96.146"
    ],
    "userAgent": "manager/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election",
    "objectRef": {
      "resource": "configmaps",
      "namespace": "addon-system",
      "name": "42c733ea.clastix.capsule.io",
      "uid": "c4542d46-0e07-41be-8420-f912a2918e51",
      "apiVersion": "v1",
      "resourceVersion": "236579314"
    },
    "responseStatus": {
      "metadata": {},
      "code": 200
    },
    "requestReceivedTimestamp": "2022-08-20T15:07:41.991582Z",
    "stageTimestamp": "2022-08-20T15:07:42.000098Z",
    "annotations": {
      "authentication.k8s.io/legacy-token": "system:serviceaccount:addon-system:default",
      "authorization.k8s.io/decision": "allow",
      "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"t7d.io.capsule-manager-rolebinding\" of ClusterRole \"cluster-admin\" to ServiceAccount \"default/addon-system\""
    },
    "@timestamp": "2022-08-20T15:07:42.000871648+00:00",
    "tag": "kube-apiserver-audit"
  },
  "fields": {
    "requestReceivedTimestamp": [
      "2022-08-20T15:07:41.991Z"
    ],
    "stageTimestamp": [
      "2022-08-20T15:07:42.000Z"
    ],
    "@timestamp": [
      "2022-08-20T15:07:42.000Z"
    ]
  },
  "sort": [
    1661008062000
  ]
}

5. For audits that are not of standard K8s resource types, an application/service can write its own audit entries into the /var/log/apiserver/audit directory (named e.g. service-audit.log), as long as they follow the K8s audit message format. A single audit record looks like this:

    {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"8d8d8163-54e8-457d-94f6-7851e887f3cf","stage":"ResponseComplete","requestURI":"/api/v1/namespaces/u4a-system/secrets/sh.helm.release.v1.u4a-system.v1","verb":"delete","user":{"username":"admin","groups":["system:nodes","iam.tenxcloud.com"]},"sourceIPs":["172.16.31.254"],"userAgent":"helm/v0.0.0 (darwin/amd64) kubernetes/$Format","objectRef":{"resource":"secrets","namespace":"u4a-system","name":"sh.helm.release.v1.u4a-system.v1","apiVersion":"v1"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2022-08-24T05:36:36.524760Z","stageTimestamp":"2022-08-24T05:36:36.529890Z"}

The key fields are:

1) kind: always "Event"; queries only look at Event audit records

2) stage: currently always "ResponseComplete"; only the response-complete time is recorded, not the request-received time

3) verb: the operation (create/delete/update/query)

4) user.username: the operator

5) sourceIPs: the client IP

6) objectRef.resource: the resource operated on

7) objectRef.namespace: the project/namespace operated on

8) responseStatus.code: the response code

9) requestReceivedTimestamp: when the request arrived
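A query sketch over these fields (illustrative; the host, credentials, and audit-k8s-* index pattern follow the fluentd configuration above):

curl -k -u "$ES_USERNAME:$ES_PASSWORD" \
  "https://elasticsearch-logging:9200/audit-k8s-*/_search" \
  -H 'Content-Type: application/json' \
  -d '{"query":{"bool":{"must":[{"match":{"verb":"delete"}},{"match":{"objectRef.resource":"secrets"}}]}}}'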
    - + \ No newline at end of file diff --git a/en/docs/building-base/configuration/customize-menu/index.html b/en/docs/building-base/configuration/customize-menu/index.html index d7c84b576..3c3e45586 100644 --- a/en/docs/building-base/configuration/customize-menu/index.html +++ b/en/docs/building-base/configuration/customize-menu/index.html @@ -5,14 +5,14 @@ 自定义菜单 | Framework as a Building Block for Kubernetes - +

Customizing Menus

All kubebb menus are defined through the Menu CRD. To add your own menus, refer to the following menu example:

# main menu
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu
spec:
  column: 1
  isRenderSelectCurrent: false
  parentOwnerReferences:
    apiVersion: ""
    kind: ""
    name: ""
    uid: ""
  rankingInColumn: 100
  tenant: true
  text: 测试菜单
  textEn: "Test Menu"
---
# index menu of the test menu
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu-index
spec:
  getTitleForReplaceSider: {}
  parentOwnerReferences:
    apiVersion: component.t7d.io/v1beta1
    blockOwnerDeletion: false
    controller: false
    kind: Menu
    name: demo-menu
    uid: ""
  rankingInColumn: 100
  tenant: true
  text: 菜单索引项
  textEn: "Menu Index Item"
---
# submenu that actually links to a page
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu-submenu1
spec:
  getTitleForReplaceSider: {}
  isRenderSelectCurrent: false
  parentOwnerReferences:
    apiVersion: component.t7d.io/v1beta1
    blockOwnerDeletion: false
    controller: false
    kind: Menu
    name: demo-menu-index
    uid: ""
  pathname: /demo-feature1
  rankingInColumn: 200
  text: 测试子菜单
  textEn: "Test Submenu"

Deploy the menu items into the environment with kubectl apply -f, as shown in Figure 1.
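To verify the result after applying (assuming the three manifests above were saved as demo-menu.yaml):

kubectl apply -f demo-menu.yaml
kubectl get menus demo-menu demo-menu-index demo-menu-submenu1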

    - + \ No newline at end of file diff --git a/en/docs/building-base/configuration/customize-portal/index.html b/en/docs/building-base/configuration/customize-portal/index.html index 1c58bca52..d65c884fe 100644 --- a/en/docs/building-base/configuration/customize-portal/index.html +++ b/en/docs/building-base/configuration/customize-portal/index.html @@ -5,13 +5,13 @@ 自定义门户 | Framework as a Building Block for Kubernetes - +

Customizing the Portal

1. Prepare the logos to use as replacements, e.g. logo-white.png and favicon.png, and create the corresponding configmap:
kubectl create configmap portal-logos -n u4a-system \
  --from-file=logo-white.png=./logo-white.png \
  --from-file=favicon.ico=./favicon.png
2. Modify the bff-server deployment to mount the configmap:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bff-server
  namespace: u4a-system
spec:
  template:
    spec:
      volumes:
      - hostPath:
          path: /etc/localtime
          type: ""
        name: time-localtime
      # add the portal-logos configmap created above as a volume
      - configMap:
          name: portal-logos
        name: logos
      containers:
        volumeMounts:
        - mountPath: /etc/localtime
          name: time-localtime
          readOnly: true
        # mount the logos volume onto the logo directory
        - mountPath: /usr/src/app/public/profile/img
          name: logos

Customizing the primary color

Creating a portal-global-configs configmap customizes the portal's primary color, for example:

apiVersion: v1
kind: ConfigMap
metadata:
  name: portal-global-configs
  namespace: u4a-system
data:
  global-configs: |
    {"theme": {"primaryColor": "#008F35"}}

Create the configMap by applying the manifest above (saved e.g. as portal-global-configs.yaml); it takes effect after refreshing the portal:

kubectl apply -f portal-global-configs.yaml
Then modify the bff-server deployment to mount the configmap:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bff-server
  namespace: u4a-system
spec:
  template:
    spec:
      volumes:
      - hostPath:
          path: /etc/localtime
          type: ""
        name: time-localtime
      # add the portal-global-configs configmap created above as a volume
      - configMap:
          name: portal-global-configs
        name: portal-global-configs
      containers:
        volumeMounts:
        - mountPath: /etc/localtime
          name: time-localtime
          readOnly: true
        # mount the portal-global-configs volume onto the configs directory
        - mountPath: /usr/src/app/configs
          name: portal-global-configs
    - + \ No newline at end of file diff --git a/en/docs/building-base/configuration/issue-oidc-proxy-certs/index.html b/en/docs/building-base/configuration/issue-oidc-proxy-certs/index.html index 573d30df7..ad10823bf 100644 --- a/en/docs/building-base/configuration/issue-oidc-proxy-certs/index.html +++ b/en/docs/building-base/configuration/issue-oidc-proxy-certs/index.html @@ -5,7 +5,7 @@ 生成 oidc-proxy 的证书 | Framework as a Building Block for Kubernetes - + @@ -28,7 +28,7 @@ -reqexts req_ext \ -config openssl.cnf \ -out server.csr

4. Generate the base64 content of server.csr:

cat server.csr | base64 | tr -d "\n"

5. Create a CertificateSigningRequest in the Kubernetes cluster for oidc-proxy.

Replace the request value with the output generated in step 4:

cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: myuser
spec:
  request: <need replace by the content generated by step 4>
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
EOF

6. Approve the CertificateSigningRequest:

    kubectl certificate approve myuser

7. Get the certificate of oidc-proxy issued by the Kubernetes cluster:

    kubectl get csr myuser -oyaml | grep certificate: | awk '{print $2}' |base64 -d > ./server.cert

8. After the steps above you have server.key and server.cert; rename them as you like. You can now create the secret for oidc-proxy with kubectl create secret; a sketch follows.
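A full form of that command might look like this (a sketch: the secret name oidc-proxy-cert-tls matches the one referenced by the kubelogin guide in these docs; adjust the name and namespace to your deployment):

kubectl create secret tls oidc-proxy-cert-tls \
  --cert=./server.cert --key=./server.key \
  -n u4a-system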

    - + \ No newline at end of file diff --git a/en/docs/building-base/configuration/oidc-integration/index.html b/en/docs/building-base/configuration/oidc-integration/index.html index 2a2faf0a7..785ab63ae 100644 --- a/en/docs/building-base/configuration/oidc-integration/index.html +++ b/en/docs/building-base/configuration/oidc-integration/index.html @@ -5,13 +5,13 @@ 集成单点登录 | Framework as a Building Block for Kubernetes - +

Integrating Single Sign-On

Through the OIDC protocol, applications can share kubebb's unified accounts, authentication, and single sign-on.

1. Register an OIDC client on the kubebb server

Add a client by modifying the ConfigMap used by dex-server:

kubectl edit cm oidc-server -n u4a-system

Add a new client under staticClients, for example:

staticClients:
- id: my-oidc-client
  name: my-oidc-client
  secret: ZXhhbXBsZS1hcHAtc2VjcmV0  # a random secret; take care not to leak it
  redirectURIs:  # multiple callback URLs can be configured
  - "<my-oidc-client-callback-address>"  # the callback URL after a successful login, e.g. "http://192.168.1.32:8022"

Restart oidc-server:

kubectl delete pod <oidc-server-pod> -n u4a-system

2. Integrating OIDC login

1. Environment information example

2. Application registration information

3. User login

• On the application side, an OIDC SDK is usually used to handle the OIDC flow and simplify the interaction; golang applications, for example, can use https://github.com/coreos/go-oidc

The following shows the basic OIDC single sign-on flow via direct API calls:

1) When a user visits the application, it should automatically redirect to the kubebb authentication service's login URL, for example:

https://192.168.2.216/oidc/auth?client_id=my-oidc-client&redirect_uri=http://192.168.1.32:8022/auth/callback&response_type=code&scope=openid+profile+email+offline_access
Parameter: Description
client_id: the application ID, issued by kubebb at registration, e.g. my-oidc-client
redirect_uri: the application's callback URL, provided by the application at registration, e.g. http://192.168.1.32:8022/auth/callback
response_type: fixed value code
scope: fixed value openid profile email

2) On kubebb's login page, the user logs in with username/password (skipped automatically if already logged in)

3) After a successful login, the browser is redirected back to the application's registered callback URL, for example:

http://192.168.1.32:8022/auth/callback?code=kf7dmmvhdipdcjczydklwi6pu&state=<state-info>
Parameter: Description
code: the authorization code generated by the kubebb authentication service, used next to obtain tokens
state: the same state value the application passed when redirecting to the kubebb login URL

4) The application backend calls the kubebb token API to obtain the token information, for example:

    curl -XPOST 'https://192.168.2.216/oidc/token' \
    -H 'Authorization: Basic c2FtcGxlLWFwcC0yMTg6WlhoaGJYQnNaUzFoY0hBdGMyVmpjbVYwJw==' \
    -H 'Content-Type: application/x-www-form-urlencoded' \
    --data-urlencode 'code=kf7dmmvhdipdcjczydklwi6pu' \
    --data-urlencode 'grant_type=authorization_code' \
    --data-urlencode 'redirect_uri=http://192.168.1.32:8022/auth/callback'
Parameter: Description
Authorization: the format is Basic XXXX, where XXXX is the base64 encoding of client_id:client_secret
Content-Type: fixed value application/x-www-form-urlencoded
code: the authorization code
grant_type: fixed value authorization_code
redirect_uri: the application's callback URL; the kubebb authentication service verifies it matches the registered one

The request returns the token information:

    ID Token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImQxY2Y2MzA3YmQ5Yzk3OTJiMzdmMGJiM2M1Njk1ZDQ0MWJlZTMzNjcifQ.eyJpc3MiOiJodHRwczovLzE5Mi4xNjguMi4yMTYiLCJzdWIiOiJDZ0V4RWdsMFpXNTRZMnh2ZFdRIiwiYXVkIjoic2FtcGxlLWFwcC0yMTgiLCJleHAiOjE2MzAwMzA0ODEsImlhdCI6MTYyOTk0NDA4MSwiYXRfaGFzaCI6Ik1PUjk0enktTUZNcU5zZUZTM1ZzRXciLCJjX2hhc2giOiJpbkoteDVKUEFCRXhaaEpRaEx3T3pBIiwiZW1haWwiOiJhZG1pbkBleGFtcGxlLmNvbSIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJuYW1lIjoiYWRtaW4iLCJwaG9uZSI6IjE3MzQzMTM1MDUxIiwidXNlcmlkIjoiMSJ9.FrC6oKRsManuP9opqugknJmOE78uKmxX6uteM1flCDVRqRv-riG0C5AOX4K9BTnT9GIlu3H24jydT4ybSissz_wL_mLzoTQWoQ9uMMmd4w1aiGqgO6mIaEh3XvTqtoQv1ltONSkp49bykpdIXbDJxy0PScU0k-0XFNJIMSBwn8SEubgH7NO3xwFzsjaLqBfolxC5YXBuWS8n-FEOqNTg-mx-n_Fu2oemJCT-8qWMqY6FNjRSC3D-2ABkCbl4g76vPLgJ-I6dU6eaJvaBW6S4BzhCX0SitxYrxcXjOGviX1HKOXXSUC1n1HfQpOpNW-FA2G3F-kON94rYr1AEdIwSVw

    Access Token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImQxY2Y2MzA3YmQ5Yzk3OTJiMzdmMGJiM2M1Njk1ZDQ0MWJlZTMzNjcifQ.eyJpc3MiOiJodHRwczovLzE5Mi4xNjguMi4yMTYiLCJzdWIiOiJDZ0V4RWdsMFpXNTRZMnh2ZFdRIiwiYXVkIjoic2FtcGxlLWFwcC0yMTgiLCJleHAiOjE2MzAwMzA0ODEsImlhdCI6MTYyOTk0NDA4MSwiYXRfaGFzaCI6IlhYS3RzUkhZS043WnZGOUFxcXVSd3ciLCJlbWFpbCI6ImFkbWluQGV4YW1wbGUuY29tIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsIm5hbWUiOiJhZG1pbiIsInBob25lIjoiMTczNDMxMzUwNTEiLCJ1c2VyaWQiOiIxIn0.VXUxba9cy4S3ZUtyfbF6A3Xg5fZYL-m6nqR09qiJgJmNUCEkHr_b4xKXXvDJwsgoC0zdUxShd1BpOgN4vi8A7zZ676-PybB2dVT6x3EPpwCVC8_NVYwXijeELWJJ0nU9aAq6p_m-XdXOuPzutbmMLSWVfrY-CS2WSdQISuKxb28slTptzCF4OY3dZugOHZ_v10KAxsMo0Aul4d3C_EXOaiUKzw0OTD7xyYOm8MmJvhzQEYSodHAkbJWnOusKEBtFKJ5hhAqPLCymY2VleN-7Jbqr-DYrDSDtd7FF1vCbDL0-rTwQ5_79FIAu_fusOcVc26M7GlvOtGcniaweNtm1dg

The returned ID Token is a standard JWT; the application can parse the user information from it, for example:

{
  "iss": "https://192.168.2.216/oidc",
  "sub": "CgExEgl0ZW54Y2xvdWQ",
  "aud": "sample-app",
  "exp": 1630030481,
  "iat": 1629944081,
  "at_hash": "MOR94zy-MFMqNseFS3VsEw",
  "c_hash": "inJ-x5JPABExZhJQhLwOzA",
  "email": "admin@example.com",
  "email_verified": true,
  "name": "admin",
  "phone": "17343135051",
  "userid": "1"
}
• Based on this JWT, the application can sync basic user information into its own system and, on top of the unified accounts and authentication, integrate with its own account and permission model.
• The Access Token is generally used to fetch detailed user information from the kubebb authentication service; use it as needed.
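To inspect the ID Token payload locally, you can decode its middle segment (a sketch; JWTs use unpadded URL-safe base64, hence the character translation and the tolerated decode error below):

ID_TOKEN="eyJhbGciOi..."   # the ID Token returned above
echo "$ID_TOKEN" | cut -d '.' -f2 | tr '_-' '/+' | base64 -d 2>/dev/null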
    - + \ No newline at end of file diff --git a/en/docs/building-base/intro/index.html b/en/docs/building-base/intro/index.html index c5e8a7dfd..cd2b4c877 100644 --- a/en/docs/building-base/intro/index.html +++ b/en/docs/building-base/intro/index.html @@ -5,14 +5,14 @@ 介绍 | Framework as a Building Block for Kubernetes - +

Introduction

Technology components

The platform uses a front-end/back-end separated development model centered on K8s, following K8s extension mechanisms and API conventions. The basic logic of the overall development architecture is shown below: (Figure 2)

1. Development and extension of all components authenticate through the unified authentication center
2. Authentication is handled centrally by DockApp, the micro-frontend main framework; other micro-frontend extensions do not need to integrate with the authentication center separately
3. The development architecture can be viewed as three layers:
   • First layer: the front end uses a micro-frontend architecture, developed with low-code approaches where possible to raise the proportion of auto-generated code
   • Second layer: OpenAPIs are added per business need to form a unified BFF layer that aggregates APIs and serves the business-scenario data the front end needs
   • Third layer: the back end is developed in the Operator pattern (CRD + controller), forming a data-driven development flow
4. The external APIs consist of two parts:
   • OpenAPIs provided by the BFF layer
   • Resource APIs provided by the K8s layer

Accessing via domain names

On the proxy server/load balancer, nip.io is used to support binding http/https domain names, so configuration can default to domain names throughout.

• Access with nip.io: http://<ip-address>.nip.io, e.g. http://192.168.1.123.nip.io
    - + \ No newline at end of file diff --git "a/en/docs/category/\347\224\250\346\210\267\346\214\207\345\215\227/index.html" "b/en/docs/category/\347\224\250\346\210\267\346\214\207\345\215\227/index.html" index 225e50aa8..301fbda1c 100644 --- "a/en/docs/category/\347\224\250\346\210\267\346\214\207\345\215\227/index.html" +++ "b/en/docs/category/\347\224\250\346\210\267\346\214\207\345\215\227/index.html" @@ -5,13 +5,13 @@ 用户指南 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git "a/en/docs/category/\347\273\204\344\273\266\345\270\202\345\234\272/index.html" "b/en/docs/category/\347\273\204\344\273\266\345\270\202\345\234\272/index.html" index a2c631a83..4ec5c1f61 100644 --- "a/en/docs/category/\347\273\204\344\273\266\345\270\202\345\234\272/index.html" +++ "b/en/docs/category/\347\273\204\344\273\266\345\270\202\345\234\272/index.html" @@ -5,13 +5,13 @@ 组件市场 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git "a/en/docs/category/\347\273\204\344\273\266\345\274\200\345\217\221/index.html" "b/en/docs/category/\347\273\204\344\273\266\345\274\200\345\217\221/index.html" index 514677267..9b8777958 100644 --- "a/en/docs/category/\347\273\204\344\273\266\345\274\200\345\217\221/index.html" +++ "b/en/docs/category/\347\273\204\344\273\266\345\274\200\345\217\221/index.html" @@ -5,13 +5,13 @@ 组件开发 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git "a/en/docs/category/\350\207\252\345\256\232\344\271\211\351\205\215\347\275\256/index.html" "b/en/docs/category/\350\207\252\345\256\232\344\271\211\351\205\215\347\275\256/index.html" index bb2813c1f..1a730a87a 100644 --- "a/en/docs/category/\350\207\252\345\256\232\344\271\211\351\205\215\347\275\256/index.html" +++ "b/en/docs/category/\350\207\252\345\256\232\344\271\211\351\205\215\347\275\256/index.html" @@ -5,13 +5,13 @@ 自定义配置 | Framework as a Building Block for Kubernetes - + - + \ No newline at end of file diff --git a/en/docs/component-market/blockchain/index.html b/en/docs/component-market/blockchain/index.html index 6d6b22420..be7c447af 100644 --- a/en/docs/component-market/blockchain/index.html +++ b/en/docs/component-market/blockchain/index.html @@ -5,13 +5,13 @@ 区块链 - 联盟链 | Framework as a Building Block for Kubernetes - +

Blockchain - Consortium Chain

The blockchain component has not been componentized yet, but it can still be deployed on the building-base Kit; see the bestchains platform installation for details.

• Note: adjust the ingressNode and kubeProxyNode settings in scripts/e2e.sh
• After deployment, the blockchain menus and features appear in the management portal

For detailed usage of bestchains, refer to the help documentation.

    - + \ No newline at end of file diff --git a/en/docs/component-market/kubedashboard/index.html b/en/docs/component-market/kubedashboard/index.html index 7d7f5e8f2..e1336af4a 100644 --- a/en/docs/component-market/kubedashboard/index.html +++ b/en/docs/component-market/kubedashboard/index.html @@ -5,13 +5,13 @@ kubedashboard 管理工具 | Framework as a Building Block for Kubernetes - +

kubedashboard management tool

This section describes how to deploy Kubernetes Dashboard on top of the kubebb building base, with unified OIDC authentication, single sign-on, and native Kubernetes RBAC.

• Tool source code: kubernetes dashboard

• Prerequisite

  • Deploy kubebb's building-base component u4a-component, which provides the basic account, authentication, authorization, and audit capabilities built on K8s.
  • Fetch the deployment components:
  $ git clone https://github.com/kubebb/addon-components.git
  # enter the corresponding directory
  $ cd kube-dashboard

Installing Kubernetes Dashboard

1. Edit values.yaml: replace the image addresses as needed and fill in every property value that starts with <replaced-. The OIDC details can be read from the configmap:
# get the OIDC client configuration
kubectl edit cm oidc-server -n u4a-system
# note down issuer and the id/secret under staticClients

Modify values.yaml:

dashboard:
  dashboardImage: hub.tenxcloud.com/addon_system/kube-dashboard:v2.7.0
  proxyImage: hub.tenxcloud.com/addon_system/keycloak-gatekeeper:latest
  metricsImage: hub.tenxcloud.com/addon_system/kube-dashboard-metrics-scraper:v1.0.8

ingress:
  class: portal-ingress
  host: kubedashboard.<replaced-ingress-nginx-ip>.nip.io

# You must check and update the value of each variable below
kubeOidcProxy:
  issuerUrl: <replaced-issuer-url>  # https://portal.172.22.96.209.nip.io/oidc
  clientId: <replaced-client-id>
  clientSecret: <replaced-client-secret>
  usernameClaim: preferred_username
  groupClaim: groups

hostConfig:
  enabled: true
  hostAliases:
  - hostnames:
    # MUST update this value
    - portal.<replaced-ingress-nginx-ip>.nip.io
    ip: <replaced-ingress-nginx-ip>
2. Install the add-on with helm:
# if needed, create a dedicated namespace for the add-on, e.g. addon-system
kubectl create ns addon-system
# deploy the kube-dashboard add-on
helm install kube-dashboard -n addon-system .
• Note: at this point the dashboard pod will sit in ContainerCreating, because the configuration file it depends on does not exist yet; we prepare that file next.
3. Create the kubeconfig file kube-dashboard needs, so that it authenticates through the unified kube-oidc-proxy; the config must also carry the correct certificate and connection token:
# copy the kubeconfig template
$ cp sample-kubeconfig kubeconfig
# edit kubeconfig file to use the correct cluster.certificate-authority-data, cluster.server, user.token

# Step 1
$ export CLUSTER_CA=$(kubectl get secret -n u4a-system oidc-server-root-secret -o jsonpath='{.data.ca\.crt}')
# use the value from $CLUSTER_CA to replace cluster.certificate-authority-data(<certificate-authority-data>) in kubeconfig file

# Step 2
$ export USER_TOKEN_NAME=$(kubectl -n addon-system get serviceaccount kubernetes-dashboard -o=jsonpath='{.secrets[0].name}')
$ export USER_TOKEN_VALUE=$(kubectl -n addon-system get secret/${USER_TOKEN_NAME} -o=go-template='{{.data.token}}' | base64 --decode)
# use the value from $USER_TOKEN_VALUE to replace user.token(<user-token>) in kubeconfig file

# Step 3 replace cluster.server(<cluster-server>) with the address of kube-oidc-proxy

# Step 4 create the configmap
$ kubectl create cm dashboard-kubeconfig --from-file=kubeconfig -n addon-system
4. Restart kube-dashboard:
$ kubectl delete pod -n addon-system $(kubectl  get pod -n addon-system | grep kubernetes-dashboard | awk '{print $1}')
5. Add the kube-dashboard callback URL to the OIDC service:
$ kubectl edit cm oidc-server -n u4a-system
# find redirectURIs and add a new redirect url 'https://<kubedashboard-host-name>/oauth/callback'
6. Access kube-dashboard at kubedashboard.<replaced-ingress-nginx-ip>.nip.io. If you are not logged in yet, you will be redirected to the unified authentication service; after a successful login you return to kube-dashboard with an authorized token and can use it normally.

7. You can grant the logged-in user different RBAC policies to verify that only authorized resources are accessible to that user.

Uninstall

Uninstall the add-on with helm uninstall:

    helm uninstall kube-dashboard -n addon-system
    - + \ No newline at end of file diff --git a/en/docs/component-market/kubelogin/index.html b/en/docs/component-market/kubelogin/index.html index b7e5b48f7..6c5470f02 100644 --- a/en/docs/component-market/kubelogin/index.html +++ b/en/docs/component-market/kubelogin/index.html @@ -5,14 +5,14 @@ 使用 kubelogin 工具 | Framework as a Building Block for Kubernetes - +

Using the kubelogin tool

Here are the steps to install kubelogin and integrate it with the OIDC server for the kubectl tool, so that you can authenticate with Kubernetes.

• Refer to kubelogin for details.
• Prerequisite: install u4a-component; it provides the account, authentication, authorization and audit functionality built on Kubernetes.

Install kubelogin

Get the binary from the download page and pick the one matching your OS.

Then put the kubelogin binary on your path under the name kubectl-oidc_login so that the kubectl plugin mechanism can find it when you invoke kubectl oidc-login.

    Prepare kubeconfig file

    1. Backup your original config file under ~/.kube/config and create a new one.
    $ cd ~/.kube
    $ cp config config_backup
    $ kubectl config set-credentials oidc \
    --exec-api-version=client.authentication.k8s.io/v1beta1 \
    --exec-command=kubectl \
    --exec-arg=oidc-login \
    --exec-arg=get-token \
    --exec-arg=--oidc-extra-scope=email \
    --exec-arg=--oidc-extra-scope=profile \
    --exec-arg=--oidc-issuer-url=https://portal.172.22.96.209.nip.io/oidc \
    --exec-arg=--oidc-client-id=bff-client \
    --exec-arg=--oidc-client-secret=61324af0-1234-4f61-b110-ef57013267d6 \
    --exec-arg=--insecure-skip-tls-verify
2. Point the cluster to kube-oidc-proxy, or to k8s-apiserver if OIDC is enabled on it:
- cluster:
    certificate-authority-data: ....
    server: https://172.22.96.133  # Update this value
  name: cluster-name
3. Add http://localhost:8000 as a valid redirect URL of your OIDC server, so it can redirect to the local server after a successful login.

4. Switch the current context to oidc:

$ kubectl config set-context --current --user=oidc

Run kubectl get nodes; kubectl executes kubelogin before calling the Kubernetes APIs. Kubelogin automatically opens the browser, and you can log in to the provider.

After a successful login, you'll get an Authenticated response.

5. If kubectl get nodes fails with Unable to connect to the server: x509: certificate signed by unknown authority, remove certificate-authority-data and set insecure-skip-tls-verify to true:
- cluster:
    # certificate-authority-data: ....
    server: https://172.22.96.133
    insecure-skip-tls-verify: true  # Add it here
  name: cluster-name

You can also use valid certificate data instead, for example:

    export CLUSTER_CA=$(kubectl get secret -n u4a-system oidc-proxy-cert-tls -o jsonpath='{.data.ca\.crt}')
    # Use the data from CLUSTER_CA and set to certificate-authority-data

Then you can run any kubectl command as the logged-in user; Kubernetes RBAC and audit take effect for that user.

    Get id token from cached file

The id_token is cached under ~/.kube/cache/oidc-login/<cached-file>; you can cat that file to get its content and the token. For example:

    {"id_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6IjBkMzEyM2U1MWIxN2IzZTNlNDYzNjgxZTMzZTFkOTNkM2RiY2IwZDkifQ.eyJpc3MiOiJodHRwczovL3BvcnRhbC4xNzIuMjIuOTYuMjA5Lm5pcC5pby9vaWRjIiwic3ViIjoiQ2dWaFpHMXBiaElHYXpoelkzSmsiLCJhdWQiOiJiZmYtY2xpZW50IiwiZXhwIjoxNjc0MzU3OTU0LCJpYXQiOjE2NzQyNzE1NTQsIm5vbmNlIjoiVHhJVlE4VlFINW9PTGtLeGV1ekk3VWp3VVU0WUYyOEQ1N18xLWVpVWEtVSIsImF0X2hhc2giOiJOamZKZWJ1Ry1uUlVlWDJNY2dfZzVRIiwiY19oYXNoIjoiQWVQdUtsTmN5RjgyTy1xWFFqUzEwdyIsImVtYWlsIjoiYWRtaW5AdGVueGNsb3VkLmNvbSIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJuYW1lIjoiYWRtaW4iLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJhZG1pbiIsInBob25lIjoiIiwidXNlcmlkIjoiYWRtaW4ifQ.YtmRZbS7-B0s0vVh9myH1FYcWuKoKNNYkPZQ5asbrQE2n8eC7w74n8D7pvM6v44kvBnp27hNOeo06EK4leNR2Inb2UJtd2QBS1L9i4A3V_vm06o4DkvqfyNcbD7-hL6ES0XkzIKimG3WMMJIznvuA71W_88t77U7jC7wvtKbT7k1KZWgOV6VappWlz7uecuBSQahoCku5AO-s25H1O-FbodOYtL8-ju0sqiHrgmbNaV-f6Wuvvk9XkquAe_dztqWCJ0axfUW7u4J-M947mlR1JlWwbhm-nQXgvugyMVh3FjFOjwi7jR3BA3Me-iuS_XPNSWx-DB0dfsCfErCJ9DvBA"}

    Get id token using username/password

1. Enable passwordConnector in the oidc-server configuration:
# kubectl edit cm oidc-server -n u4a-system
oauth2:
  # Enable this one
  passwordConnector: k8scrd
  skipApprovalScreen: true
2. Get an id token using kubelogin or curl:
    • kubelogin
    kubelogin get-token --oidc-issuer-url=https://portal.172.22.96.209.nip.io/oidc --oidc-client-id=bff-client --oidc-client-secret=61324af0-1234-4f61-b110-ef57013267d6 --insecure-skip-tls-verify --grant-type=password --username=admin --password=admiN\$123

    # here is the response, get the token from the json
    {"kind":"ExecCredential","apiVersion":"client.authentication.k8s.io/v1beta1","spec":{"interactive":false},"status":{"expirationTimestamp":"2023-02-11T04:37:32Z","token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImY2ZjFjMjFkNzFhOGEyYmU3ZTg2YjQyYWIwOTYwY2MxNzU3NjdiM2MifQ.eyJpc3MiOiJodHRwczovL3BvcnRhbC4xNzIuMjIuOTYuMjA5Lm5pcC5pby9vaWRjIiwic3ViIjoiQ2dWaFpHMXBiaElHYXpoelkzSmsiLCJhdWQiOiJiZmYtY2xpZW50IiwiZXhwIjoxNjc2MDkwMjUyLCJpYXQiOjE2NzYwMDM4NTIsImF0X2hhc2giOiJyLWtsUnBQcEd3U0I5TFQyelVQSWtRIiwicGhvbmUiOiIifQ.tFOmGN1w79I_s5pWZZK4zEEHwCyuJRwkNtacmxVcCY-Jms-JOzXUJTxnNm8XzIBC3cZqt5U6oNXMuk68MHq0v3g2tQKJeAwV1aojJrIIp5QHefXMUjl_hTaFe1tRgwsvZqBWhExLi1yaTSUfjmP_SZEb23A0R_AWvc7ClO7sbuKQlkPG_gi2TPCBOeTx0UmlQ14w6U3dIJhR57hXfttdSr2nRqKma8bp_jAiUiWaKLSWSyc3tQsxWl7LeAAbRM3URx-3winVIEPEpUgwIBXnrr-ba9BZwZoD5NGytOGw4xA80eGDmmMIG8U2QarJKsZErpzS7EWbiPBLgS2_Wg1eBA"}}
    • curl
    curl -k -XPOST https://portal.172.22.96.209.nip.io/oidc/token -H "Authorization: Basic <base64 of oidc-client-id:oidc-client-secret>" -H "Content-Type: application/x-www-form-urlencoded"  --data 'grant_type=password&password=password&scope=openid&username=username'

    # here is the response, and get id_token from the json
    {"access_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImY2ZjFjMjFkNzFhOGEyYmU3ZTg2YjQyYWIwOTYwY2MxNzU3NjdiM2MifQ.eyJpc3MiOiJodHRwczovL3BvcnRhbC4xNzIuMjIuOTYuMjA5Lm5pcC5pby9vaWRjIiwic3ViIjoiQ2dWaFpHMXBiaElHYXpoelkzSmsiLCJhdWQiOiJiZmYtY2xpZW50IiwiZXhwIjoxNjc2MDkyOTk0LCJpYXQiOjE2NzYwMDY1OTQsImF0X2hhc2giOiJtM2xsQWprUXdlQnhJbUJlQkgxRG1RIiwicGhvbmUiOiIifQ.iel5l_mzlVf2LjbMqzqXb3sqb7L195a-fY4Aaon2_CVn1lBMzOf2qDYbtVF3KhGHxNlaKRxig63uCDfyts84BMD5-Uaz_x4_mq5QaMVYVYEUw9NWsLP-jQ0bTSZE7MZKlxz_a3AGW_fXwW0Y02dqemugBfC3IagBhroYI2PSTKcNCCQz2aao-ZSQ5-rysKSyo0VPDtcY_K8ikpDChLM9GhUKzbdIvctO6mGBOOKHRkiCAbRegOCFhJ6-0O4k6b-m3rXyJkQAIBfesOPIAFxhQQhg3y9wDEVxbBTZ99fwfvfIuSxN_vsITKCsqpRr7t-30jqReIKsYktyzZ15jiJhKg","token_type":"bearer","expires_in":86399,"id_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImY2ZjFjMjFkNzFhOGEyYmU3ZTg2YjQyYWIwOTYwY2MxNzU3NjdiM2MifQ.eyJpc3MiOiJodHRwczovL3BvcnRhbC4xNzIuMjIuOTYuMjA5Lm5pcC5pby9vaWRjIiwic3ViIjoiQ2dWaFpHMXBiaElHYXpoelkzSmsiLCJhdWQiOiJiZmYtY2xpZW50IiwiZXhwIjoxNjc2MDkyOTk0LCJpYXQiOjE2NzYwMDY1OTQsImF0X2hhc2giOiJRT3NNWGdSeDRYaUJJTVZwSElXeUlRIiwicGhvbmUiOiIifQ.ZDU7AouftfpLAs2SDE3Kb86ggVyDEwrgA3jtUxitKUQwKqosjWiaEEGc3w824FAC3eDZhFr1w5uXT6R30O2s0DPzPb0nesDN8wa2ZscU9ESjZrKAAgpgM7uE1vU41mi7GfdZEUHabx83XFvu69KvmA9OKnqaSdyi3-aPYHyBP5GfNYoQ-mteCBsAbRF8l6fe1VREIYV3sQrBC8b9s1Ony4F8YFWgFE4G_1gxV-0qz8IxgzhLGUgehuwsHTUjMLvyGgTiFrFvrPsftEuEGtOQbKswngWQGlYWSsUIWb79Fdk_-wD08fyM9YUGJyb0Bg_HO2M95CFsSASB4HDO4QHOXw"}

    Logout

You can remove the local cache files under ~/.kube/cache/oidc-login/<cached-file> to log out the current user.

    - + \ No newline at end of file diff --git a/en/docs/component-market/logging/index.html b/en/docs/component-market/logging/index.html index f123d8d54..573d0aefc 100644 --- a/en/docs/component-market/logging/index.html +++ b/en/docs/component-market/logging/index.html @@ -5,7 +5,7 @@ 日志组件 | Framework as a Building Block for Kubernetes - + @@ -18,7 +18,7 @@ hub.tenxcloud.com/system_containers/elasticsearch 7.10.1-ik 3bf941c09b95 8 months ago 963MB hub.tenxcloud.com/system_containers/kubectl v1.20.8 403754878e80 3 months ago 112MB hub.tenxcloud.com/system_containers/fluentd-elk v5.0-kfk 63cd90e77b9c 18 months ago 347MB

Adjust values.yaml

• .Values.rbacSidecar.enabled: false
• .Values.elasticsearch.secure: false
• .Values.ingress.enabled: true

Accessing elasticsearch

1. Get the ingress information:
ES_HOST=$(kubectl get ingress ingress-es -n addon-system | grep ingress-es | awk '{print $3}')
INGRESS_IP=$(kubectl get ingress ingress-es -n addon-system | grep ingress-es | awk '{print $4}')
2. Access elasticsearch:
curl http://$INGRESS_IP -H "Host: $ES_HOST"

A response like the following indicates the logging service is up:

{
  "name": "es-allinone-es-0",
  "cluster_name": "es",
  "cluster_uuid": "ATBDAzVHQeSDb7gaKdgNUw",
  "version": {
    "number": "7.10.1",
    "build_flavor": "default",
    "build_type": "tar",
    "build_hash": "7a15d2a",
    "build_date": "2020-08-12T07:27:20.804867Z",
    "build_snapshot": false,
    "lucene_version": "7.7.3",
    "minimum_wire_compatibility_version": "5.6.0",
    "minimum_index_compatibility_version": "5.0.0"
  },
  "tagline": "You Know, for Search"
}
    - + \ No newline at end of file diff --git a/en/docs/component-market/monitoring/index.html b/en/docs/component-market/monitoring/index.html index e62517d97..a5c9d2e5a 100644 --- a/en/docs/component-market/monitoring/index.html +++ b/en/docs/component-market/monitoring/index.html @@ -5,7 +5,7 @@ 监控组件 | Framework as a Building Block for Kubernetes - + @@ -14,7 +14,7 @@ 各个组件的作用如下:

• node-exporter: collects host metrics such as CPU, memory, and disk;
• victoriametrics: a fast, cost-effective, and scalable monitoring solution and time-series database, responsible for scraping, storing, and querying monitoring data, and for triggering alerts based on alerting rules;
• metrics-server: the aggregator for core Kubernetes monitoring data; it periodically collects metrics from the Kubelet Summary API and exposes them through the Metrics API;
• kube-state-metrics: collects monitoring data for k8s resource objects such as deployments, pods, daemonsets, and cronjobs, exposing them as metrics;
• grafana: a visualization tool offering a powerful and elegant way to create, share, and browse data, with many attractive templates; install it when you need to view monitoring data directly;
• monitoring-operator: manages the monitoring components above.

Installation steps

Prerequisites

• If the vmselect monitoring component has the sidecar enabled and kube-rbac-proxy supports OIDC, the OIDC prerequisites must be deployed first; run

  kubectl  get pod -n u4a-system

  to check whether oidc-server exists and the related components are installed;

• If you want to use ingress, deploy the ingress-controller in advance;

• vmstorage persists data, so prepare a StorageClass in advance;

• Create the group observability, which has permission to access monitoring data.

1. Prepare the images and push them to the environment's harbor registry

• The following images are required:
# main entry: the operator
hub.tenxcloud.com/kubebb/monitoring-operator:v0.1.2

# VictoriaMetrics community images
hub.tenxcloud.com/kubebb/vm-operator:v0.35.1
hub.tenxcloud.com/kubebb/vminsert:v1.91.3-cluster
hub.tenxcloud.com/kubebb/vmstorage:v1.91.3-cluster
hub.tenxcloud.com/kubebb/vmselect:v1.91.3-cluster
hub.tenxcloud.com/kubebb/vmagent:v1.91.3
hub.tenxcloud.com/kubebb/vmalert:v1.91.3

# other dependent images
hub.tenxcloud.com/kubebb/kube-rbac-proxy:v0.13.0-32f11472
hub.tenxcloud.com/kubebb/node-exporter:v2.5.0
hub.tenxcloud.com/kubebb/configmap-reload:v0.3.0
hub.tenxcloud.com/kubebb/prometheus-config-reloader:v0.58.0
hub.tenxcloud.com/kubebb/prom-rule-reloader:v0.1.2
hub.tenxcloud.com/kubebb/alertmanager:v0.20.0
hub.tenxcloud.com/kubebb/kube-state-metrics:v1.9.7 (optional)
hub.tenxcloud.com/kubebb/metrics-server:v0.4.1 (optional)
hub.tenxcloud.com/kubebb/grafana:10.0.2 (optional)

2. Fetch the helm package and unpack it:

tar zxvf monitoring-operator-0.1.0.tgz
cd monitoring-operator

3. Modify the chart's values.yaml

Following the comments in values.yaml, the main points are:

• Adjust the image addresses for your environment;
• Entries with an enabled flag control whether a component is installed: false skips it, true installs it; components without an enabled flag are always installed;
• If you enable nodePort, first check that the port is not in use; set it to 0 if unused;
• If you enable ingress, update the annotation on the ingress resources. The annotation key is kubernetes.io/ingress.class; its value can be read from the args of the ingress-controller deploy, e.g.
  kubectl edit deploy -n kube-system ingress-urygcdmyts
  take the value from the args entry - --ingress-class=nginx-ingress-urygcdmyts, i.e. nginx-ingress-urygcdmyts is the annotation value (see the sketch below);
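The resulting annotation would look like this (the class value below comes from the deploy args in the example above; substitute your own):

metadata:
  annotations:
    kubernetes.io/ingress.class: nginx-ingress-urygcdmyts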

4. Create the namespace:

kubectl --as=admin  --as-group=iam.tenxcloud.com create -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
  labels:
    capsule.clastix.io/tenant: system-tenant
  name: addon-system
EOF
• If the namespace already exists before creation, monitoring may have been deployed previously. To make sure nothing fails later, first clean up the old resources under addon-system and delete the vm-related CRDs; find them with kubectl get crd | grep victoriametrics.com

5. Generate the CA certificate (only needed when vmselect has the sidecar enabled and the OIDC parameters are supported)

For kube-rbac-proxy to support OIDC, its args need the parameters oidc-issuer, oidc-clientID, and oidc-ca-file. If oidc-server is deployed in u4a-system, they can be obtained as follows (for reference):

• Generate the certificate:
kubectl get secret -n u4a-system  oidc-server-root-secret  -oyaml > oidc-sidecar-secret.yaml

Change the yaml's namespace to addon-system and create a new secret:

kubectl create -f oidc-sidecar-secret.yaml
• Obtaining the oidcIssuer and oidcClientID parameters:
kubectl  get cm -n u4a-system   oidc-server -o yaml

oidcIssuer is the issuer value in the output, e.g. https://oidc.192.168.90.217.nip.io

oidcClientID is the id under staticClients, e.g. bff-client

6. helm install

• Run the helm command; monitoring-operator is the release name, adjust as needed:
helm install monitoring-operator -n addon-system ./

7. Check that the components are running:

kubectl get po -n addon-system

Verify that the pods are running normally.

8. Functional verification

• After deployment, the monitoring data can be reached through the ingress address; list the ingress hosts with:

  kubectl  -n addon-system get ingress

  If vmselect has nodePort enabled, the monitoring data can also be reached via host-IP:nodePort

• Add users to the observability group, which has permission to access monitoring data. Obtain a user token and attach it to monitoring requests to verify the permission; without permission the response is Unauthorized. Example request:

  curl -k "monitoring.192.168.90.217.nip.io/select/0/prometheus/api/v1/query" -d "query=up" -H"Authorization: bearer eyJhbGciOi..."
    - + \ No newline at end of file diff --git a/en/docs/contribute/index.html b/en/docs/contribute/index.html index ebd16349f..696f23ad2 100644 --- a/en/docs/contribute/index.html +++ b/en/docs/contribute/index.html @@ -5,7 +5,7 @@ 贡献指南 | Framework as a Building Block for Kubernetes - + @@ -14,7 +14,7 @@ 在仓库主页面的左手边。然后你就可以在你的 GitHub 用户名中看到你的仓库了。

  • Clone 你自己的仓库到本地来开发。使用 git clone https://github.com/<your-username>/<your-project>.git 来克隆代码仓库到你的本地机器。然后你可以创建新的分支来完成你想做的改动。

  • 设置远程上游 上游设置为 https://github.com/kubebb/<project>.git 例如:

    git remote add upstream https://github.com/kubebb/core.git
    git remote set-url --push upstream no-pushing

    增加 upstream ,我们可以轻松地将本地分支与上游分支同步。

• Create a branch to add a new feature or fix an issue. Update your local working directory:

    cd <project>
    git fetch upstream
    git checkout main
    git rebase upstream/main

    Create a new branch:

    git checkout -b <new-branch>

    After making changes on the new branch, you can build and test your code.

• PR conventions

    Creating a PR is the only way to modify files in the KubeBB project.

    git commit --signoff -m "description of this PR"

    To help reviewers understand the purpose of your PR, the PR description must follow this convention:

    <type>: <description>

    [optional body]

    where the type is one of:

• feat - introduces a new feature
• fix - fixes a bug
• chore - changes unrelated to a fix or feature that do not modify source or test files (e.g. updating dependencies)
• refactor - refactored code that neither fixes a bug nor adds a feature
• docs - documentation updates, such as README or other markdown files
• style - changes that do not affect the meaning of the code, usually related to formatting (whitespace, missing semicolons, etc.)
• test - adds new tests or corrects existing ones
• perf - performance improvements
• ci - related to continuous integration
• build - changes affecting the build system or external dependencies
• revert - reverts a previous commit

If the PR resolves an issue, you must add Fix: #1 #2 in the PR, as shown below:

pr_example

Code conventions

Helping out in any way

We chose GitHub as the primary place for KubeBB collaboration, so the latest KubeBB updates are always there. Although contributing through PRs is an explicit way to help, we equally welcome any other way:

• Answering other people's issues
• Helping solve other people's problems
• Helping review other people's PRs
• Participating in discussions
• Writing technical blog posts
• etc.

Joining the community

If you want to become a member of the KubeBB GitHub organization, see below:

Joining the KubeBB GitHub organization

Before asking to join, we ask that you make a small number of contributions first, to demonstrate your intent to keep contributing to KubeBB.

• Note: anyone can contribute to KubeBB; joining the GitHub organization is not a mandatory step.

There are many ways to contribute to KubeBB:

• Submit PRs
• Report bugs or provide feedback
• Answer questions on GitHub

Submitting your application

• Create an issue in the KubeBB repository and list as much of your work as possible.
• AT two existing reviewers to get their approval.
• After the request is approved, an admin will send you an invitation.
  • This is a manual process, usually run a few times a week.
  • If a week passes without an invitation, contact us by email or DingTalk.
    - + \ No newline at end of file diff --git a/en/docs/core/concepts/buildingbase_resources/index.html b/en/docs/core/concepts/buildingbase_resources/index.html index cad5256b7..82ba2cb1b 100644 --- a/en/docs/core/concepts/buildingbase_resources/index.html +++ b/en/docs/core/concepts/buildingbase_resources/index.html @@ -5,13 +5,13 @@ 扩展资源(底座) | Framework as a Building Block for Kubernetes - +

Extended Resources (Building Base)

The building-base extended resources adapt the building base's portal service and are not involved in core component lifecycle management. There are currently two kinds:

• Menu: portal menus
• Portal: portal routes

A menu is a building-base portal resource that combines with micro-frontend pages to implement customizable portal menus.

Definition

The code definition lives in Menus; the details are as follows:

tip

Note: for the yaml below, to access the bar field we write spec.foo.bar

spec:
  foo:
    bar: xx
• spec.id: menu group ID
• spec.text: Chinese name of the menu
• spec.textEn: English name of the menu
• spec.column: the column index of the menu group
• spec.rankingInColumn: the menu's order within its group; smaller numbers sort first
• spec.icon: menu icon
• spec.replaceSiderBackNextPathnamePattern: used by the back button of a replacement menu; set it when the new pathname is a replacement menu whose back button should return to the current pathname
• spec.pathname: menu route
• spec.redirect: redirect route; takes precedence over pathname, so clicking the menu jumps to the redirect route
• spec.target: same as the target attribute of an a tag
• spec.requiredRoles: roles required for the menu to be visible
• spec.requiredModuleBits: the module bits the menu maps to (visible if any one is satisfied)
• spec.tenant: whether the menu's route allows switching tenants
• spec.project: whether the menu's route allows switching projects
• spec.cluster: whether the menu's route allows switching clusters
• spec.isRenderSelectCurrent: whether to render the project/cluster selector
• spec.useChildrenReplaceSider: whether the sider is replaced when entering a child page
• spec.getTitleForReplaceSider: function to obtain the title
• spec.parent: parent menu ID
• spec.parentOwnerReferences: parent menu dependency
• spec.disabled: controls menu visibility

Portal routes

A portal route is a building-base portal resource used to configure access paths.

Definition

The code definition lives in Menus; the details are as follows:

tip

Note: for the yaml below, to access the bar field we write spec.foo.bar

spec:
  foo:
    bar: xx
• spec.path: the request access path
• spec.entry: the access path of the static assets
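A minimal Portal sketch under these assumptions (the apiVersion mirrors the Menu examples in these docs; all values are illustrative):

apiVersion: component.t7d.io/v1beta1
kind: Portal
metadata:
  name: demo-portal
spec:
  path: /demo-feature1
  entry: /demo-static/index.html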
    - + \ No newline at end of file diff --git a/en/docs/core/concepts/component/index.html b/en/docs/core/concepts/component/index.html index 195917484..962d17d83 100644 --- a/en/docs/core/concepts/component/index.html +++ b/en/docs/core/concepts/component/index.html @@ -5,13 +5,13 @@ 组件 | Framework as a Building Block for Kubernetes - +

Components

A Component is the concept that maps a chart package to an in-cluster resource; it defines the chart package's basic description, version information, and so on. Components are normally created by a Repository and do not need to be created by hand.

Definition

The CRD's code definition lives in ComponentTypes. All of a Component's information is defined in its status; the meaning and purpose of each field is described below, and a trimmed example follows the list.

• status.name

  Holds the name of the chart package; it must conform to Kubernetes naming rules.

• status.displayName

  Holds the display name of the chart package; it comes from the annotation core.kubebb.k8s.com.cn/displayname of the component's latest version and may be empty.

• status.versions

  An array holding the chart package's versions. Each version contains the following:

  • status.versions[index].appVersion the version of the application inside the chart package.
  • status.versions[index].annotations the version's annotations, such as the component's display name.
  • status.versions[index].createdAt creation time
  • status.versions[index].updatedAt update time
  • status.versions[index].deprecated whether this version is deprecated
  • status.versions[index].version the chart package version
  • status.versions[index].digest the digest
• status.description

  Description of the chart package.

• status.maintainers

  An array; each entry is a maintainer of the chart package and contains:

  • status.maintainers[index].name the maintainer's name
  • status.maintainers[index].email the maintainer's email
  • status.maintainers[index].url the maintainer's website
• status.home

  The component's homepage.

• status.sources

  A string array defining the component's source code repositories.

• status.keywords

  A string array of keywords associated with the component.

• status.icon

  Defines the component's icon.

• status.deprecated

  Defines whether the component is deprecated.
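A trimmed Component sketch assembled from the fields above. All values are illustrative; a real Component is created and filled in by the repository watcher:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Component
metadata:
  name: kubebb.kubebb-core        # <repository>.<chart name>
  namespace: kubebb-system
status:
  name: kubebb-core
  displayName: 内核
  description: Declarative component lifecycle management   # illustrative
  versions:
    - version: v0.1.10
      appVersion: v0.1.10
      createdAt: "2023-08-01T00:00:00Z"   # illustrative timestamps
      updatedAt: "2023-08-01T00:00:00Z"
      deprecated: false
  maintainers:
    - name: kubebb
  deprecated: false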

How it works

The repository watcher syncs the component list from the repository server and creates/updates Components. Therefore:

• Components should not be created manually; they should all be obtained automatically through the watcher of the corresponding component repository
• Components belonging to the same repository can be looked up with kubebb.component.repository=<repository-name>, as in the command below
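For example, to list every Component synced from a repository named kubebb (the same label query used in the quick-start section of this document):

kubectl get components -nkubebb-system -l kubebb.component.repository=kubebb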
Component deployment (ComponentPlan)
• Only when spec.approved in a ComponentPlan is true will the corresponding helm release actually be installed.
• Image replacement for a single ComponentPlan (the spec.override.images field) follows the kustomize ImageTagTransformer spec, and the implementation calls that part of the kustomize code directly, which lowers the learning cost for users and guarantees compatibility and correctness.
• Both the image replacement of a single ComponentPlan and the image replacement of a whole Repository are implemented with Helm post-rendering.
• Image override strategy

[figure: image-changed]

Relationship between ComponentPlan and Helm release

[figure: componentplan]
Component rating (Rating)

1. ClusterRole

The ClusterRole defines all the permissions the Tasks need at runtime; when new permissions are required, simply update this ClusterRole.

2. ServiceAccount

Named kubebb-system.kubebb-rating. One of these serviceaccounts is created in the namespace of every Repository.

3. ClusterRoleBinding

Named kubebb-system.kubebb-rating; it binds the serviceaccount kubebb-system.kubebb-rating to the clusterrole kubebb-system.kubebb-rating.

kubectl get clusterrole,clusterrolebinding kubebb-system.kubebb-rating
NAME                                                                CREATED AT
clusterrole.rbac.authorization.k8s.io/kubebb-system.kubebb-rating   2023-08-21T09:24:12Z

NAME                                                                        ROLE                                      AGE
clusterrolebinding.rbac.authorization.k8s.io/kubebb-system.kubebb-rating   ClusterRole/kubebb-system.kubebb-rating   8m8s

4. Pipeline, Task

A pipeline named kubebb, plus Tasks named kubebb-rback and kubebb-helm-lint. A Task defines the concrete actions of the job to run, while the Pipeline defines which Tasks to run and the order in which they execute.

kubectl get pipeline -nkubebb-system
NAME     AGE
kubebb   4m19s

kubectl get task -nkubebb-system
NAME               AGE
kubebb-helm-lint   4m25s
kubebb-rback       4m25s

Users may define their own Tasks and Pipelines, but these resources must be placed in the same namespace as the operator. The ClusterRole, ClusterRoleBinding, and ServiceAccount are used by the pipelinerun when executing Tasks, so that Tasks do not fail due to insufficient permissions.

Core logic

1. When a Rating is created

Two labels, rating.component=<component-name> and rating.repository=<repository-name>, are added to the Rating to record the names of the component and repository associated with it.

2. When a Rating is updated

Updates to spec and status are currently ignored; the handling logic runs only when metadata changes.

Based on the pipeline list defined in spec, PipelineRuns are created; the controller also watches PipelineRun changes and syncs their status, together with Task and TaskRun information, back to the Rating.

3. When a Rating is deleted

When a Rating is deleted, the PipelineRuns it created are deleted as well.

Usage

An example Rating CR:

# rating.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Rating
metadata:
  name: rating
  namespace: kubebb-system
spec:
  componentName: kubebb.kubebb-core
  pipelineParams:
    - pipelineName: kubebb
      params:
        - name: URL
          value:
            type: string
            stringVal: https://github.com/kubebb/components/releases/download/kubebb-core-v0.1.10/kubebb-core-v0.1.10.tgz
            arrayVal: []
            objectVal: {}
        - name: COMPONENT_NAME
          value:
            stringVal: kubebb-core
            type: string
        - name: VERSION
          value:
            stringVal: v0.1.10
            type: string
        - name: REPOSITORY_NAME
          value:
            stringVal: kubebb
            type: string

The Rating defined by the YAML above includes the default pipeline and passes in the parameters that pipeline needs. Apply the YAML:

kubectl apply -f rating.yaml

You can then watch the Rating for changes:

kubectl -nkubebb-system get rating -oyaml -w

If the run fails, the reason is reported in status. On success you get a status like the following:

status:
  pipelineRuns:
    rating.kubebb:
      actualWeight: 2
      conditions:
        - lastTransitionTime: "2023-08-23T05:39:53Z"
          message: 'Tasks Completed: 2 (Failed: 0, Cancelled 0), Skipped: 0'
          reason: Succeeded
          status: "True"
          type: Succeeded
      expectWeight: 2
      pipelineName: kubebb
      tasks:
        - conditions:
            - lastTransitionTime: "2023-08-23T05:39:53Z"
              message: All Steps have completed executing
              reason: Succeeded
              status: "True"
              type: Succeeded
          name: kubebb-rback
          taskRunName: rating.kubebb-kubebb-rback
        - conditions:
            - lastTransitionTime: "2023-08-23T05:39:53Z"
              message: All Steps have completed executing
              reason: Succeeded
              status: "True"
              type: Succeeded
          name: kubebb-helm-lint
          taskRunName: rating.kubebb-kubebb-helm-lint

To see the output logs of each task, first list the pods, then view the logs of the pod you are interested in, as sketched below.

kubectl get po -nkubebb-system
NAME                                 READY   STATUS      RESTARTS   AGE
kubebb-5dbf45964c-26jpp              1/1     Running     0          3m53s
rating.kubebb-kubebb-helm-lint-pod   0/1     Completed   0          2m9s
rating.kubebb-kubebb-rback-pod       0/1     Completed   0          2m9s
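For example, to read the helm-lint task's output (pod name taken from the listing above):

kubectl logs -nkubebb-system rating.kubebb-kubebb-helm-lint-pod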
Component repository (Repository)

…defines filter conditions on the versions of wordpress, matching exactly the two versions 16.1.14 and 16.1.13. All images in the repository that come from docker.io are rewritten to 192.168.1.1, and images whose path is library are rewritten to system-container; for example, the image docker.io/library/nginx:v1.2.3 in the repository becomes 192.168.1.1/system-container/nginx:v1.2.3.

Additional notes on OCI repositories

Supported addresses

A Repository may use the address of an OCI image registry. The following forms are currently supported (a Repository sketch using one of them follows the list):

• Native Harbor 2.x and above:
  • Project level: tries to fetch all tags of all images under the project, e.g. oci://demo.goharbor.io/helm-test (demo.goharbor.io is Harbor's online test server; its data is wiped every 2 days, see the related notes in the docs)
  • Image level: tries to fetch all tags of the image, e.g. oci://demo.goharbor.io/helm-test/nginx
• Docker Hub:
  • Project level: tries to fetch all tags of all images under the project, e.g. oci://registry-1.docker.io/bitnamicharts
  • Image level: tries to fetch all tags of the image, e.g. oci://registry-1.docker.io/bitnamicharts/wordpress
• GitHub Packages
  • GitHub Packages, organization level:
    • The organization's address: tries to fetch all tags of all images under the organization, e.g. oci://ghcr.io/oci-helm-example
    • The address of an image uploaded directly under the organization: tries to fetch all tags of that image, e.g. oci://ghcr.io/oci-helm-example/redis
    • The address of a repository in the organization: tries to fetch all tags of all images under that repository, e.g. oci://ghcr.io/oci-helm-example/helm-oci-example
    • The address of an image inside one of the organization's repositories: tries to fetch all tags of that image, e.g. oci://ghcr.io/oci-helm-example/helm-oci-example/nginx
  • GitHub Packages, user level:
    • The user's address: tries to fetch all tags of all images under the user, e.g. oci://ghcr.io/abirdcfly
    • The address of an image uploaded directly under the user: tries to fetch all tags of that image, e.g. oci://ghcr.io/abirdcfly/redis
    • The address of one of the user's repositories: tries to fetch all tags of all images under that repository, e.g. oci://ghcr.io/abirdcfly/helm-oci-example
    • The address of an image inside one of the user's repositories: tries to fetch all tags of that image, e.g. oci://ghcr.io/abirdcfly/helm-oci-example/nginx
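Such an address goes straight into the Repository's spec.url; a minimal sketch using one of the Docker Hub examples above (the metadata.name is illustrative):

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: bitnami-oci
  namespace: kubebb-system
spec:
  url: oci://registry-1.docker.io/bitnamicharts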

Limitations

1. Due to limitations of the GitHub Packages API

To manage GitHub Packages with the REST API, you must authenticate with a personal access token (classic), and to access package metadata the token must include the read:packages scope.

When using GitHub Packages as the OCI storage address, a personal access token (classic) must therefore be provided; users can set the environment variable GITHUB_PAT_TOKEN to override the token the system provides by default (see the sketch after this list).

2. Neither native Harbor nor GitHub Packages states the image type in its API responses, so it is impossible to tell from the API result whether an OCI image holds a Helm chart or an ordinary docker image. Both interpretations are therefore attempted, and you may see errors in the logs or in resource statuses. We recommend dedicating an OCI repository to Helm chart storage.

3. Private repositories are not supported yet; support will come in a later release.

4. Because they work differently (a chartmuseum-style repository serves an index file, whereas an OCI repository can only be inspected by pulling each archive and parsing its contents), fetching from an OCI repository is slower than from a chartmuseum repository, and every storage address has its own API rate limits. The parsing concurrency for OCI repositories defaults to 5 and can be overridden with the environment variable OCI_PULL_WORKER; the larger the number, the higher the parsing concurrency and the more likely you are to hit 429 Too Many Requests errors.
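One way to set these environment variables on a running installation is to patch the operator's deployment. A sketch, assuming the core was installed as kubebb-core in the kubebb-system namespace as in the install guide later in this document; values are illustrative:

# Raise the OCI parsing concurrency and supply a GitHub token
kubectl set env deployment/kubebb-core -nkubebb-system OCI_PULL_WORKER=10 GITHUB_PAT_TOKEN=<your-classic-pat>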

How it works

A Repository is implemented as a Kubernetes operator. It periodically fetches data from the chart repository and creates or updates the Components in the cluster; it generally does not delete Components, but marks those no longer present in the chart repository as deprecated.

1. When a Repository is created or updated

On creation or update of a Repository, the controller checks whether finalizers have been added to the resource and whether the URL change history has been updated correctly.

Once all updates are handled, the chartmuseum watcher starts; each time it fetches a batch of chart packages, it compares them with the Components already in the cluster and creates or updates Components accordingly.

Already-created Components are never deleted; if a Component exists in the cluster but is not present in the current chart repository, it is marked as deprecated.

2. When a Repository is deleted

Components created by a Repository carry OwnerReferences, so deleting the Repository automatically deletes the associated Components.

3. Image override strategy

[figure: image-changed]

Component subscription (Subscription)

• spec.componentPlanInstallMethod How the component install plan is installed; defaults to auto, with auto and manual as the options.

• spec.schedule Optional. When the component install plan is allowed to install; empty by default. It only takes effect when spec.componentPlanInstallMethod is auto and the upstream has published a new version. Cron format, e.g. 45 20 * * *, meaning new versions are installed only after 20:45 each day.

    # ┌───────────── minute (0 - 59)
    # │ ┌───────────── hour (0 - 23)
    # │ │ ┌───────────── day of the month (1 - 31)
    # │ │ │ ┌───────────── month (1 - 12)
    # │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday)
    # │ │ │ │ │
    # * * * * *
• spec.* (remaining fields) A Subscription contains the full set of custom configuration fields of the component install plan. See the ComponentPlan documentation for details.

• How it works

  A Subscription is implemented as a Kubernetes operator. When the subscription controller observes that the component a Subscription refers to has been created or updated in the cluster, and the Subscription has not yet handled that update event, it creates a ComponentPlan named sub-<subscription-name>-<installed-version> from the ComponentPlan configuration carried in the Subscription, triggering the subsequent component installation steps. A minimal sketch follows.
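A minimal Subscription sketch; the exact field layout is assumed from the descriptions above, and the component reference follows the ComponentPlan examples elsewhere in this document:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Subscription
metadata:
  name: minio-sub            # illustrative name
  namespace: default
spec:
  component:
    name: kubebb.minio       # illustrative component reference
    namespace: kubebb-system
  componentPlanInstallMethod: auto
  schedule: "45 20 * * *"    # optional: only install new versions after 20:45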


Introduction

The core is developed on the Kubernetes operator pattern. It provides complete component lifecycle management, component subscription, and automated deployment, and uses Tekton extensions to add capabilities such as automated component rating and pre-installation checks.

Overall architecture

[figure: KubeBB Core architecture diagram]

Core strengths

Declarative component lifecycle management

Component lifecycle

A component's lifecycle falls into three main phases:

[figure: component_lifecycle]

1. Development

Developers build the component, optionally on the low-code platform, choose a packaging method suited to the component type, and publish the packaged artifacts to a repository service.

Component artifacts generally come in two kinds:

• Image artifacts: a finished component usually needs to be built into an image and pushed to an image registry
• Install packages: the resource configuration used to install and deploy the component (usually Helm charts), pushed to a charts repository

Image artifacts are managed through public registries (Docker Hub) or private registries, and we do nothing special with them. The component repository service is mainly used to store install packages (charts).

2. Deployment

A system administrator manually searches the component list stored in the repository service, obtains the available component information (publisher, version, installation configuration, and so on), then configures the component as the situation requires and installs it into the system. After installation, version updates have to be checked by hand, and upgrades performed with care.

3. Usage

Once a component is installed into the system, ordinary users access its services through the unified entry point and unified user authentication provided by the building-base kit.

The advantages of being declarative

Declarative component lifecycle management has the following advantages:

• Readability: component definitions are easier to read and understand, because they express the desired result rather than the steps to get there.
• Maintainability: components are easier to maintain, because they are easier to understand, easier to modify, and less error-prone.
• Reusability: components are easier to reuse, because they are usually context-independent and usable in different environments.
• Extensibility: components are easier to extend, because they are usually built from components and modules that can simply be composed into more complex systems.
• Reliability: they are more reliable, because they are usually based on static configuration rather than runtime state, which means fewer runtime errors and less unexpected behavior.

Multi-dimensional component rating

By integrating the Tekton pipeline component, component rating is automated, and the Rating CRD aggregates and analyzes the evaluation data.

Rating currently covers three dimensions:

• Security: evaluates the security of the component and of its software supply chain.
• Reliability: evaluates whether the component itself has been well tested for functionality and performance.
• Usability: evaluates whether the component ships enough documentation and examples to guide users.

Full compatibility with the Helm ecosystem

Helm is a mature package management tool that offers a simple way to manage the deployment and upgrade of Kubernetes applications, with a huge community and many excellent projects. The core was therefore designed from the start to be fully compatible with the Helm ecosystem.

This is reflected in every aspect of the core's design and in what we support.

Extending and adapting to the building-base services

tip
1. The low-code development platform defines base-related resources such as Menus and Routes and packages them into the component template
2. After the core receives the base's custom resources, it automatically parses, configures, and creates the corresponding resources

The building-base services allow the platform to be extended through custom menus and routes. To support this capability we:

• ported the Menu resource type
• ported the Route configuration

thereby linking the 云梯 low-code development platform and the building-base services through the core.

Component rating

The goal of component rating is to evaluate a component along multiple dimensions through as many automated tests as possible. Component rating therefore consists of three parts:

• defining and running the component's automated tests
• collecting and processing the test data
• evaluating the data to derive the component's grade

To implement these three parts, we chose to:

• use Tekton to define a variety of tasks along the security, reliability, and usability dimensions for automated component testing
• define the Rating CRD and its controller, which watches Tekton resources in real time and collects the test data
• use KubeAGI/arcadia to let AI formulate the component evaluation rules and update component grades in real time based on the test data

Core workflow

[figure: rating_workflow]

Task list

tip

All task weights are currently 1; later, different weights will be assigned according to each task's actual importance, and the final score derived from them.

| Task | Dimension | Description | Weight | Status |
| rating-security-rback | Security | obtains the component's complete RBAC via dry-run | 1 | Supported |
| rating-security-slsa | Security | verifies the SLSA supply-chain security level | 1 | In development |
| rating-reliability-linting | Reliability | checks that the component follows the conventions via helm lint | 1 | Supported |
| rating-reliability-testing | Reliability | via helm testing | 1 | In development |
| rating-reliability-deployment-testing | Reliability | verifies that the deployed component meets functional and performance expectations | 1 | In development |
| rating-availability-sample | Usability | verifies that the component package includes a ComponentPlan example | 1 | In development |

Security tasks

rating-security-rback derives the complete set of install manifests from the contents of the chart package and generates a permission-relationship graph from them; from this graph users can judge whether the requested permissions are excessive and decide whether to install.

It runs in the following 4 steps:

1. Download the chart package

The specified chart package is downloaded with helm pull.

2. Generate the .dot file

helm template is used to enumerate everything the chart package would install, and the output is converted with the yq and jq commands into the following format:

{
  "kind": "List",
  "apiVersion": "v1",
  "items": [
    {
      "kind": "ServiceAccount",
      "apiVersion": "v1",
      "metadata": {
        "name": "sa"
      }
    }
  ]
}

The rback command then converts this JSON into a .dot file.

3. Store in a ConfigMap

The resulting .dot file is stored in a ConfigMap.

4. Write the ConfigMap name to the output

A Pipeline can emit run results; the ConfigMap's name is written to the output so that users can conveniently find it later.

Reliability tasks

rating-reliability-linting checks that the chart package is written in the correct format, ensuring the platform can present its information to users in full.

It runs in the following 2 steps:

1. Download the chart package

The chart package is downloaded with helm pull.

2. Run lint

helm lint checks the package's format; its output goes to the task logs. Run by hand, the two steps look roughly like the sketch below.
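A rough manual equivalent of the two steps, using the kubebb-core chart URL from the Rating example earlier in this document (the archive and directory names are illustrative):

# 1. download the chart package
helm pull https://github.com/kubebb/components/releases/download/kubebb-core-v0.1.10/kubebb-core-v0.1.10.tgz

# 2. unpack and lint it
tar -xzf kubebb-core-v0.1.10.tgz
helm lint ./kubebb-core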

Usability tasks

(To be added)


Roadmap

v0.1.0

• Support managing component repositories (Repository)
  • Support Repository Servers compatible with Helm repositories
  • A watcher that monitors the Repository
• Implement Component management
  • The watcher performs CRUD operations on Components
• Support ComponentPlan and Subscription
  • Allow users to subscribe to a Component's latest version changes
  • Plan component deployments with a Helm-chart-compatible ComponentPlan

v0.2.0

• Record Events for each of the core's controllers
• Adapt to the Kubebb building-base services
• Rate Components (Rating) along the security, reliability, and usability dimensions with Tekton Pipelines
• Pre-deployment checks (Check) for ComponentPlan, implemented with Tekton Pipelines
• Enable auth and OCI in component repositories (Repository)
• Integrate with the low-code platform

Component development

Kubebb component install packages use the Helm model and follow the Helm charts development rules. On top of that, we define some extra fields to accommodate the particularities of certain components.

Component types

Functionally, we divide components into two classes:

• System components, such as U4A and TMF, which need system administration privileges to run

• Ordinary functional components, such as minio and weaviate, which can run in any tenant/project without special restrictions

General configuration

See the official Helm documentation.

Advanced component configuration

To let different components control where they are installed and with what permissions, several extra configuration fields are defined by convention.

Chart.yaml

Chart.yaml holds the component's core definition, version, maintainers, and other information predefined by Helm. To support extra special needs, we decided to define them freely through annotations, as shown below:

annotations:
  core.kubebb.k8s.com.cn/displayname: "内核"
  core.kubebb.k8s.com.cn/restricted-tenants: "system-tenant"
  core.kubebb.k8s.com.cn/restricted-namespaces: "msa-system"

• core.kubebb.k8s.com.cn/displayname: fills in the component's display name; Chinese and English are both supported
• core.kubebb.k8s.com.cn/restricted-tenants: restricts the tenants the component may be installed into; separate multiple tenants with ','
• core.kubebb.k8s.com.cn/restricted-namespaces: restricts the projects/namespaces the component may be installed into; separate multiple namespaces with ','
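In context, a minimal Chart.yaml sketch showing where these annotations live; everything apart from the annotations is standard Helm chart metadata, and the values are illustrative:

apiVersion: v2
name: my-component                   # illustrative chart name
version: 0.1.0
description: An example component
annotations:
  core.kubebb.k8s.com.cn/displayname: "My Component"
  core.kubebb.k8s.com.cn/restricted-tenants: "system-tenant"
  core.kubebb.k8s.com.cn/restricted-namespaces: "msa-system"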

Enabling Rating

The Rating feature is disabled by default and must be enabled manually. We recommend enabling Rating with the following steps:

tip

For more detail on Rating, see the component rating design and the Rating CRD definition.

Rating depends on two components:

• Tekton: provides the pipeline capability that runs the component's automated tests
• KubeAGI Arcadia: provides the AI data-analysis capability that completes the component's AI evaluation

Tekton and Arcadia must therefore be installed before the Rating feature can be used.

1. Install kubebb core

Follow the core installation guide to install the core (with Rating not yet enabled).

2. Install the Tekton pipeline

The official component repository provides the Tekton component package and a Tekton installation example, so tekton can be installed quickly through the core.

Before running the commands below, make sure the component kubebb.tekton-operator has finished syncing.

The command:

# Installs into the default namespace by default
kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/examples/tekton-operator/componentplan.yaml

Check the installation status:

kubectl get pods --watch

Once installation completes, the output looks like:

❯ kubectl get pods
NAME                                                 READY   STATUS    RESTARTS   AGE
my-tekton-tekton-operator-68bdffc888-8dtfx           2/2     Running   0          25m
my-tekton-tekton-operator-webhook-78bdfcbc77-6k6cx   1/1     Running   0          25m

If installation does not finish for a long time, inspect the status of the corresponding ComponentPlan resource.

3. Install the Arcadia AI component

The Arcadia component lives in a separate component repository.

1. Add the arcadia component repository

kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/repos/repository_arcadia.yaml

Once that succeeds, list the repository's components with:

kubectl get components -nkubebb-system -l kubebb.component.repository=arcadia

If all is well, the output is:

❯ kubectl get components -nkubebb-system -l kubebb.component.repository=arcadia
NAME                 AGE
arcadia.arcadia      32s
arcadia.jupyterlab   32s
arcadia.llms         32s

2. Install the Arcadia AI component

Here we recommend the subscription-based automatic installation mode; it installs into the default namespace by default.

kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/examples/arcadia/subscription.yaml

3. Check the installation status

❯ kubectl get pods --watch
NAME                                                 READY   STATUS              RESTARTS   AGE
my-tekton-tekton-operator-68bdffc888-8dtfx           2/2     Running             0          48m
my-tekton-tekton-operator-webhook-78bdfcbc77-6k6cx   1/1     Running             0          48m
arcadia-5cb86f8787-jvd7j                             0/1     Pending             0          0s
arcadia-5cb86f8787-jvd7j                             0/1     Pending             0          0s
arcadia-5cb86f8787-jvd7j                             0/1     ContainerCreating   0          0s
arcadia-5cb86f8787-jvd7j                             0/1     Running             0          20s
arcadia-5cb86f8787-jvd7j                             1/1     Running             0          30s

4. Update the core

Enable Rating by setting the parameter deployment.rating_enable=true:

helm upgrade -nkubebb-system kubebb-core kubebb/kubebb-core --set deployment.rating_enable=true

Watch the core's pod status:

❯ kubectl get pods -nkubebb-system --watch
NAME                           READY   STATUS        RESTARTS   AGE
kubebb-core-65ddc99994-25k49   0/1     Running       0          7s
kubebb-core-6d78d7d8fd-vxbc6   1/1     Running       0          119s
kubebb-core-65ddc99994-25k49   1/1     Running       0          10s
kubebb-core-6d78d7d8fd-vxbc6   1/1     Terminating   0          2m2s
kubebb-core-6d78d7d8fd-vxbc6   0/1     Terminating   0          2m3s
kubebb-core-6d78d7d8fd-vxbc6   0/1     Terminating   0          2m3s
kubebb-core-6d78d7d8fd-vxbc6   0/1     Terminating   0          2m3s

If the upgrade succeeds, logs like the following appear in the core pod:

1.6935407235060694e+09 INFO Starting EventSource {"controller": "rating", "controllerGroup": "core.kubebb.k8s.com.cn", "controllerKind": "Rating", "source": "kind source: *v1alpha1.Rating"}
1.6935407235063274e+09 INFO Starting EventSource {"controller": "rating", "controllerGroup": "core.kubebb.k8s.com.cn", "controllerKind": "Rating", "source": "kind source: *v1beta1.PipelineRun"}

Migrating from Helm commands

The core is designed to stay as compatible with the Helm commands as possible; for a developer or user familiar with Helm, adopting the core is easy.

helm repo add: adding a repository

helm repo add bitnami https://charts.bitnami.com/bitnami

Adding a repository corresponds to creating a Repository resource:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: bitnami
spec:
  url: https://charts.bitnami.com/bitnami

helm install: installing a chart

cat << EOF > values.yaml
replicaCount: 2
EOF

helm install nginx bitnami/nginx --version 15.0.2 -f values.yaml --set image.registry=ddd.ccc

Installing a chart corresponds to creating a ComponentPlan resource:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: nginx
spec:
  approved: true
  component:
    name: bitnami.nginx
    namespace: default
  name: nginx
  override:
    valuesFrom:
      - kind: ConfigMap
        name: nginx
        valuesKey: values.yaml
    set:
      - image.registry=ddd.ccc
  version: 15.0.2
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx
data:
  values.yaml: |
    replicaCount: 2

helm upgrade: upgrading a release

helm upgrade nginx bitnami/nginx --set image.registry=docker.io

To upgrade, there are 2 approaches:

1. Create a new ComponentPlan with the new configuration; as long as metadata.namespace (the helm release's namespace) and spec.name (the helm release's name) match, the corresponding helm release is upgraded.
2. Modify the original ComponentPlan in place. Compared with the former, this approach keeps no history but is more flexible; the former approach allows rollback.

helm uninstall: deleting a release

helm uninstall nginx

Deleting a release corresponds to deleting the ComponentPlan. Because ComponentPlans can relate many-to-one to a helm release, note that you must delete the ComponentPlan whose status.latest is True (meaning it corresponds to the latest revision of the helm release) or whose status.installedRevision matches the current helm release revision.

helm rollback: rolling back a release

helm rollback nginx 1

To roll back, simply add the label core.kubebb.k8s.com.cn/rollback: true to the ComponentPlan you want to roll back to, e.g. with the command below.
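A sketch of adding that label with kubectl (ComponentPlan name and namespace are illustrative):

kubectl label componentplan nginx -n default core.kubebb.k8s.com.cn/rollback=true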


A complete guide to the "image replacement" feature

Introduction

Image replacement means replacing the images in a helm chart package with specified image names so that the chart installs normally. It is typically used in air-gapped kubernetes environments that cannot reach the original image registry.

In that situation we usually do 2 things: first, from an environment that can reach the original registry, download the images and upload them to our own registry following some naming scheme; then, at install time, rewrite the image addresses. This feature is designed to make that second step convenient.

It has the following advantages:

1. No changes to the helm chart package are needed.
2. Changes are made on demand, at fine granularity.
3. It is split into repository-level replacement and component-level replacement, which are configured separately, decoupled from each other, and do not interfere with each other.

[figure: image-changed]

Taking the figure above as an example: the image in the helm chart package is docker.com/library/nginx:1.25.1, but when we actually install the chart the image address we need is 192.168.1.1/system-container/nginx:latest; the image replacement feature ensures the latter address is used during installation.

Image replacement is actually implemented with Helm post-rendering.

Parameter reference

Docker image format

We again use docker.com/library/nginx:1.25.1 as the example.

According to the official docker documentation, the basic format is [HOST[:PORT_NUMBER]/]PATH:TAG, where:

• HOST: optional; the hostname specifies where the image lives. It must follow standard DNS rules but may not contain underscores. If no hostname is given, Docker defaults to the public registry at registry-1.docker.io.
• PORT_NUMBER: if a hostname is present, it may optionally be followed by a registry port number in the form :8080.
• PATH: the path consists of slash-separated components. Each component may contain lowercase letters, digits, and separators, where a separator is defined as one period, one or two underscores, or one or more hyphens; a component may not begin or end with a separator. Although the OCI specification supports more than two slash-separated components, most registries support only two. For Docker's public registry the path format is as follows:
  • [NAMESPACE/]REPOSITORY: the first, optional part is usually a user's or organization's namespace; the second, mandatory part is the repository name. When the namespace is absent, Docker uses library as the default namespace.
• After the image name, the optional TAG is a custom, human-readable manifest identifier, usually a specific version or variant of the image. The tag must be valid ASCII and may contain lowercase and uppercase letters, digits, underscores, periods, and hyphens; it may not begin with a period or hyphen and may be at most 128 characters. If no tag is given, Docker commands default to latest.
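Decomposing the running example against that format:

# docker.com/library/nginx:1.25.1
HOST = docker.com       # registry hostname (no port)
PATH = library/nginx    # NAMESPACE/REPOSITORY
TAG  = 1.25.1           # image tag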

Image replacement configuration

Repository level

An example Repository:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: repository-bitnami-special-version
  namespace: kubebb-system
spec:
  # other fields omitted
  imageOverride:
    - registry: docker.io
      newRegistry: 192.168.1.1
      pathOverride:
        path: library
        newPath: system-container

The parameters in detail:

spec.imageOverride Optional. An array defining a series of repository-level image override policies.

Each entry contains:

• spec.imageOverride[].registry the registry's domain address, optionally with a port, e.g. docker.io or 192.168.1.1:5000
• spec.imageOverride[].newRegistry the registry domain address that replaces registry, optionally with a port.
• spec.imageOverride[].pathOverride Optional.
  • spec.imageOverride[].pathOverride.path the old registry path; e.g. in the image address docker.io/library/nginx:latest the path is library.
  • spec.imageOverride[].pathOverride.newPath the new registry path that replaces path; may be empty.

Component install level

Below is a ComponentPlan example:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: nginx-15.0.2
  namespace: kubebb-system
spec:
  # other fields omitted
  override:
    images:
      - name: docker.io/bitnami/nginx
        newTag: latest

The parameters in detail:

spec.override.images An array, similar to kustomize's image customization parameters:

• spec.override.images[].name

  The original image name; the tag is optional. If a tag is included, replacement happens only on an exact tag match: if this field is docker.io/bitnami/nginx:v1, only nginx images tagged v1 match, and a docker.io/bitnami/nginx:v2 would not be replaced.

• spec.override.images[].newName

  The name that replaces the original image name.

• spec.override.images[].newTag

  The new tag that replaces the original tag.

• spec.override.images[].digest

  The new digest that replaces the original tag; if digest is set, the value of newTag is ignored.

Worked examples

Mirroring a registry in bulk

Suppose we want to deploy the helm chart bitnami nginx chart version 15.0.2 into an air-gapped kubernetes environment. The images involved are:

| Original image | Purpose | Required |
| docker.io/bitnami/nginx:1.25.1-debian-11-r0 | used by the nginx deployment | required |
| docker.io/bitnami/git:2.41.0-debian-11-r4 | used by the nginx deployment to import configuration | optional |
| docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | used by the nginx deployment to expose metrics | optional |

Suppose we mirror the docker.io/bitnami/ registry in bulk into our own registry 192.168.1.1:5000; the image addresses above then change as follows:

| Original image | Local image |
| docker.io/bitnami/nginx:1.25.1-debian-11-r0 | 192.168.1.1:5000/nginx:1.25.1-debian-11-r0 |
| docker.io/bitnami/git:2.41.0-debian-11-r4 | 192.168.1.1:5000/git:2.41.0-debian-11-r4 |
| docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | 192.168.1.1:5000/nginx-exporter:0.11.0-debian-11-r91 |

Then we only need to change the Repository's configuration as follows:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: repository-bitnami-special-version
  namespace: kubebb-system
spec:
  # other fields omitted
  imageOverride:
    - registry: docker.io
      newRegistry: 192.168.1.1:5000
      pathOverride:
        path: bitnami
        newPath: ""

Using a custom image

Suppose we want to deploy the helm chart bitnami nginx chart version 15.0.2 into a kubernetes environment that can reach docker.io. The images involved are:

| Original image | Purpose | Required |
| docker.io/bitnami/nginx:1.25.1-debian-11-r0 | used by the nginx deployment | required |
| docker.io/bitnami/git:2.41.0-debian-11-r4 | used by the nginx deployment to import configuration | optional |
| docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | used by the nginx deployment to expose metrics | optional |

Suppose we want to use our own nginx build 192.168.1.1:5000/tmp/nginx:2023; the addresses then change as follows:

| Original image | Local image |
| docker.io/bitnami/nginx:1.25.1-debian-11-r0 | 192.168.1.1:5000/tmp/nginx:2023 |
| docker.io/bitnami/git:2.41.0-debian-11-r4 | docker.io/bitnami/git:2.41.0-debian-11-r4 |
| docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 |

Then we only need the following in the ComponentPlan at install time:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: nginx-15.0.2
  namespace: kubebb-system
spec:
  # other fields omitted
  override:
    images:
      - name: docker.io/bitnami/nginx
        newName: 192.168.1.1:5000/tmp/nginx
        newTag: "2023"

Mirroring a registry in bulk while also using a custom image

Suppose we want to deploy the helm chart bitnami nginx chart version 15.0.2 into an air-gapped kubernetes environment. The images involved are:

| Original image | Purpose | Required |
| docker.io/bitnami/nginx:1.25.1-debian-11-r0 | used by the nginx deployment | required |
| docker.io/bitnami/git:2.41.0-debian-11-r4 | used by the nginx deployment to import configuration | optional |
| docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | used by the nginx deployment to expose metrics | optional |

Suppose we mirror the docker.io/bitnami/ registry in bulk into our own registry 192.168.1.1:5000/bitnami-mirror/ and also want to use our own nginx build 192.168.1.1:5000/tmp/nginx:2023; the addresses then change as follows:

| Original image | Local image |
| docker.io/bitnami/nginx:1.25.1-debian-11-r0 | 192.168.1.1:5000/tmp/nginx:2023 |
| docker.io/bitnami/git:2.41.0-debian-11-r4 | 192.168.1.1:5000/bitnami-mirror/git:2.41.0-debian-11-r4 |
| docker.io/bitnami/nginx-exporter:0.11.0-debian-11-r91 | 192.168.1.1:5000/bitnami-mirror/nginx-exporter:0.11.0-debian-11-r91 |

Then we first configure the Repository as follows:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: repository-bitnami-special-version
  namespace: kubebb-system
spec:
  # other fields omitted
  imageOverride:
    - registry: docker.io
      newRegistry: 192.168.1.1:5000
      pathOverride:
        path: bitnami
        newPath: bitnami-mirror

and then configure the ComponentPlan at install time as follows:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: nginx-15.0.2
  namespace: kubebb-system
spec:
  # other fields omitted
  override:
    images:
      - name: docker.io/bitnami/nginx
        newName: 192.168.1.1:5000/tmp/nginx
        newTag: "2023"

Comparison

Versus --set image=xxx in the helm install command

Many helm chart packages expose variables in values.yaml that hold image addresses.

Some more carefully designed charts even distinguish the image's registry, repository, and tag in detail. (For example, the bitnami nginx chart version 15.0.2 mentioned above provides four variables: image.registry, defaulting to docker.io; image.repository, defaulting to bitnami/nginx; image.tag, defaulting to 1.25.1-debian-11-r0; and image.digest, defaulting to empty.)

Using these variables to change image addresses does work.

But not every helm chart follows these practices, and there is no uniform convention for which variable a chart uses to replace its images.

The image replacement feature sidesteps these problems and replaces images uniformly.

Another scenario: when we relocate an image registry (say, changing harbor's external address, or carrying the images offline into another environment), installing with helm's --set image=xxx would require editing every single command to point at the new registry address. With image replacement, we only change the Repository's configuration; the components' configuration needs no change.

Private cluster deployment

4.3 Upload a chart package

Here we again test with chartmuseum:

cd components/charts/chartmuseum
helm package .

# On success this prints {"saved":true}
curl --data-binary "@chartmuseum-3.10.1.tgz" http://localhost:8080/api/charts

4.4 Check the result

# View index.yaml
curl http://localhost:8080/index.yaml

4.5 Upload the other chart packages

For whichever other chart packages under components/charts you need, repeating step 4.3 completes the upload.


5. Using kubebb-core

5.1 Deploy a Repository

# repo.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: kubebb
  namespace: kubebb-system
spec:
  url: http://chartmuseum.default.svc.cluster.local:8080
  pullStategy:
    intervalSeconds: 120
    retry: 5

Create the repository:

# 1. Create the repository
kubectl apply -f repo.yaml

# 2. Check that the components were created
kubectl get components.core.kubebb.k8s.com.cn -n kubebb-system
NAME                 AGE
kubebb.chartmuseum   2s
kubebb.kubebb-core   2s

As you can see, once the repository is created, the related components are created as well.

5.2 Deploy a Component

Deploying a component uses the componentplan resource. Here we choose to deploy chartmuseum once more: the chartmuseum deployed earlier with helm stores the system's charts, while this one is deployed just to verify that the feature works (we did not prepare any other chart packages).

# componentplan.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: chartmuseum-test
  namespace: default
spec:
  approved: true
  name: chartmuseum-test
  version: 3.10.1
  override:
    set:
      - image.repository=localhost:5001/chartmuseum
      - env.open.DISABLE_API=false
  component:
    name: kubebb.chartmuseum
    namespace: kubebb-system

Create the componentplan:

kubectl apply -f componentplan.yaml

Once deployment finishes, you can see that the chartmuseum-test pod is up as well.

kubectl get po

NAME                                READY   STATUS    RESTARTS   AGE
chartmuseum-6c4bc46898-msp7r        1/1     Running   0          107s
chartmuseum-test-86d66fd5d7-lp2rn   1/1     Running   0          11s

5.3 Repository image rewriting

This step tests the image override policy; skip it if you do not need it. We again use the chartmuseum deployed earlier with helm, which holds a chartmuseum package that uses the image:

ghcr.io/helm/chartmuseum:v0.16.0

# repo-override-image.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: repo-override-image
  namespace: kubebb-system
spec:
  url: http://chartmuseum.default.svc.cluster.local:8080
  pullStategy:
    intervalSeconds: 120
    retry: 5
  imageOverride:
    - registry: ghcr.io
      newRegistry: localhost:5001
      pathOverride:
        path: helm
        newPath: ""

Create the Repository:

kubectl apply -f repo-override-image.yaml

After creating the repository, list the components:

kubectl get components -A
NAMESPACE       NAME                              AGE
kubebb-system   kubebb.chartmuseum                18m
kubebb-system   kubebb.kubebb-core                18m
kubebb-system   repo-override-image.chartmuseum   5s
kubebb-system   repo-override-image.kubebb-core   5s

Install chartmuseum once more. Note that this time we no longer set the image chartmuseum uses.

# componentplan-default-override.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: chartmuseum-test233
  namespace: default
spec:
  approved: true
  name: chartmuseum-test233
  version: 3.10.1
  override:
    set:
      - env.open.DISABLE_API=false
  component:
    name: repo-override-image.chartmuseum
    namespace: kubebb-system

Create the componentplan:

kubectl apply -f componentplan-default-override.yaml

Check that the pods are running:

kubectl get po

NAME                                   READY   STATUS    RESTARTS   AGE
chartmuseum-6c4bc46898-msp7r           1/1     Running   0          32m
chartmuseum-test-86d66fd5d7-lp2rn      1/1     Running   0          31m
chartmuseum-test233-544cbfb87c-b6pdd   1/1     Running   0          12s
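To confirm the rewrite actually took effect, you can print the pod's image; given the override above it should now start with localhost:5001 (pod name taken from the listing):

kubectl get po chartmuseum-test233-544cbfb87c-b6pdd -o jsonpath='{.spec.containers[0].image}'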

Deployment complete ✅


Overview

KubeBB (Kubernetes Building Blocks) is a core-driven component lifecycle management platform. It integrates an out-of-the-box cloud-native building base with low-code component development, combining them into a cloud-native three-layer component model.

[figure: overview]

Kubebb provides three kits:

Core kit: provides declarative component lifecycle management and a component marketplace, and uses Tekton pipelines to strengthen the integration of low-code platform components with the building-base services.

Building-base kit: provides an out-of-the-box cloud-native service portal, including basic components such as users, OIDC authentication, permissions, auditing, tenant management, and portal services, plus cluster components such as certificate management and Nginx Ingress.

Low-code kit: built on the Low-Code Engine and Dolt, a relational database with Git semantics, and relying on the building-base portal's menu and route resources and on the core kit's component management, it covers the full chain from component development through testing to release.

The relationship among the three core kits can be compared to an operating system:

• Kubernetes ~ the operating system kernel
• Core ~ the software installer
• Building-base kit ~ the operating system's system software, such as the GUI, the user system, and networking
• Low-code development kit ~ the operating system's software development tools

Core kit

The core kit is the project we currently focus on and develop, fully following open-source project governance norms. Our current goals:

1. Declarative full component lifecycle management

Built on the Operator Pattern, providing declarative management of the entire component lifecycle.

[figure: component-lifecycle]

Four core CRD implementations are provided:

Repository: defines the component repository's access information, polling policy, and filter options, so that the latest component list is fetched from the repository service periodically.

Component: records the component's basic description, version list, deprecation status, and so on.

ComponentPlan: defines manual approval for installation, the component reference, version settings, and helm-like configuration override policies, enabling traceable deployment, upgrade, and rollback of components.

Subscription: defines a user's subscription to component version updates.

Plus one extension CRD implementation, Rating, which integrates Tekton Pipelines.

2. An open component marketplace

The component marketplace productizes the core's capabilities; it is published to the official component repository as a component adapted to the building-base services, extending the KubeBB ecosystem.

Building-base kit

The building-base kit provides a unified authentication center and portal entry by integrating the following components:

[figure: portal]

Low-code kit

The low-code kit provides frontend module development and code generation for the three-layer component development model, and relies on the core kit for standardized packaging, testing, and release.

[figure: lowcode_development]

Technical architecture

Platform development separates frontend and backend in a K8S-centered development framework that follows K8S extension mechanisms and API conventions. The basic logic of the overall development architecture is shown in the figure below: [figure 2]

1. Authentication for all component development and extensions goes through the unified authentication center
2. Authentication is handled centrally by DockApp, the micro-frontend main framework; other micro-frontend extensions need no separate handling with the authentication center
3. The development architecture can be viewed as three layers:
• Layer 1: the frontend uses a micro-frontend architecture and is developed with low-code as much as possible, raising the share of auto-generated code
• Layer 2: OpenAPIs are added per business need, forming a unified BFF layer that aggregates APIs and serves the business-scenario data the frontend needs
• Layer 3: the backend is developed in the CRD + controller operator pattern, yielding a data-driven process development model
4. The externally exposed APIs consist of two main parts:
• the OpenAPIs served from the BFF layer
• the resource APIs served from the K8S layer

Getting more components

Browse the component marketplace and install more of the service components you need into the portal.

Development model

KubeBB component development separates frontend and backend in a K8S-centered development framework that follows K8S extension mechanisms and API conventions. The basic logic of the overall architecture is shown in the figure below: [figure: dev_arch]

1. Authentication for all component development and extensions goes through the unified authentication center
2. Authentication is handled centrally by DockApp, the micro-frontend main framework; other micro-frontend extensions need no separate handling with the authentication center

The three-layer model

Components built with low-code development follow this three-layer development model:

• Layer 1: the frontend uses a micro-frontend architecture and is developed with low-code, raising the share of auto-generated code
• Layer 2: OpenAPIs are added per business need, forming a unified BFF layer that aggregates APIs and serves the business-scenario data the frontend needs
• Layer 3: the backend is developed in the CRD + controller operator pattern, yielding a data-driven process development model

Installing the building base

This chapter walks through deploying the building-base components, covering the related open-source components, the prerequisites, and a quick deployment, and then adding the deployed cluster to the service portal.

tip

Complete the core installation before installing.

Deployment

1. Create the official component repository

See "Using the official component repository".

2. Create the building-base namespace

tip

Only the namespace u4a-system is currently supported.

    kubectl create namespace u4a-system

3. Deploy the Cluster Component

The component deployment manifest cluster_componentplan.yaml is as follows (see the component's documentation for details):

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: cluster-component
  namespace: u4a-system
spec:
  approved: true
  name: cluster-component
  version: 0.1.3
  override:
    set:
      - ingress-nginx.controller.nodeSelector.kubernetes\.io/hostname=kubebb-core-control-plane
  component:
    name: kubebb.cluster-component
    namespace: kubebb-system

Parameter to adjust:

• override.set.ingress-nginx.controller.nodeSelector.kubernetes\.io/hostname selects the node that will serve ingress-nginx

Here, on the kind development cluster, that is the kubebb-core-control-plane node.

Deploy with the following command:

    kubectl apply -nu4a-system -f cluster_componentplan.yaml

Once the Cluster Component is deployed, check the component's deployment status with:

    kubectl get componentplan -nu4a-system cluster-component -oyaml

The component deployed successfully when its status looks like this:

status:
  conditions:
    - lastTransitionTime: "2023-07-25T08:15:41Z"
      reason: ""
      status: "True"
      type: Approved
    - lastTransitionTime: "2023-07-25T08:15:44Z"
      reason: InstallSuccess
      status: "True"
      type: Actioned
    - lastTransitionTime: "2023-07-25T08:15:44Z"
      reason: ""
      status: "True"
      type: Succeeded

4. Deploy the U4A Component

The component deployment manifest u4a_componentplan.yaml is as follows:

apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: ComponentPlan
metadata:
  name: u4a-component
  namespace: u4a-system
spec:
  approved: true
  name: u4a-component
  version: 0.1.5
  wait: true
  override:
    valuesFrom:
      - kind: ConfigMap
        name: u4acm
        valuesKey: "values.yaml"
  component:
    name: kubebb.u4a-component
    namespace: kubebb-system

The U4A-Component's parameters are injected through a ConfigMap; see the U4A component deployment guide for how to create the ConfigMap.

Deploy with the following command:

    kubectl apply -nu4a-system -f u4a_componentplan.yaml

Once the U4A Component is deployed, check the component's deployment status with:

    kubectl get componentplan -nu4a-system u4a-component -oyaml

5. Access the building-base service portal

Get the portal service's access address with:

(base) ➜  ~ kubectl get ingress -nu4a-system
NAME                             CLASS    HOSTS                      ADDRESS   PORTS     AGE
bff-server-ingress               <none>   portal.172.18.0.2.nip.io             80, 443   4h55m
bff-server-ingress-socket        <none>   portal.172.18.0.2.nip.io             80, 443   4h55m
kube-oidc-proxy-server-ingress   <none>   k8s.172.18.0.2.nip.io                80, 443   4h55m

Open https://portal.172.18.0.2.nip.io in a browser to enter the service portal. The default credentials are:

• Username: admin
• Password: kubebb-admin

Note: since nip.io is used as the DNS resolution service, the domain names under HOSTS must resolve to the IP address in ADDRESS.

Uninstalling

1. Uninstall the U4A Component

    kubectl delete componentplan -nu4a-system u4a-component

2. Uninstall the Cluster Component

    kubectl delete componentplan -nu4a-system cluster-component

Installing the core

tip

Complete the prerequisites before installing.

Installation

tip

Kubebb provides an official helm repository for easy installation: https://kubebb.github.io/components/

1. Add the helm repository

helm repo add kubebb https://kubebb.github.io/components/
helm repo update

2. Create the namespace

Adjust the namespace name to your situation.

kubectl create namespace kubebb-system

3. Install

helm install -nkubebb-system kubebb-core kubebb/kubebb-core

4. Check the installation status

kubectl get pods -nkubebb-system

If all is well, the output is:

NAME                           READY   STATUS    RESTARTS   AGE
kubebb-core-6bd7c5f679-742mq   1/1     Running   0          21h

Quick tour

Once the core is installed, you can quickly try component-based deployment through the official component repository:

tip

The official kubebb component repository is added by default during core installation and provides several certified repositories, components, and component applications.

1. List the repositories with the following command:

kubectl get repository -nkubebb-system

By default it contains at least the kubebb repository:

(base) ➜  charts git:(dev) kubectl get repository -nkubebb-system
NAME     AGE
kubebb   14m

If you do not see kubebb, add it manually:

kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/repos/repository_kubebb.yaml

2. Fetch the components in the official repository

kubectl get components -nkubebb-system -l kubebb.component.repository=kubebb

If all is well, the output is:

NAME                       AGE
kubebb.bc-apis             135m
kubebb.bc-depository       135m
kubebb.bc-explorer         135m
kubebb.cluster-component   135m
kubebb.fabric-operator     135m
kubebb.ingress-nginx       135m
kubebb.kubebb              135m
kubebb.kubebb-core         135m
kubebb.minio               135m
kubebb.tekton-operator     135m
kubebb.u4a-component       135m
kubebb.weaviate            135m

3. Deploy a component

Taking kubebb.minio as the example:

kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/examples/minio/componentplan.yaml

Check the component's deployment status:

kubectl get componentplan my-minio -oyaml

Check the component's pods:

kubectl get pods -l core.kubebb.k8s.com.cn/componentplan=my-minio

If all is well, the output is:

NAME         READY   STATUS    RESTARTS   AGE
my-minio-0   1/1     Running   0          42h
my-minio-1   1/1     Running   0          42h
my-minio-2   1/1     Running   0          42h

Deploying a private repository

1. Deploy chartmuseum from the official repository

kubectl apply -f https://raw.githubusercontent.com/kubebb/components/main/examples/chartmuseum/componentplan.yaml

2. Add the repository

# repository_chartmuseum.yaml
apiVersion: core.kubebb.k8s.com.cn/v1alpha1
kind: Repository
metadata:
  name: chartmuseum
  namespace: kubebb-system
spec:
  url: http://chartmuseum.kubebb-system.svc.cluster.local:8080
  pullStategy:
    intervalSeconds: 120
    retry: 5

Create the repository:

kubectl apply -f repository_chartmuseum.yaml

The result:

kubectl get repository -nkubebb-system
NAME          AGE
chartmuseum   4m41s
kubebb        15h

Expose the port:

kubectl port-forward service/chartmuseum 8080:8080 -nkubebb-system

Upload a custom chart:

helm create mychart
cd mychart
helm package .
curl --data-binary "@mychart-0.1.0.tgz" http://localhost:8080/api/charts

View it in the private repository:

kubectl get component -l kubebb.component.repository=chartmuseum -nkubebb-system
NAME                  AGE
chartmuseum.mychart   4m27s

Prerequisites

Base environment

Kubernetes cluster

tip

If you have no kubernetes cluster, follow the tutorial below to deploy a development cluster with kind. By default, to accommodate the building base, at least one node of the cluster must serve as the Ingress Controller node and expose ports 80 and 443.

Kind development cluster

1. Install kind

See: https://kind.sigs.k8s.io/docs/user/quick-start/#installation

Taking a Linux environment as the example:

# For AMD64 / x86_64
[ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64
# For ARM64
[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-arm64
chmod +x ./kind
sudo mv ./kind /usr/local/bin/kind
2. Prepare the single-node cluster config file kind-config.yaml

kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: kubebb-core
nodes:
  - role: control-plane
    image: kindest/node:v1.24.13
    kubeadmConfigPatches:
      - |
        kind: InitConfiguration
        nodeRegistration:
          kubeletExtraArgs:
            node-labels: "ingress-ready=true"
    extraPortMappings:
      - containerPort: 80
        hostPort: 80
        protocol: TCP
      - containerPort: 443
        hostPort: 443
        protocol: TCP

3. Create the cluster

kind create cluster --config=kind-config.yaml

4. Check the cluster status

kubectl cluster-info --context kind-kubebb-core

If all is well, the output is:

Kubernetes control plane is running at https://127.0.0.1:42607
CoreDNS is running at https://127.0.0.1:42607/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

5. Check the cluster nodes

kubectl get nodes

If all is well, the output is:

NAME                        STATUS   ROLES           AGE   VERSION
kubebb-core-control-plane   Ready    control-plane   21m   v1.24.13

With docker ps you can see that this node exposes ports 80 and 443:

(base) ➜  building-base git:(azure) docker ps
CONTAINER ID   IMAGE                   COMMAND                  CREATED          STATUS          PORTS                                                                 NAMES
e4e3820cdb5a   kindest/node:v1.24.13   "/usr/local/bin/entr…"   22 minutes ago   Up 22 minutes   0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp, 127.0.0.1:33611->6443/tcp   kubebb-core-control-plane

Trying out custom configuration

1. Customize the portal's primary color

kubectl edit cm portal-global-configs -n u4a-system

Edit primaryColor to customize the portal's primary color:

apiVersion: v1
kind: ConfigMap
metadata:
  name: portal-global-configs
data:
  global-configs: |
    {"theme": {"primaryColor": "#FE8F35"}}

2. Customize menus

All kubebb menus are defined through the Menu CRD. To add your own menus, refer to the following Menu examples:

# Main menu
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu
spec:
  column: 1
  isRenderSelectCurrent: false
  parentOwnerReferences:
    apiVersion: ""
    kind: ""
    name: ""
    uid: ""
  rankingInColumn: 100
  tenant: true
  text: 测试菜单
  textEn: "Test Menu"
---
# Index menu for the test menu
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu-index
spec:
  getTitleForReplaceSider: {}
  parentOwnerReferences:
    apiVersion: component.t7d.io/v1beta1
    blockOwnerDeletion: false
    controller: false
    kind: Menu
    name: demo-menu
    uid: ""
  rankingInColumn: 100
  tenant: true
  text: 菜单索引项
  textEn: "Menu Index Item"
---
# Submenu that actually links somewhere
apiVersion: component.t7d.io/v1beta1
kind: Menu
metadata:
  name: demo-menu-submenu1
spec:
  getTitleForReplaceSider: {}
  isRenderSelectCurrent: false
  parentOwnerReferences:
    apiVersion: component.t7d.io/v1beta1
    blockOwnerDeletion: false
    controller: false
    kind: Menu
    name: demo-menu-index
    uid: ""
  pathname: /demo-feature1
  rankingInColumn: 200
  text: 测试子菜单
  textEn: "Test Submenu"

Deploy the menu items into the environment with kubectl apply -f, as shown in the figure below: [figure 1]

3. Multiple languages & light/dark mode

1) Switch languages with the language button in the top-right corner; Chinese and English are currently supported

2) Toggle light/dark mode with the button in the top-right corner

    Framework as a Building Block for Kubernetes

Build your own service portal on Kubernetes the way you stack building blocks

A building base to start building quickly

Provides the basic capabilities of accounts, authentication (OIDC), permissions (RBAC), and auditing (Audit), delivering the building base on which to build with more standard technology

Core-driven declarative component management

Provides declarative component repository management, component syncing, component subscription and deployment, and multi-dimensional component rating; fully adapted to the Helm ecosystem and integrated with the micro-frontend framework

A flexible component development model

Based on the micro-frontend framework and low-code development, it defines a standard model for packaging and publishing components, so developers can rapidly develop and release components on top of the building base following the development conventions, and serve them through the unified service portal


    Markdown page example

    You don't need React to write simple standalone pages.
