diff --git a/go.mod b/go.mod index cd990e107..946690abc 100644 --- a/go.mod +++ b/go.mod @@ -2,16 +2,38 @@ module github.com/uyuni-project/uyuni-tools go 1.21 +toolchain go1.21.9 + require ( github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 github.com/briandowns/spinner v1.23.0 github.com/chai2010/gettext-go v1.0.2 github.com/spf13/cobra v1.8.0 + k8s.io/api v0.29.7 + k8s.io/apimachinery v0.29.7 + k8s.io/cli-runtime v0.29.7 ) require ( - github.com/creack/pty v1.1.17 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/creack/pty v1.1.18 // indirect github.com/fatih/color v1.7.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + golang.org/x/net v0.23.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/client-go v0.29.7 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) require ( @@ -30,9 +52,9 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.7.0 github.com/subosito/gotenv v1.2.0 // indirect - golang.org/x/sys v0.12.0 // indirect - golang.org/x/term v0.10.0 - golang.org/x/text v0.3.2 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 + golang.org/x/text v0.14.0 // indirect gopkg.in/ini.v1 v1.51.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 652454822..1887571d4 100644 --- a/go.sum +++ b/go.sum @@ -11,6 +11,8 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -39,8 +41,9 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= 
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -55,10 +58,14 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -71,6 +78,12 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -108,19 +121,26 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod 
h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -142,8 +162,14 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -166,6 +192,8 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= @@ -196,12 +224,15 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -213,6 +244,8 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -231,6 +264,8 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -245,6 +280,10 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -253,6 +292,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -266,18 +307,22 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -297,7 +342,13 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -321,9 
+372,12 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= @@ -332,6 +386,7 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -341,4 +396,22 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.29.7 h1:Q2/thp7YYESgy0MGzxT9RvA/6doLJHBXSFH8GGLxSbc= +k8s.io/api v0.29.7/go.mod h1:mPimdbyuIjwoLtBEVIGVUYb4BKOE+44XHt/n4IqKsLA= +k8s.io/apimachinery v0.29.7 h1:ICXzya58Q7hyEEfnTrbmdfX1n1schSepX2KUfC2/ykc= +k8s.io/apimachinery v0.29.7/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/cli-runtime v0.29.7 h1:6IxyxaIm3N31+PKXb1K7Tpf+100mm9hd9HMMYWMH2QE= +k8s.io/cli-runtime v0.29.7/go.mod h1:0pcclC4k3rkzYNAvw3zeiPNtg8Buv0orK+5MuhEKFBU= +k8s.io/client-go v0.29.7 h1:vTtiFrGBKlcBhxaeZC4eDrqui1e108nsTyue/KU63IY= +k8s.io/client-go v0.29.7/go.mod h1:69BvVqdRozgR/9TP45u/oO0tfrdbP+I8RqrcCJQshzg= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= 
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/mgradm/cmd/install/kubernetes/utils.go b/mgradm/cmd/install/kubernetes/utils.go index 0f1768615..013012c19 100644 --- a/mgradm/cmd/install/kubernetes/utils.go +++ b/mgradm/cmd/install/kubernetes/utils.go @@ -61,10 +61,12 @@ func installForKubernetes(globalFlags *types.GlobalFlags, // Deploy the SSL CA or server certificate if flags.Ssl.UseExisting() { - kubernetes.DeployExistingCertificate(&flags.Helm, &flags.Ssl, clusterInfos.GetKubeconfig()) + if err := kubernetes.DeployExistingCertificate(flags.Helm.Uyuni.Namespace, &flags.Ssl); err != nil { + return err + } } else { ca := ssl.SslPair{} - sslArgs, err := kubernetes.DeployCertificate( + sslArgs, err := kubernetes.DeployGeneratedCa( &flags.Helm, &flags.Ssl, "", &ca, clusterInfos.GetKubeconfig(), fqdn, flags.Image.PullPolicy, ) diff --git a/mgradm/cmd/migrate/kubernetes/dataExtractor.go b/mgradm/cmd/migrate/kubernetes/dataExtractor.go new file mode 100644 index 000000000..89a2dc9c9 --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/dataExtractor.go @@ -0,0 +1,157 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "errors" + "fmt" + "os" + "path" + "strings" + "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + "gopkg.in/yaml.v2" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// MigrationData represents the files and data extracted from the migration sync phase. 
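+//
+// The extractor pod below prints a single YAML document mapping each file name under
+// /var/lib/uyuni-tools to its content, for example (values are illustrative):
+//
+//	spacewalk.crt: |2
+//	  -----BEGIN CERTIFICATE-----
+//	  ...
+//	data: |2
+//	  <inspection output, parsed into utils.InspectResult>
+//
+// Keys other than the handled ones are ignored.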
+type MigrationData struct {
+	CaKey      string
+	CaCert     string
+	Data       *utils.InspectResult
+	ServerCert string
+	ServerKey  string
+}
+
+func extractMigrationData(namespace string, image string, volume types.VolumeMount) (*MigrationData, error) {
+	// Read the files from the volume by dumping them to the container's stdout
+	mounts := kubernetes.ConvertVolumeMounts([]types.VolumeMount{volume})
+	volumes := kubernetes.CreateVolumes([]types.VolumeMount{volume})
+
+	podName := "uyuni-data-extractor"
+
+	// Use a plain pod since this is a very simple task: reading files out of a volume
+	pod := core.Pod{
+		TypeMeta:   meta.TypeMeta{Kind: "Pod", APIVersion: "v1"},
+		ObjectMeta: meta.ObjectMeta{Name: podName, Namespace: namespace},
+		Spec: core.PodSpec{
+			Containers: []core.Container{
+				{
+					Name:            "extractor",
+					Image:           image,
+					ImagePullPolicy: core.PullIfNotPresent,
+					Command: []string{
+						"sh", "-c",
+						"for f in /var/lib/uyuni-tools/*; do echo \"`basename $f`: |2\"; cat $f | sed 's/^/  /'; done",
+					},
+					VolumeMounts: mounts,
+				},
+			},
+			Volumes:       volumes,
+			RestartPolicy: core.RestartPolicyNever,
+		},
+	}
+
+	tempDir, err := utils.TempDir()
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(tempDir)
+
+	// Run the pod
+	extractorPodPath := path.Join(tempDir, "extractor-pod.yaml")
+	if err := kubernetes.YamlFile([]runtime.Object{&pod}, extractorPodPath); err != nil {
+		return nil, err
+	}
+
+	if err := utils.RunCmd("kubectl", "apply", "-f", extractorPodPath); err != nil {
+		return nil, utils.Errorf(err, L("failed to run the migration data extractor pod"))
+	}
+
+	// Clean the pod up even if waiting for it or reading its logs fails
+	defer func() {
+		if err := utils.RunCmd("kubectl", "delete", "pod", "-n", namespace, podName); err != nil {
+			log.Err(err).Msgf(L("failed to delete the uyuni-data-extractor pod"))
+		}
+	}()
+
+	if err := waitForPod(namespace, podName, 60); err != nil {
+		return nil, err
+	}
+
+	data, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "logs", "-n", namespace, podName)
+	if err != nil {
+		return nil, utils.Errorf(err, L("failed to get the migration data extractor pod logs"))
+	}
+
+	// Parse the content
+	files := make(map[string]string)
+	if err := yaml.Unmarshal(data, &files); err != nil {
+		return nil, utils.Errorf(err, L("failed to parse data extractor pod output"))
+	}
+
+	var result MigrationData
+	for file, content := range files {
+		switch file {
+		case "RHN-ORG-PRIVATE-SSL-KEY":
+			result.CaKey = content
+		case "RHN-ORG-TRUSTED-SSL-CERT":
+			result.CaCert = content
+		case "spacewalk.crt":
+			result.ServerCert = content
+		case "spacewalk.key":
+			result.ServerKey = content
+		case "data":
+			parsedData, err := utils.ReadInspectDataString[utils.InspectResult]([]byte(content))
+			if err != nil {
+				return nil, utils.Errorf(err, L("failed to parse migration data file"))
+			}
+			result.Data = parsedData
+		}
+	}
+
+	if result.Data == nil {
+		return nil, errors.New(L("found no data file after migration"))
+	}
+
+	return &result, nil
+}
+
+func waitForPod(namespace string, pod string, timeout int) error {
+	for i := 0; ; i++ {
+		out, err := utils.RunCmdOutput(
+			zerolog.DebugLevel, "kubectl", "get", "pod", "-n", namespace, pod,
+			"-o", "jsonpath={.status.containerStatuses[0].state.terminated.reason}",
+		)
+		if err != nil {
+			return utils.Errorf(err, L("failed to get %s pod status"), pod)
+		}
+		status := strings.TrimSpace(string(out))
+		if status != "" {
+			if
status == "Completed" { + return nil + } + return fmt.Errorf(L("%[1]s pod failed with status %[2]s"), pod, status) + } + + if timeout > 0 && i == timeout { + return fmt.Errorf(L("%[1]s pod failed to complete within %[2]d seconds"), pod, timeout) + } + time.Sleep(1 * time.Second) + } +} diff --git a/mgradm/cmd/migrate/kubernetes/dbFinalize.go b/mgradm/cmd/migrate/kubernetes/dbFinalize.go new file mode 100644 index 000000000..7118c4c8d --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/dbFinalize.go @@ -0,0 +1,60 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/rs/zerolog/log" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + batch "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const dbFinalizeJobName = "uyuni-db-finalize" + +func startDbFinalizeJob( + namespace string, + serverImage string, + pullPolicy string, + schemaUpdateRequired bool, + migration bool, +) error { + log.Info().Msg(L("Running database finalization, this could be long depending on the size of the database…")) + job, err := getDbFinalizeJob(namespace, serverImage, pullPolicy, schemaUpdateRequired, migration) + if err != nil { + return err + } + + return kubernetes.Apply([]runtime.Object{job}, L("failed to run the database finalization job")) +} + +func getDbFinalizeJob( + namespace string, + image string, + pullPolicy string, + schemaUpdateRequired bool, + migration bool, +) (*batch.Job, error) { + mounts := []types.VolumeMount{ + {MountPath: "/var/lib/pgsql", Name: "var-pgsql"}, + {MountPath: "/etc/rhn", Name: "etc-rhn"}, + } + + // Prepare the script + scriptData := templates.FinalizePostgresTemplateData{ + RunAutotune: true, + RunReindex: true, + RunSchemaUpdate: schemaUpdateRequired, + Migration: migration, + Kubernetes: true, + } + + return kubernetes.GetScriptJob(namespace, dbFinalizeJobName, image, pullPolicy, mounts, scriptData) +} diff --git a/mgradm/cmd/migrate/kubernetes/dbUpgradeJob.go b/mgradm/cmd/migrate/kubernetes/dbUpgradeJob.go new file mode 100644 index 000000000..eac6bdcee --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/dbUpgradeJob.go @@ -0,0 +1,75 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "fmt" + + "github.com/rs/zerolog/log" + . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + batch "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const dbUpgradeJobName = "uyuni-db-upgrade" + +func startDbUpgradeJob( + namespace string, + registry string, + image types.ImageFlags, + migrationImage types.ImageFlags, + oldPgsql string, + newPgsql string, +) error { + log.Info().Msgf(L("Upgrading PostgreSQL database from %[1]s to %[2]s…"), oldPgsql, newPgsql) + + var migrationImageUrl string + var err error + if migrationImage.Name == "" { + imageName := fmt.Sprintf("-migration-%s-%s", oldPgsql, newPgsql) + migrationImageUrl, err = utils.ComputeImage(registry, image.Tag, image, imageName) + } else { + migrationImageUrl, err = utils.ComputeImage(registry, image.Tag, migrationImage) + } + if err != nil { + return utils.Errorf(err, L("failed to compute image URL")) + } + + log.Info().Msgf(L("Using database upgrade image %s"), migrationImageUrl) + + job, err := getDbUpgradeJob(namespace, migrationImageUrl, image.PullPolicy, oldPgsql, newPgsql) + if err != nil { + return err + } + + return kubernetes.Apply([]runtime.Object{job}, L("failed to run the database upgrade job")) +} + +func getDbUpgradeJob( + namespace string, + image string, + pullPolicy string, + oldPgsql string, + newPgsql string, +) (*batch.Job, error) { + mounts := []types.VolumeMount{ + {MountPath: "/var/lib/pgsql", Name: "var-pgsql"}, + } + + // Prepare the script + scriptData := templates.PostgreSQLVersionUpgradeTemplateData{ + OldVersion: oldPgsql, + NewVersion: newPgsql, + } + + return kubernetes.GetScriptJob(namespace, dbUpgradeJobName, image, pullPolicy, mounts, scriptData) +} diff --git a/mgradm/cmd/migrate/kubernetes/deployment.go b/mgradm/cmd/migrate/kubernetes/deployment.go new file mode 100644 index 000000000..81afe45b5 --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/deployment.go @@ -0,0 +1,245 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "os" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const serverDeployName = "uyuni" + +func createServerDeployment( + namespace string, + serverImage string, + pullPolicy string, + timezone string, + debug bool, + mirrorPvName string, +) error { + if mirrorPvName != "" { + // Create a PVC using the required mirror PV + if err := kubernetes.CreatePersistentVolumeClaimForVolume(namespace, mirrorPvName); err != nil { + return err + } + } + + serverDeploy := getServerDeployment( + namespace, serverImage, kubernetes.GetPullPolicy(pullPolicy), timezone, debug, mirrorPvName, + ) + + tempDir, err := utils.TempDir() + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + return kubernetes.Apply([]runtime.Object{serverDeploy}, L("failed to create the server deployment")) +} + +func getServerDeployment( + namespace string, + image string, + pullPolicy core.PullPolicy, + timezone string, + debug bool, + mirrorPvName string, +) *apps.Deployment { + var replicas int32 = 1 + + envs := []core.EnvVar{ + {Name: "TZ", Value: timezone}, + } + + mounts := getServerMounts() + + if mirrorPvName != "" { + // Add a mount for the mirror + mounts = append(mounts, types.VolumeMount{MountPath: "/mirror", Name: mirrorPvName}) + + // Add the environment variable for the deployment to use the mirror + // This doesn't makes sense for migration as the setup script is not executed + envs = append(envs, core.EnvVar{Name: "MIRROR_PATH", Value: "/mirror"}) + } + + // Convert our mounts to Kubernetes objects + volumeMounts := kubernetes.ConvertVolumeMounts(mounts) + + // The init mounts are the same mounts but in /mnt just for the init container populating the volumes + var initMounts []core.VolumeMount + for _, mount := range volumeMounts { + initMount := mount.DeepCopy() + initMount.MountPath = "/mnt" + initMount.MountPath + initMounts = append(initMounts, *initMount) + } + + volumes := kubernetes.CreateVolumes(mounts) + + runMount, runVolume := kubernetes.CreateTmpfsMount("/run", "256Mi") + cgroupMount, cgroupVolume := kubernetes.CreateHostPathMount( + "/sys/fs/cgroup", "/sys/fs/cgroup", core.HostPathDirectory, + ) + + caMount := core.VolumeMount{ + Name: "ca-cert", + MountPath: "/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT", + ReadOnly: true, + SubPath: "ca.crt", + } + tlsKeyMount := core.VolumeMount{Name: "tls-key", MountPath: "/etc/pki/spacewalk-tls"} + + caVolume := kubernetes.CreateConfigVolume("ca-cert", "uyuni-ca") + tlsKeyVolume := kubernetes.CreateSecretVolume("tls-key", "uyuni-cert") + var keyMode int32 = 0600 + tlsKeyVolume.VolumeSource.Secret.Items = []core.KeyToPath{ + {Key: "tls.crt", Path: "spacewalk.crt"}, + {Key: "tls.key", Path: "spacewalk.key", Mode: &keyMode}, + } + + initMounts = append(initMounts, tlsKeyMount) + volumeMounts = append(volumeMounts, runMount, cgroupMount, caMount, tlsKeyMount) + volumes = append(volumes, runVolume, cgroupVolume, caVolume, tlsKeyVolume) + + // Compute the needed ports + ports := []types.PortMap{ + utils.NewPortMap("http", 80, 80), + utils.NewPortMap("https", 443, 443), + } + ports = append(ports, utils.TCP_PORTS...) + ports = append(ports, utils.UDP_PORTS...) + if debug { + ports = append(ports, utils.DEBUG_PORTS...) 
+	}
+
+	deployment := apps.Deployment{
+		TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      serverDeployName,
+			Namespace: namespace,
+			Labels:    map[string]string{"app": kubernetes.ServerApp},
+		},
+		Spec: apps.DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &meta.LabelSelector{
+				MatchLabels: map[string]string{"app": kubernetes.ServerApp},
+			},
+			Template: core.PodTemplateSpec{
+				ObjectMeta: meta.ObjectMeta{
+					Labels: map[string]string{"app": kubernetes.ServerApp},
+				},
+				Spec: core.PodSpec{
+					InitContainers: []core.Container{
+						{
+							Name:            "init-volumes",
+							Image:           image,
+							ImagePullPolicy: pullPolicy,
+							Command:         []string{"sh", "-x", "-c", initScript},
+							VolumeMounts:    initMounts,
+						},
+					},
+					Containers: []core.Container{
+						{
+							Name:            "uyuni",
+							Image:           image,
+							ImagePullPolicy: pullPolicy,
+							Lifecycle: &core.Lifecycle{
+								PreStop: &core.LifecycleHandler{
+									Exec: &core.ExecAction{
+										Command: []string{"/bin/sh", "-c", "spacewalk-service stop && systemctl stop postgresql"},
+									},
+								},
+							},
+							Ports: kubernetes.ConvertPortMaps(ports),
+							Env:   envs,
+							ReadinessProbe: &core.Probe{
+								ProbeHandler: core.ProbeHandler{
+									HTTPGet: &core.HTTPGetAction{
+										Port: intstr.FromInt(80),
+										Path: "/rhn/manager/login",
+									},
+								},
+								PeriodSeconds:    30,
+								TimeoutSeconds:   20,
+								FailureThreshold: 5,
+							},
+							LivenessProbe: &core.Probe{
+								ProbeHandler: core.ProbeHandler{
+									HTTPGet: &core.HTTPGetAction{
+										Port: intstr.FromInt(80),
+										Path: "/rhn/manager/login",
+									},
+								},
+								InitialDelaySeconds: 60,
+								PeriodSeconds:       60,
+								TimeoutSeconds:      20,
+								FailureThreshold:    5,
+							},
+							VolumeMounts: volumeMounts,
+						},
+					},
+					Volumes: volumes,
+				},
+			},
+		},
+	}
+
+	return &deployment
+}
+
+const initScript = `
+# Fill the empty volumes
+for vol in /var/lib/cobbler \
+           /var/lib/salt \
+           /var/lib/pgsql \
+           /var/cache \
+           /var/log \
+           /srv/salt \
+           /srv/www \
+           /srv/tftpboot \
+           /srv/formula_metadata \
+           /srv/pillar \
+           /srv/susemanager \
+           /srv/spacewalk \
+           /root \
+           /etc/apache2 \
+           /etc/rhn \
+           /etc/systemd/system/multi-user.target.wants \
+           /etc/systemd/system/sockets.target.wants \
+           /etc/salt \
+           /etc/tomcat \
+           /etc/cobbler \
+           /etc/sysconfig \
+           /etc/postfix \
+           /etc/sssd \
+           /etc/pki/tls
+do
+    chown --reference=$vol /mnt$vol;
+    chmod --reference=$vol /mnt$vol;
+    if [ -z "$(ls -A /mnt$vol)" ]; then
+        cp -a $vol/. /mnt$vol;
+        if [ "$vol" = "/srv/www" ]; then
+            ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /mnt$vol/RHN-ORG-TRUSTED-SSL-CERT;
+        fi
+
+        if [ "$vol" = "/etc/pki/tls" ]; then
+            ln -s /etc/pki/spacewalk-tls/spacewalk.crt /mnt/etc/pki/tls/certs/spacewalk.crt;
+            ln -s /etc/pki/spacewalk-tls/spacewalk.key /mnt/etc/pki/tls/private/spacewalk.key;
+            cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/etc/pki/tls/private/pg-spacewalk.key;
+            chown postgres:postgres /mnt/etc/pki/tls/private/pg-spacewalk.key;
+        fi
+    fi
+done
+`
diff --git a/mgradm/cmd/migrate/kubernetes/hubApi.go b/mgradm/cmd/migrate/kubernetes/hubApi.go
new file mode 100644
index 000000000..969e00b14
--- /dev/null
+++ b/mgradm/cmd/migrate/kubernetes/hubApi.go
@@ -0,0 +1,93 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"fmt"
+
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	.
"github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + hubApiDeployName = "uyuni-hub-api" + hubApiAppName = "uyuni-hub-api" + hubApiServiceName = "hub-api" +) + +func installHubApi(namespace string, image string, pullPolicy string) error { + if err := startHubApiDeployment(namespace, image, pullPolicy); err != nil { + return err + } + + if err := createHubApiService(namespace); err != nil { + return err + } + + // TODO Do we want an ingress to use port 80 / 443 from the outside too? + // This would have an impact on the user's scripts. + return nil +} + +func startHubApiDeployment(namespace string, image string, pullPolicy string) error { + deploy := getHubApiDeployment(namespace, image, pullPolicy) + return kubernetes.Apply([]runtime.Object{deploy}, L("failed to create the hub API deployment")) +} + +func getHubApiDeployment(namespace string, image string, pullPolicy string) *apps.Deployment { + var replicas int32 = 1 + + return &apps.Deployment{ + TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: hubApiDeployName, + Namespace: namespace, + Labels: map[string]string{"app": hubApiAppName}, + }, + Spec: apps.DeploymentSpec{ + Replicas: &replicas, + Selector: &meta.LabelSelector{ + MatchLabels: map[string]string{"app": hubApiAppName}, + }, + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: map[string]string{"app": hubApiAppName}, + }, + Spec: core.PodSpec{ + Containers: []core.Container{ + { + Name: "uyuni-hub-api", + Image: image, + ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy), + Ports: []core.ContainerPort{ + { + ContainerPort: int32(2830), + }, + }, + Env: []core.EnvVar{ + {Name: "HUB_API_URL", Value: fmt.Sprintf("http://%s/rpc/api", webServiceName)}, + {Name: "HUB_CONNECT_TIMEOUT", Value: "10"}, + {Name: "HUB_REQUEST_TIMEOUT", Value: "10"}, + {Name: "HUB_CONNECT_USING_SSL", Value: "false"}, + }, + }, + }, + }, + }, + }, + } +} + +func createHubApiService(namespace string) error { + svc := getService(namespace, hubApiServiceName, core.ProtocolTCP, utils.NewPortMap("api", 2830, 2830)) + return kubernetes.Apply([]runtime.Object{svc}, L("failed to create the hub API service")) +} diff --git a/mgradm/cmd/migrate/kubernetes/ingress.go b/mgradm/cmd/migrate/kubernetes/ingress.go new file mode 100644 index 000000000..60f8f4537 --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/ingress.go @@ -0,0 +1,180 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + net "k8s.io/api/networking/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const certSecretName = "uyuni-cert" + +func createIngress(namespace string, fqdn string, caIssuer string, ingressName string) error { + ingresses := []runtime.Object{ + getSslIngress(namespace, fqdn, caIssuer, ingressName), + getNoSslIngress(namespace, fqdn, ingressName), + } + sslRedirectIngress := getSslRedirectIngress(namespace, fqdn, ingressName) + if sslRedirectIngress != nil { + ingresses = append(ingresses, sslRedirectIngress) + } + + return kubernetes.Apply(ingresses, L("failed to create the hub API service")) +} + +func getSslIngress(namespace string, fqdn string, caIssuer string, ingressName string) *net.Ingress { + annotations := map[string]string{} + if caIssuer != "" { + annotations["cert-manager.io/issuer"] = caIssuer + } + if ingressName == "traefik" { + annotations["traefik.ingress.kubernetes.io/router.tls"] = "true" + annotations["traefik.ingress.kubernetes.io/router.tls.domains.n.main"] = fqdn + annotations["traefik.ingress.kubernetes.io/router.entrypoints"] = "websecure,web" + } + + ingress := net.Ingress{ + TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: "uyuni-ingress-ssl", + Annotations: annotations, + Labels: map[string]string{"app": kubernetes.ServerApp}, + }, + Spec: net.IngressSpec{ + TLS: []net.IngressTLS{ + {Hosts: []string{fqdn}, SecretName: certSecretName}, + }, + Rules: []net.IngressRule{ + getIngressWebRule(fqdn), + }, + }, + } + + return &ingress +} + +func getSslRedirectIngress(namespace string, fqdn string, ingressName string) *net.Ingress { + var ingress *net.Ingress + + // Nginx doesn't require a special ingress for the SSL redirection. 
+ if ingressName == "traefik" { + ingress = &net.Ingress{ + TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: "uyuni-ingress-ssl-redirect", + Annotations: map[string]string{ + "traefik.ingress.kubernetes.io/router.middlewares": "default-uyuni-https-redirect@kubernetescrd", + "traefik.ingress.kubernetes.io/router.entrypoints": "web", + }, + Labels: map[string]string{"app": kubernetes.ServerApp}, + }, + Spec: net.IngressSpec{ + Rules: []net.IngressRule{ + getIngressWebRule(fqdn), + }, + }, + } + } + + return ingress +} + +var noSslPaths = []string{ + "/pub", + "/rhn/([^/])+/DownloadFile", + "/(rhn/)?rpc/api", + "/rhn/errors", + "/rhn/ty/TinyUrl", + "/rhn/websocket", + "/rhn/metrics", + "/cobbler_api", + "/cblr", + "/httpboot", + "/images", + "/cobbler", + "/os-images", + "/tftp", + "/docs", +} + +func getNoSslIngress(namespace string, fqdn string, ingressName string) *net.Ingress { + annotations := map[string]string{} + if ingressName == "nginx" { + annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false" + } + if ingressName == "traefik" { + annotations["traefik.ingress.kubernetes.io/router.tls"] = "false" + annotations["traefik.ingress.kubernetes.io/router.entrypoints"] = "web" + } + + pathType := net.PathTypePrefix + paths := []net.HTTPIngressPath{} + for _, noSslPath := range noSslPaths { + paths = append(paths, net.HTTPIngressPath{ + Backend: webServiceBackend, + Path: noSslPath, + PathType: &pathType, + }) + } + + ingress := net.Ingress{ + TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: "uyuni-ingress-nossl", + Annotations: annotations, + Labels: map[string]string{"app": kubernetes.ServerApp}, + }, + Spec: net.IngressSpec{ + TLS: []net.IngressTLS{ + {Hosts: []string{fqdn}, SecretName: certSecretName}, + }, + Rules: []net.IngressRule{ + { + Host: fqdn, + IngressRuleValue: net.IngressRuleValue{ + HTTP: &net.HTTPIngressRuleValue{Paths: paths}, + }, + }, + }, + }, + } + + return &ingress +} + +// build the ingress rule object catching all HTTP traffic. +func getIngressWebRule(fqdn string) net.IngressRule { + pathType := net.PathTypePrefix + + return net.IngressRule{ + Host: fqdn, + IngressRuleValue: net.IngressRuleValue{ + HTTP: &net.HTTPIngressRuleValue{ + Paths: []net.HTTPIngressPath{ + { + Backend: webServiceBackend, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + } +} + +var webServiceBackend net.IngressBackend = net.IngressBackend{ + Service: &net.IngressServiceBackend{ + Name: webServiceName, + Port: net.ServiceBackendPort{Number: 80}, + }, +} diff --git a/mgradm/cmd/migrate/kubernetes/kubernetes.go b/mgradm/cmd/migrate/kubernetes/kubernetes.go index 19bf749c7..15d4a86ed 100644 --- a/mgradm/cmd/migrate/kubernetes/kubernetes.go +++ b/mgradm/cmd/migrate/kubernetes/kubernetes.go @@ -19,11 +19,22 @@ type kubernetesMigrateFlags struct { shared.MigrateFlags `mapstructure:",squash"` Helm cmd_utils.HelmFlags Ssl cmd_utils.SslCertFlags + Volumes cmd_utils.VolumesFlags + Ssh sshFlags +} + +type sshFlags struct { + Key struct { + Public string + Private string + } + Knownhosts string + Config string } // NewCommand for kubernetes migration. 
 func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command {
-	migrateCmd := &cobra.Command{
+	cmd := &cobra.Command{
 		Use:   "kubernetes [source server FQDN]",
 		Short: L("Migrate a remote server to containers running on a kubernetes cluster"),
 		Long: L(`Migrate a remote server to containers running on a kubernetes cluster
@@ -31,15 +42,16 @@
 This migration command assumes a few things:
   * the SSH configuration for the source server is complete, including user and all needed options to connect to the machine,
-  * an SSH agent is started and the key to use to connect to the server is added to it,
   * kubectl and helm are installed locally,
   * a working kubectl configuration should be set to connect to the cluster to deploy to
+The SSH parameters may be left empty if the target Kubernetes namespace contains:
+  * an uyuni-migration-ssh ConfigMap with config and known_hosts items,
+  * an uyuni-migration-key secret with key and key.pub items with a passwordless key.
+
 When migrating a server with a automatically generated SSL Root CA certificate, the private key password will be required to convert it to RSA in a kubernetes secret.
 This is not needed if the source server does not have a generated SSL CA certificate.
-
-NOTE: migrating to a remote cluster is not supported yet!
 `),
 		Args: cobra.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
@@ -48,9 +60,23 @@ NOTE: migrating to a remote cluster is not supported yet!
 		},
 	}
 
-	shared.AddMigrateFlags(migrateCmd)
-	cmd_utils.AddHelmInstallFlag(migrateCmd)
-	migrateCmd.Flags().String("ssl-password", "", L("SSL CA generated private key password"))
+	shared.AddMigrateFlags(cmd)
+	cmd_utils.AddHelmInstallFlag(cmd)
+	cmd_utils.AddVolumesFlags(cmd)
+
+	cmd.Flags().String("ssl-password", "", L("SSL CA generated private key password"))
+
+	cmd.Flags().String("ssh-key-public", "", L("Path to the SSH public key to use to connect to the source server"))
+	cmd.Flags().String("ssh-key-private", "", L("Path to the passwordless SSH private key to use to connect to the source server"))
+	cmd.Flags().String("ssh-knownhosts", "", L("Path to the SSH known_hosts file to use to connect to the source server"))
+	cmd.Flags().String("ssh-config", "", L("Path to the SSH configuration file to use to connect to the source server"))
+
+	const sshGroupId = "ssh"
+	_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: sshGroupId, Title: L("SSH Configuration Flags")})
+	_ = utils.AddFlagToHelpGroupID(cmd, "ssh-key-public", sshGroupId)
+	_ = utils.AddFlagToHelpGroupID(cmd, "ssh-key-private", sshGroupId)
+	_ = utils.AddFlagToHelpGroupID(cmd, "ssh-knownhosts", sshGroupId)
+	_ = utils.AddFlagToHelpGroupID(cmd, "ssh-config", sshGroupId)
 
-	return migrateCmd
+	return cmd
 }
diff --git a/mgradm/cmd/migrate/kubernetes/migrationJob.go b/mgradm/cmd/migrate/kubernetes/migrationJob.go
new file mode 100644
index 000000000..78c2d0c3b
--- /dev/null
+++ b/mgradm/cmd/migrate/kubernetes/migrationJob.go
@@ -0,0 +1,111 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	batch "k8s.io/api/batch/v1"
+	core "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+const migrationJobName = "uyuni-data-sync"
+
+// startMigrationJob prepares and starts the synchronization job.
+//
+// This assumes the SSH key is stored in an uyuni-migration-key secret
+// and the SSH config in an uyuni-migration-ssh ConfigMap with config and known_hosts keys.
+func startMigrationJob(
+	namespace string,
+	serverImage string,
+	pullPolicy string,
+	fqdn string,
+	user string,
+	prepare bool,
+	mounts []types.VolumeMount,
+) error {
+	job, err := getMigrationJob(
+		namespace,
+		serverImage,
+		pullPolicy,
+		mounts,
+		fqdn,
+		user,
+		prepare,
+	)
+	if err != nil {
+		return err
+	}
+
+	// Run the job
+	return kubernetes.Apply([]runtime.Object{job}, L("failed to run the migration job"))
+}
+
+func getMigrationJob(
+	namespace string,
+	image string,
+	pullPolicy string,
+	mounts []types.VolumeMount,
+	sourceFqdn string,
+	user string,
+	prepare bool,
+) (*batch.Job, error) {
+	// Add mount and volume for the uyuni-migration-key secret with key and key.pub items
+	keyMount := core.VolumeMount{Name: "ssh-key", MountPath: "/root/.ssh/id_rsa", SubPath: "id_rsa"}
+	pubKeyMount := core.VolumeMount{Name: "ssh-key", MountPath: "/root/.ssh/id_rsa.pub", SubPath: "id_rsa.pub"}
+
+	keyVolume := kubernetes.CreateSecretVolume("ssh-key", "uyuni-migration-key")
+	var keyMode int32 = 0600
+	keyVolume.VolumeSource.Secret.Items = []core.KeyToPath{
+		{Key: "key", Path: "id_rsa", Mode: &keyMode},
+		{Key: "key.pub", Path: "id_rsa.pub"},
+	}
+
+	// Add mounts and volume for the uyuni-migration-ssh config map.
+	// We need one mount per file, using subPath, to avoid two mounts on the same folder.
+	knownHostsMount := core.VolumeMount{Name: "ssh-conf", MountPath: "/root/.ssh/known_hosts", SubPath: "known_hosts"}
+	sshConfMount := core.VolumeMount{Name: "ssh-conf", MountPath: "/root/.ssh/config", SubPath: "config"}
+	sshVolume := kubernetes.CreateConfigVolume("ssh-conf", "uyuni-migration-ssh")
+
+	// Prepare the script
+	scriptData := templates.MigrateScriptTemplateData{
+		Volumes:    utils.ServerVolumeMounts,
+		SourceFqdn: sourceFqdn,
+		User:       user,
+		Kubernetes: true,
+		Prepare:    prepare,
+	}
+
+	job, err := kubernetes.GetScriptJob(namespace, migrationJobName, image, pullPolicy, mounts, scriptData)
+	if err != nil {
+		return nil, err
+	}
+
+	// Append the extra volumes and mounts
+	volumeMounts := job.Spec.Template.Spec.Containers[0].VolumeMounts
+	volumes := job.Spec.Template.Spec.Volumes
+
+	volumeMounts = append(volumeMounts, keyMount, pubKeyMount, knownHostsMount, sshConfMount)
+	volumes = append(volumes, keyVolume, sshVolume)
+
+	job.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
+	job.Spec.Template.Spec.Volumes = volumes
+
+	return job, nil
+}
diff --git a/mgradm/cmd/migrate/kubernetes/namespace.go b/mgradm/cmd/migrate/kubernetes/namespace.go
new file mode 100644
index 000000000..952d2a3f2
--- /dev/null
+++ b/mgradm/cmd/migrate/kubernetes/namespace.go
@@ -0,0 +1,25 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	.
"github.com/uyuni-project/uyuni-tools/shared/l10n" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func createNamespace(namespace string) error { + ns := core.Namespace{ + TypeMeta: meta.TypeMeta{Kind: "Namespace", APIVersion: "v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: namespace, + }, + } + return kubernetes.Apply([]runtime.Object{&ns}, L("failed to create the namespace")) +} diff --git a/mgradm/cmd/migrate/kubernetes/postUpgradeJob.go b/mgradm/cmd/migrate/kubernetes/postUpgradeJob.go new file mode 100644 index 000000000..bfb90c09e --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/postUpgradeJob.go @@ -0,0 +1,37 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/rs/zerolog/log" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + batch "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const postUpgradeJobName = "uyuni-post-upgrade" + +func startPostUpgradeJob(namespace string, image string, pullPolicy string) error { + log.Info().Msg(L("Performing post upgrade changes…")) + + job, err := getPostUpgradeJob(namespace, image, pullPolicy) + if err != nil { + return err + } + + return kubernetes.Apply([]runtime.Object{job}, L("failed to run the post upgrade job")) +} + +func getPostUpgradeJob(namespace string, image string, pullPolicy string) (*batch.Job, error) { + scriptData := templates.PostUpgradeTemplateData{} + mounts := getServerMounts() + + return kubernetes.GetScriptJob(namespace, postUpgradeJobName, image, pullPolicy, mounts, scriptData) +} diff --git a/mgradm/cmd/migrate/kubernetes/services.go b/mgradm/cmd/migrate/kubernetes/services.go new file mode 100644 index 000000000..984772295 --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/services.go @@ -0,0 +1,96 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const webServiceName = "web" +const saltServiceName = "salt" +const cobblerServiceName = "cobbler" +const reportdbServiceName = "report-db" +const taskoServiceName = "taskomatic" +const tftpServiceName = "tftp" + +func createServices(namespace string, debug bool) error { + reportDbPorts := []types.PortMap{ + utils.NewPortMap("pgsql", 5432, 5432), + utils.NewPortMap("exporter", 9187, 9187), + } + saltPorts := []types.PortMap{ + utils.NewPortMap("publish", 4505, 4505), + utils.NewPortMap("request", 4506, 4506), + // TODO Add the salt API if configured + } + + taskoPorts := []types.PortMap{ + utils.NewPortMap("jmx", 5556, 5556), + utils.NewPortMap("metrics", 9800, 9800), + } + tomcatPorts := []types.PortMap{ + utils.NewPortMap("jmx", 5557, 5557), + } + + if debug { + taskoPorts = append(taskoPorts, utils.NewPortMap("debug", 8001, 8001)) + tomcatPorts = append(tomcatPorts, utils.NewPortMap("debug", 8003, 8003)) + } + + services := []runtime.Object{ + getService(namespace, webServiceName, core.ProtocolTCP, utils.NewPortMap("web", 80, 80)), + getService(namespace, saltServiceName, core.ProtocolTCP, saltPorts...), + getService(namespace, cobblerServiceName, core.ProtocolTCP, utils.NewPortMap("cobbler", 25151, 25151)), + getService(namespace, reportdbServiceName, core.ProtocolTCP, reportDbPorts...), + getService(namespace, tftpServiceName, core.ProtocolUDP, utils.NewPortMap("tftp", 69, 69)), + getService(namespace, "tomcat", core.ProtocolTCP, tomcatPorts...), + getService(namespace, taskoServiceName, core.ProtocolTCP, taskoPorts...), + } + + if debug { + services = append(services, + getService(namespace, "search", core.ProtocolTCP, utils.NewPortMap("debug", 8002, 8002)), + ) + } + + return kubernetes.Apply(services, L("failed to create the service")) +} + +func getService(namespace string, name string, protocol core.Protocol, ports ...types.PortMap) *core.Service { + serviceType := core.ServiceTypeClusterIP // TODO make configurable to allow NodePort and maybe LoadBalancer + + portObjs := []core.ServicePort{} + for _, port := range ports { + portObjs = append(portObjs, core.ServicePort{ + Name: port.Name, + Port: int32(port.Exposed), + TargetPort: intstr.FromInt(port.Port), + Protocol: protocol, + }) + } + + return &core.Service{ + TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Service"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: map[string]string{"app": kubernetes.ServerApp}, + }, + Spec: core.ServiceSpec{ + Ports: portObjs, + Selector: map[string]string{"app": kubernetes.ServerApp}, + Type: serviceType, + }, + } +} diff --git a/mgradm/cmd/migrate/kubernetes/ssh.go b/mgradm/cmd/migrate/kubernetes/ssh.go new file mode 100644 index 000000000..f7a93d0f0 --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/ssh.go @@ -0,0 +1,159 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func checkSsh(namespace string, flags *sshFlags) error { + if exists, err := checkSshKey(namespace); err != nil { + return err + } else if !exists && flags.Key.Public != "" && flags.Key.Private != "" { + if err := createSshSecret(namespace, flags.Key.Private, flags.Key.Public); err != nil { + return err + } + } else if !exists { + return errors.New(L("no SSH key found to use for migration")) + } + + if exists, err := checkSshConfig(namespace); err != nil { + return err + } else if !exists && flags.Knownhosts != "" { + // The config may be empty, but not the known_hosts + if err := createSshConfig(namespace, flags.Config, flags.Knownhosts); err != nil { + return err + } + } else if !exists { + return errors.New(L("no SSH known_hosts and configuration found to use for migration")) + } + + return nil +} + +const sshSecretName = "uyuni-migration-key" +const sshConfigName = "uyuni-migration-ssh" + +func checkSshKey(namespace string) (bool, error) { + exists := false + out, err := utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "secret", "-n", namespace, sshSecretName, "-o", "jsonpath={.data}", + ) + if err != nil { + if strings.Contains(err.Error(), "NotFound") { + log.Debug().Msg("Not found!") + // The secret was not found, it's not really an error + return exists, nil + } + return exists, utils.Errorf(err, L("failed to get %s SSH key secret"), sshSecretName) + } + exists = true + + var data map[string]string + if err := json.Unmarshal(out, &data); err != nil { + return exists, err + } + + for _, key := range []string{"key", "key.pub"} { + if value, ok := data[key]; !ok || value == "" { + return exists, fmt.Errorf(L("%[1]s secret misses the %[2]s value"), sshSecretName, key) + } + } + + return exists, nil +} + +func createSshSecret(namespace string, keyPath string, pubKeyPath string) error { + keyContent, err := os.ReadFile(keyPath) + if err != nil { + return utils.Errorf(err, L("failed to read key file %s"), keyPath) + } + + pubContent, err := os.ReadFile(pubKeyPath) + if err != nil { + return utils.Errorf(err, L("failed to read public key file %s"), pubKeyPath) + } + + secret := core.Secret{ + TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"}, + ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: sshSecretName}, + // It seems serializing this object automatically transforms the secrets to base64. + Data: map[string][]byte{ + "key": keyContent, + "key.pub": pubContent, + }, + } + + return kubernetes.Apply([]runtime.Object{&secret}, L("failed to create the SSH migration secret")) +} + +func checkSshConfig(namespace string) (bool, error) { + exists := false + out, err := utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "cm", "-n", namespace, sshConfigName, "-o", "jsonpath={.data}", + ) + if err != nil { + if strings.Contains(err.Error(), "NotFound") { + // The config map was not found, it's not really an error + return exists, nil + } + return exists, utils.Errorf(err, L("failed to get %s SSH ConfigMap"), sshConfigName) + } + exists = true + + var data map[string]string + if err := json.Unmarshal(out, &data); err != nil { + return exists, utils.Errorf(err, L("failed to parse SSH ConfigMap data")) + } + + // The known_hosts has to contain at least the entry for the source server. 
+ if value, ok := data["known_hosts"]; !ok || value == "" { + return exists, fmt.Errorf(L("%[1]s ConfigMap misses the %[2]s value"), sshSecretName, "known_hosts") + } + + // An empty config is not an error. + if _, ok := data["config"]; !ok { + return exists, fmt.Errorf(L("%[1]s ConfigMap misses the %[2]s value"), sshSecretName, "config") + } + + return exists, nil +} + +func createSshConfig(namespace string, configPath string, KnownhostsPath string) error { + configContent, err := os.ReadFile(configPath) + if err != nil { + return utils.Errorf(err, L("failed to read SSH config file %s"), configPath) + } + + knownhostsContent, err := os.ReadFile(KnownhostsPath) + if err != nil { + return utils.Errorf(err, L("failed to read SSH known_hosts file %s"), KnownhostsPath) + } + + configMap := core.ConfigMap{ + TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"}, + ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: sshConfigName}, + Data: map[string]string{ + "config": string(configContent), + "known_hosts": string(knownhostsContent), + }, + } + return kubernetes.Apply([]runtime.Object{&configMap}, L("failed to create the SSH migration ConfigMap")) +} diff --git a/mgradm/cmd/migrate/kubernetes/ssl.go b/mgradm/cmd/migrate/kubernetes/ssl.go new file mode 100644 index 000000000..a5d15346f --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/ssl.go @@ -0,0 +1,51 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "os" + "path" + + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/ssl" + adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +func installExistingCertificate(namespace string, extractedData *MigrationData) error { + // Store the certificates and key to file to load them + tmpDir, err := utils.TempDir() + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + caCrtPath := path.Join(tmpDir, "ca.crt") + if err := os.WriteFile(caCrtPath, []byte(extractedData.CaCert), 0700); err != nil { + return utils.Errorf(err, L("failed to create temporary ca.crt file")) + } + + srvCrtPath := path.Join(tmpDir, "srv.crt") + if err := os.WriteFile(srvCrtPath, []byte(extractedData.ServerCert), 0700); err != nil { + return utils.Errorf(err, L("failed to create temporary srv.crt file")) + } + + srvKeyPath := path.Join(tmpDir, "srv.key") + if err := os.WriteFile(srvKeyPath, []byte(extractedData.ServerKey), 0700); err != nil { + return utils.Errorf(err, L("failed to create temporary srv.key file")) + } + + sslFlags := adm_utils.SslCertFlags{ + Ca: ssl.CaChain{Root: caCrtPath}, + Server: ssl.SslPair{ + Key: srvKeyPath, + Cert: srvCrtPath, + }, + } + return kubernetes.DeployExistingCertificate(namespace, &sslFlags) +} diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go index cb0bf3267..6b4d574c4 100644 --- a/mgradm/cmd/migrate/kubernetes/utils.go +++ b/mgradm/cmd/migrate/kubernetes/utils.go @@ -9,46 +9,39 @@ package kubernetes import ( "encoding/base64" "fmt" - "os" - "os/exec" - "path" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "github.com/spf13/cobra" - migration_shared "github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/shared" "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" "github.com/uyuni-project/uyuni-tools/mgradm/shared/ssl" 
- adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - "github.com/uyuni-project/uyuni-tools/shared" + cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) +const migrationDataPvcName = "migration-data" + func migrateToKubernetes( globalFlags *types.GlobalFlags, flags *kubernetesMigrateFlags, cmd *cobra.Command, args []string, ) error { - for _, binary := range []string{"kubectl", "helm"} { - if _, err := exec.LookPath(binary); err != nil { - return fmt.Errorf(L("install %s before running this command"), binary) - } + namespace := flags.Helm.Uyuni.Namespace + // Create the namespace if not present + if err := createNamespace(namespace); err != nil { + return err } - cnx := shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) - serverImage, err := utils.ComputeImage(globalFlags.Registry, utils.DefaultTag, flags.Image) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) + // Check the for the required SSH key and configuration + if err := checkSsh(namespace, &flags.Ssh); err != nil { + return err } - hubXmlrpcImage := "" - hubXmlrpcImage, err = utils.ComputeImage(globalFlags.Registry, flags.Image.Tag, flags.HubXmlrpc.Image) + serverImage, err := utils.ComputeImage(globalFlags.Registry, utils.DefaultTag, flags.Image) if err != nil { - return err + return utils.Errorf(err, L("failed to compute image URL")) } fqdn := args[0] @@ -56,187 +49,202 @@ func migrateToKubernetes( return err } - // Find the SSH Socket and paths for the migration - sshAuthSocket := migration_shared.GetSshAuthSocket() - sshConfigPath, sshKnownhostsPath := migration_shared.GetSshPaths() - - // Prepare the migration script and folder - scriptDir, err := adm_utils.GenerateMigrationScript(fqdn, flags.User, true, flags.Prepare) - if err != nil { - return utils.Errorf(err, L("failed to generate migration script")) - } + mounts := getServerMounts() + mounts = tuneMounts(mounts, &flags.Volumes) - defer os.RemoveAll(scriptDir) + // Add a mount and volume for the extracted data + migrationDataVolume := types.VolumeMount{Name: migrationDataPvcName, MountPath: "/var/lib/uyuni-tools"} + migrationMounts := append(mounts, migrationDataVolume) - // We don't need the SSL certs at this point of the migration - clusterInfos, err := shared_kubernetes.CheckCluster() - if err != nil { + if err := shared_kubernetes.CreatePersistentVolumeClaims(namespace, migrationMounts); err != nil { return err } - kubeconfig := clusterInfos.GetKubeconfig() - //TODO: check if we need to handle SELinux policies, as we do in podman - - // Install Uyuni with generated CA cert: an empty struct means no 3rd party cert - var sslFlags adm_utils.SslCertFlags - - // Deploy for running migration command - if err := kubernetes.Deploy( - fmt.Sprintf(L("Deploy to migrate the data from %s"), fqdn), - cnx, globalFlags.Registry, &flags.Image, &flags.HubXmlrpc, - &flags.Helm, &sslFlags, clusterInfos, fqdn, false, flags.Prepare, - "--set", "migration.ssh.agentSocket="+sshAuthSocket, - "--set", "migration.ssh.configPath="+sshConfigPath, - "--set", "migration.ssh.knownHostsPath="+sshKnownhostsPath, - "--set", "migration.dataPath="+scriptDir, + + if err = startMigrationJob( + namespace, + serverImage, + flags.Image.PullPolicy, + fqdn, + flags.User, + flags.Prepare, + 
+ migrationMounts, ); err != nil { - return utils.Errorf(err, L("cannot run deploy")) + return err } - //this is needed because folder with script needs to be mounted - //check the node before scaling down - nodeName, err := shared_kubernetes.GetNode(shared_kubernetes.ServerFilter) - if err != nil { - return utils.Errorf(err, L("cannot find node running uyuni")) - } - // Run the actual migration - if err := adm_utils.RunMigration(cnx, scriptDir, "migrate.sh"); err != nil { - return utils.Errorf(err, L("cannot run migration")) + // Wait forever for the job to finish: the duration of this job depends on the amount of data to copy + if err := shared_kubernetes.WaitForJob(namespace, migrationJobName, -1); err != nil { + return err } - extractedData, err := utils.ReadInspectData[utils.InspectResult](path.Join(scriptDir, "data")) + // Read the extracted data from the migration volume + extractedData, err := extractMigrationData(namespace, serverImage, migrationDataVolume) if err != nil { - return utils.Errorf(err, L("cannot read data from container")) + return err } - // After each command we want to scale to 0 - err = shared_kubernetes.ReplicasTo(shared_kubernetes.ServerApp, 0) - if err != nil { - return utils.Errorf(err, L("cannot set replicas to 0")) - } + oldPgVersion := extractedData.Data.CurrentPgVersion + newPgVersion := extractedData.Data.ImagePgVersion - if flags.Prepare { - log.Info().Msg(L("Migration prepared. Run the 'migrate' command without '--prepare' to finish the migration.")) - return nil - } + // Run the DB Migration job if needed + if oldPgVersion < newPgVersion { + if err := startDbUpgradeJob( + namespace, globalFlags.Registry, flags.Image, flags.DbUpgradeImage, + oldPgVersion, newPgVersion, + ); err != nil { + return err + } - setupSslArray, err := setupSsl(&flags.Helm, kubeconfig, scriptDir, flags.Ssl.Password, flags.Image.PullPolicy) - if err != nil { - return utils.Errorf(err, L("cannot setup SSL")) + // Wait forever for the job to finish: the duration of this job depends on the amount of data to upgrade + if err := shared_kubernetes.WaitForJob(namespace, dbUpgradeJobName, -1); err != nil { + return err + } + } else if oldPgVersion > newPgVersion { + return fmt.Errorf( + L("downgrading database from PostgreSQL %[1]d to %[2]d is not supported"), oldPgVersion, newPgVersion) } - helmArgs := []string{ - "--reset-values", - "--set", "timezone=" + extractedData.Timezone, + // Run the DB Finalization job + schemaUpdateRequired := oldPgVersion != newPgVersion + if err := startDbFinalizeJob( + namespace, serverImage, flags.Image.PullPolicy, schemaUpdateRequired, true, + ); err != nil { + return err } - if flags.Mirror != "" { - log.Warn().Msgf(L("The mirror data will not be migrated, ensure it is available at %s"), flags.Mirror) - // TODO Handle claims for multi-node clusters - helmArgs = append(helmArgs, "--set", "mirror.hostPath="+flags.Mirror) + + // Wait forever for the job to finish: the duration of this job depends on the amount of data to reindex + if err := shared_kubernetes.WaitForJob(namespace, dbFinalizeJobName, -1); err != nil { + return err } - helmArgs = append(helmArgs, setupSslArray...)
- // Run uyuni upgrade using the new ssl certificate - // We don't need to start the Hub XML-RPC API containers during the setup phase - err = kubernetes.UyuniUpgrade( - L("Upgrade with final volumes"), - serverImage, flags.Image.PullPolicy, 0, hubXmlrpcImage, &flags.Helm, - kubeconfig, fqdn, clusterInfos.Ingress, helmArgs..., - ) - if err != nil { - return utils.Errorf(err, L("cannot upgrade helm chart to image %s using new SSL certificate"), serverImage) + // Run the Post Upgrade job + if err := startPostUpgradeJob(namespace, serverImage, flags.Image.PullPolicy); err != nil { + return err } - if err := shared_kubernetes.WaitForDeployment(flags.Helm.Uyuni.Namespace, "uyuni", "uyuni"); err != nil { - return utils.Errorf(err, L("cannot wait for deployment of %s"), serverImage) + if err := shared_kubernetes.WaitForJob(namespace, postUpgradeJobName, 60); err != nil { + return err } - err = shared_kubernetes.ReplicasTo(shared_kubernetes.ServerApp, 0) + // Extract some data from the cluster to guess how to configure Uyuni. + clusterInfos, err := shared_kubernetes.CheckCluster() if err != nil { - return utils.Errorf(err, L("cannot set replicas to 0")) + return err } - oldPgVersion := extractedData.CurrentPgVersion - newPgVersion := extractedData.ImagePgVersion + // Deploy the SSL CA and server certificates + var caIssuer string + if extractedData.CaKey != "" { + // cert-manager is not required for 3rd party certificates, only if we have the CA key. + // Note that in an operator we won't be able to install cert-manager and just wait for it to be installed. + kubeconfig := clusterInfos.GetKubeconfig() - if oldPgVersion != newPgVersion { - if err := kubernetes.RunPgsqlVersionUpgrade(globalFlags.Registry, flags.Image, - flags.DbUpgradeImage, nodeName, oldPgVersion, newPgVersion, - ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL version upgrade script")) + if err := kubernetes.InstallCertManager(&flags.Helm, kubeconfig, flags.Image.PullPolicy); err != nil { + return utils.Errorf(err, L("cannot install cert manager")) } - } - schemaUpdateRequired := oldPgVersion != newPgVersion - if err := kubernetes.RunPgsqlFinalizeScript(serverImage, flags.Image.PullPolicy, nodeName, schemaUpdateRequired, true); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL finalisation script")) - } + // Convert CA to RSA to use in a Kubernetes TLS secret. + // In an operator we would have to fail now if there is no SSL password as we cannot prompt it. 
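+ // GetRsaKey converts the extracted CA key to RSA and StripTextFromCertificate drops the text dump; both results are base64-encoded for the issuer template.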
+ ca := ssl.SslPair{ + Key: base64.StdEncoding.EncodeToString(ssl.GetRsaKey(extractedData.CaKey, flags.Ssl.Password)), + Cert: base64.StdEncoding.EncodeToString(ssl.StripTextFromCertificate(extractedData.CaCert)), + } - if oldPgVersion != newPgVersion { - if err := kubernetes.RunPgsqlVersionUpgrade(globalFlags.Registry, flags.Image, - flags.DbUpgradeImage, nodeName, oldPgVersion, newPgVersion, - ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL version upgrade script")) + // Install the cert-manager issuers + if _, err := kubernetes.DeployReusedCa(namespace, &ca); err != nil { + return err + } + caIssuer = kubernetes.CaIssuerName + } else { + // Most likely a 3rd party certificate: cert-manager is not needed in this case + if err := installExistingCertificate(namespace, extractedData); err != nil { + return err + } } - schemaUpdateRequired := oldPgVersion != newPgVersion - if err := kubernetes.RunPgsqlFinalizeScript(serverImage, flags.Image.PullPolicy, nodeName, schemaUpdateRequired, true); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL finalisation script")) - } + // Create the Ingress routes before the deployments as those trigger + // the creation of the uyuni-cert secret from cert-manager. + if err := createIngress(namespace, fqdn, caIssuer, clusterInfos.Ingress); err != nil { + return err } - if err := kubernetes.RunPostUpgradeScript(serverImage, flags.Image.PullPolicy, nodeName); err != nil { - return utils.Errorf(err, L("cannot run post upgrade script")) + // Wait for uyuni-cert secret to be ready + shared_kubernetes.WaitForSecret(namespace, certSecretName) + + // Start the server + if err := createServerDeployment( + namespace, serverImage, flags.Image.PullPolicy, extractedData.Data.Timezone, extractedData.Data.Debug, + flags.Volumes.Mirror, + ); err != nil { + return err } - hubReplicas := flags.HubXmlrpc.Replicas - if extractedData.HasHubXmlrpcApi { - log.Info().Msg(L("Enabling Hub XML-RPC API since it is enabled on the migrated server")) - hubReplicas = 1 + // Create the services + if err := createServices(namespace, extractedData.Data.Debug); err != nil { + return err } - // This is the final deployment, all the replicas need to be correct here. - err = kubernetes.UyuniUpgrade( - L("Complete deployment after migration"), - serverImage, flags.Image.PullPolicy, hubReplicas, hubXmlrpcImage, &flags.Helm, kubeconfig, fqdn, - clusterInfos.Ingress, helmArgs..., - ) - if err != nil { - return utils.Errorf(err, L("cannot upgrade to image %s"), serverImage) + // TODO Start the Coco Deployments if requested. + // With an operator in mind, the user would just change the custom resource to enable the feature. + if extractedData.Data.HasHubXmlrpcApi { + // Install Hub API deployment, service + hubApiImage, err := utils.ComputeImage(globalFlags.Registry, flags.Image.Tag, flags.HubXmlrpc.Image) + if err != nil { + return err + } + if err := installHubApi(namespace, hubApiImage, flags.Image.PullPolicy); err != nil { + return err + } } - if err := shared_kubernetes.WaitForDeployment(flags.Helm.Uyuni.Namespace, "uyuni", "uyuni"); err != nil { + // TODO Wait for all the deployments to be ready - // Reinitialize the connection since the pod name has changed since we first checked - cnx = shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) - if err := cnx.CopyCaCertificate(fqdn); err != nil { - return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates")) - } - return nil -} -// updateIssuer replaces the temporary SSL certificate issuer with the source server CA. -// Return additional helm args to use the SSL certificates.
-func setupSsl(helm *adm_utils.HelmFlags, kubeconfig string, scriptDir string, password string, pullPolicy string) ([]string, error) { - caCert := path.Join(scriptDir, "RHN-ORG-TRUSTED-SSL-CERT") - caKey := path.Join(scriptDir, "RHN-ORG-PRIVATE-SSL-KEY") + // TODO Wait for all the deployments to be ready - if utils.FileExists(caCert) && utils.FileExists(caKey) { - key := base64.StdEncoding.EncodeToString(ssl.GetRsaKey(caKey, password)) + return nil +} - // Strip down the certificate text part - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "openssl", "x509", "-in", caCert) - if err != nil { - return []string{}, utils.Errorf(err, L("failed to strip text part from CA certificate")) +func getServerMounts() []types.VolumeMount { + // Filter out the duplicate mounts to avoid issues applying the jobs + serverMounts := utils.ServerVolumeMounts + mounts := []types.VolumeMount{} + mountsSet := map[string]types.VolumeMount{} + for _, mount := range serverMounts { + switch mount.Name { + // Skip mounts that are not PVCs + case "ca-cert", "tls-key": + continue } - cert := base64.StdEncoding.EncodeToString(out) - ca := ssl.SslPair{Cert: cert, Key: key} + if _, exists := mountsSet[mount.Name]; !exists { + mounts = append(mounts, mount) + mountsSet[mount.Name] = mount + } + } - // An empty struct means no third party certificate - sslFlags := adm_utils.SslCertFlags{} - ret, err := kubernetes.DeployCertificate(helm, &sslFlags, cert, &ca, kubeconfig, "", pullPolicy) - if err != nil { - return []string{}, utils.Errorf(err, L("cannot deploy certificate")) + return mounts +} + +func tuneMounts(mounts []types.VolumeMount, flags *cmd_utils.VolumesFlags) []types.VolumeMount { + tunedMounts := []types.VolumeMount{} + for _, mount := range mounts { + class := flags.Class + var volumeFlags *cmd_utils.VolumeFlags + switch mount.Name { + case "var-pgsql": + volumeFlags = &flags.Database + case "var-spacewalk": + volumeFlags = &flags.Packages + case "var-cache": + volumeFlags = &flags.Cache + case "srv-www": + volumeFlags = &flags.Www } - return ret, nil - } else { - // Handle third party certificates and CA - sslFlags := adm_utils.SslCertFlags{ - Ca: ssl.CaChain{Root: caCert}, - Server: ssl.SslPair{ - Key: path.Join(scriptDir, "spacewalk.key"), - Cert: path.Join(scriptDir, "spacewalk.crt"), - }, + if volumeFlags != nil { + if volumeFlags.Class != "" { + class = volumeFlags.Class + } + mount.Size = volumeFlags.Size } - kubernetes.DeployExistingCertificate(helm, &sslFlags, kubeconfig) + mount.Class = class + tunedMounts = append(tunedMounts, mount) } - return []string{}, nil + return tunedMounts } diff --git a/mgradm/shared/kubernetes/certificates.go b/mgradm/shared/kubernetes/certificates.go index 1e878a136..64806d699 100644 --- a/mgradm/shared/kubernetes/certificates.go +++ b/mgradm/shared/kubernetes/certificates.go @@ -7,6 +7,7 @@ package kubernetes import ( "encoding/base64" "errors" + "fmt" "os" "path/filepath" "time" @@ -19,27 +20,34 @@ import ( "github.com/uyuni-project/uyuni-tools/shared/kubernetes" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/utils" + + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) +// CaIssuerName is the name of the CA issuer deployed if cert-manager is used. +const CaIssuerName = "uyuni-ca-issuer" + // Helm annotation to add in order to use cert-manager's uyuni CA issuer, in JSON format. 
-const ingressCertManagerAnnotation = "ingressSslAnnotations={\"cert-manager.io/issuer\": \"uyuni-ca-issuer\"}" +var ingressCertManagerAnnotation string = fmt.Sprintf("ingressSslAnnotations={\"cert-manager.io/issuer\": \"%s\"}", CaIssuerName) // DeployExistingCertificate execute a deploy of an existing certificate. -func DeployExistingCertificate(helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_utils.SslCertFlags, kubeconfig string) { +func DeployExistingCertificate(namespace string, sslFlags *cmd_utils.SslCertFlags) error { // Deploy the SSL Certificate secret and CA configmap serverCrt, rootCaCrt := ssl.OrderCas(&sslFlags.Ca, &sslFlags.Server) serverKey := utils.ReadFile(sslFlags.Server.Key) tempDir, err := os.MkdirTemp("", "mgradm-*") if err != nil { - log.Fatal().Err(err).Msgf(L("failed to create temporary directory")) + return utils.Errorf(err, L("failed to create temporary directory")) } defer os.RemoveAll(tempDir) secretPath := filepath.Join(tempDir, "secret.yaml") log.Info().Msg(L("Creating SSL server certificate secret")) tlsSecretData := templates.TlsSecretTemplateData{ - Namespace: helmFlags.Uyuni.Namespace, + Namespace: namespace, Name: "uyuni-cert", Certificate: base64.StdEncoding.EncodeToString(serverCrt), Key: base64.StdEncoding.EncodeToString(serverKey), @@ -47,31 +55,21 @@ func DeployExistingCertificate(helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_uti } if err = utils.WriteTemplateToFile(tlsSecretData, secretPath, 0500, true); err != nil { - log.Fatal().Err(err).Msg(L("Failed to generate uyuni-crt secret definition")) + return utils.Errorf(err, L("Failed to generate uyuni-crt secret definition")) } err = utils.RunCmd("kubectl", "apply", "-f", secretPath) if err != nil { - log.Fatal().Err(err).Msg(L("Failed to create uyuni-crt TLS secret")) + return utils.Errorf(err, L("Failed to create uyuni-crt TLS secret")) } // Copy the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - createCaConfig(rootCaCrt) + return createCaConfig(namespace, rootCaCrt) } -// DeployReusedCaCertificate deploys an existing SSL CA using cert-manager. -func DeployReusedCa( - helmFlags *cmd_utils.HelmFlags, - ca *ssl.SslPair, - kubeconfig string, - imagePullPolicy string, -) ([]string, error) { +// DeployReusedCa deploys an existing SSL CA using an already installed cert-manager. 
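+// It returns the extra helm arguments needed to use the cert-manager ingress annotations for this issuer.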
+func DeployReusedCa(namespace string, ca *ssl.SslPair) ([]string, error) { helmArgs := []string{} - // Install cert-manager if needed - if err := installCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil { - return []string{}, utils.Errorf(err, L("cannot install cert manager")) - } - log.Info().Msg(L("Creating cert-manager issuer for existing CA")) tempDir, err := os.MkdirTemp("", "mgradm-*") if err != nil { @@ -82,7 +80,7 @@ func DeployReusedCa( issuerPath := filepath.Join(tempDir, "issuer.yaml") issuerData := templates.ReusedCaIssuerTemplateData{ - Namespace: helmFlags.Uyuni.Namespace, + Namespace: namespace, Key: ca.Key, Certificate: ca.Cert, } @@ -97,21 +95,22 @@ func DeployReusedCa( } // Wait for issuer to be ready - if err := waitForIssuer(helmFlags.Uyuni.Namespace, "uyuni-ca-issuer"); err != nil { + if err := waitForIssuer(namespace, CaIssuerName); err != nil { return nil, err } else { helmArgs = append(helmArgs, "--set-json", ingressCertManagerAnnotation) - } // Copy the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - createCaConfig([]byte(ca.Cert)) + if err := createCaConfig(namespace, []byte(ca.Cert)); err != nil { + return nil, err + } return helmArgs, nil } // DeployGeneratedCa deploys a new SSL CA using cert-manager. -func DeployCertificate( +func DeployGeneratedCa( helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_utils.SslCertFlags, rootCa string, @@ -123,7 +122,7 @@ func DeployCertificate( helmArgs := []string{} // Install cert-manager if needed - if err := installCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil { + if err := InstallCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil { return []string{}, utils.Errorf(err, L("cannot install cert manager")) } @@ -161,11 +160,12 @@ func DeployCertificate( return nil, err } else { helmArgs = append(helmArgs, "--set-json", ingressCertManagerAnnotation) - } // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - extractCaCertToConfig() + if err := extractCaCertToConfig(helmFlags.Uyuni.Namespace); err != nil { + return nil, err + } return helmArgs, nil } @@ -185,10 +185,10 @@ func waitForIssuer(namespace string, name string) error { time.Sleep(1 * time.Second) } return errors.New(L("Issuer didn't turn ready after 60s")) - } -func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, imagePullPolicy string) error { +// InstallCertManager deploys the cert-manager helm chart with the CRDs. 
+func InstallCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, imagePullPolicy string) error { if !kubernetes.IsDeploymentReady("", "cert-manager") { log.Info().Msg(L("Installing cert-manager")) repo := "" @@ -200,7 +200,7 @@ func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, image "--set", "crds.enabled=true", "--set", "crds.keep=true", "--set-json", "global.commonLabels={\"installedby\": \"mgradm\"}", - "--set", "images.pullPolicy=" + kubernetes.GetPullPolicy(imagePullPolicy), + "--set", "images.pullPolicy=" + string(kubernetes.GetPullPolicy(imagePullPolicy)), } extraValues := helmFlags.CertManager.Values if extraValues != "" { @@ -227,35 +227,43 @@ func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, image return nil } -func extractCaCertToConfig() { +func extractCaCertToConfig(namespace string) error { // TODO Replace with [trust-manager](https://cert-manager.io/docs/projects/trust-manager/) to automate this const jsonPath = "-o=jsonpath={.data.ca\\.crt}" log.Info().Msg(L("Extracting CA certificate to a configmap")) // Skip extracting if the configmap is already present - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "get", "configmap", "uyuni-ca", jsonPath) + out, err := utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "configmap", "-n", namespace, "uyuni-ca", jsonPath, + ) log.Info().Msgf(L("CA cert: %s"), string(out)) if err == nil && len(out) > 0 { log.Info().Msg(L("uyuni-ca configmap already existing, skipping extraction")) - return + return nil } - out, err = utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "get", "secret", "uyuni-ca", jsonPath) + out, err = utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "secret", "-n", namespace, "uyuni-ca", jsonPath, + ) if err != nil { - log.Fatal().Err(err).Msgf(L("Failed to get uyuni-ca certificate")) + return utils.Errorf(err, L("Failed to get uyuni-ca certificate")) } decoded, err := base64.StdEncoding.DecodeString(string(out)) if err != nil { - log.Fatal().Err(err).Msgf(L("Failed to base64 decode CA certificate")) + return utils.Errorf(err, L("Failed to base64 decode CA certificate")) } - createCaConfig(decoded) + return createCaConfig(namespace, decoded) } -func createCaConfig(ca []byte) { - valueArg := "--from-literal=ca.crt=" + string(ca) - if err := utils.RunCmd("kubectl", "create", "configmap", "uyuni-ca", valueArg); err != nil { - log.Fatal().Err(err).Msg(L("Failed to create uyuni-ca config map from certificate")) +func createCaConfig(namespace string, ca []byte) error { + configMap := core.ConfigMap{ + TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"}, + ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: "uyuni-ca"}, + Data: map[string]string{ + "ca.crt": string(ca), + }, } + return kubernetes.Apply([]runtime.Object{&configMap}, L("failed to create the uyuni-ca ConfigMap")) } diff --git a/mgradm/shared/kubernetes/install.go b/mgradm/shared/kubernetes/install.go index e2d339614..c31396711 100644 --- a/mgradm/shared/kubernetes/install.go +++ b/mgradm/shared/kubernetes/install.go @@ -104,7 +104,7 @@ func UyuniUpgrade( // The values computed from the command line need to be last to override what could be in the extras helmParams = append(helmParams, "--set", "images.server="+serverImage, - "--set", "pullPolicy="+kubernetes.GetPullPolicy(pullPolicy), + "--set", "pullPolicy="+string(kubernetes.GetPullPolicy(pullPolicy)), "--set", "fqdn="+fqdn, "--description", reason, ) diff --git a/mgradm/shared/kubernetes/k3s.go 
b/mgradm/shared/kubernetes/k3s.go index 27a539e6b..9e002495e 100644 --- a/mgradm/shared/kubernetes/k3s.go +++ b/mgradm/shared/kubernetes/k3s.go @@ -57,7 +57,7 @@ func RunPgsqlVersionUpgrade(registry string, image types.ImageFlags, upgradeImag } log.Info().Msgf(L("Using database upgrade image %s"), upgradeImageUrl) - pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql, true) + pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql) if err != nil { return utils.Errorf(err, L("cannot generate PostgreSQL database version upgrade script")) } diff --git a/mgradm/shared/podman/podman.go b/mgradm/shared/podman/podman.go index 00255deaa..558ba8485 100644 --- a/mgradm/shared/podman/podman.go +++ b/mgradm/shared/podman/podman.go @@ -263,7 +263,7 @@ func RunPgsqlVersionUpgrade( log.Info().Msgf(L("Using database upgrade image %s"), preparedImage) - pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql, false) + pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql) if err != nil { return utils.Errorf(err, L("cannot generate PostgreSQL database version upgrade script")) } diff --git a/mgradm/shared/ssl/ssl.go b/mgradm/shared/ssl/ssl.go index 17ce9add2..a4da60dd9 100644 --- a/mgradm/shared/ssl/ssl.go +++ b/mgradm/shared/ssl/ssl.go @@ -7,6 +7,7 @@ package ssl import ( "bytes" "errors" + "io" "os" "os/exec" "strings" @@ -251,13 +252,21 @@ func optionalFile(file string) { } // Converts an SSL key to RSA. -func GetRsaKey(keyPath string, password string) []byte { +func GetRsaKey(keyContent string, password string) []byte { // Kubernetes only handles RSA private TLS keys, convert and strip password caPassword := password utils.AskPasswordIfMissing(&caPassword, L("Source server SSL CA private key password"), 0, 0) // Convert the key file to RSA format for kubectl to handle it - cmd := exec.Command("openssl", "rsa", "-in", keyPath, "-passin", "env:pass") + cmd := exec.Command("openssl", "rsa", "-passin", "env:pass") + stdin, err := cmd.StdinPipe() + if err != nil { + log.Fatal().Err(err).Msg(L("Failed to open openssl rsa process input stream")) + } + if _, err := io.WriteString(stdin, keyContent); err != nil { + log.Fatal().Err(err).Msg(L("Failed to write openssl key content to input stream")) + } + // Close stdin so openssl sees EOF, otherwise Output() blocks forever + if err := stdin.Close(); err != nil { + log.Fatal().Err(err).Msg(L("Failed to close openssl rsa process input stream")) + } + cmd.Env = append(cmd.Env, "pass="+caPassword) out, err := cmd.Output() if err != nil { @@ -265,3 +274,20 @@ func GetRsaKey(keyPath string, password string) []byte { } return out } + +// StripTextFromCertificate removes the optional text part of an x509 certificate. 
+func StripTextFromCertificate(certContent string) []byte { + cmd := exec.Command("openssl", "x509") + stdin, err := cmd.StdinPipe() + if err != nil { + log.Fatal().Err(err).Msg(L("Failed to open openssl x509 process input stream")) + } + if _, err := io.WriteString(stdin, certContent); err != nil { + log.Fatal().Err(err).Msg(L("Failed to write SSL certificate to input stream")) + } + // Close stdin so openssl sees EOF before the output is collected + if err := stdin.Close(); err != nil { + log.Fatal().Err(err).Msg(L("Failed to close openssl x509 process input stream")) + } + out, err := cmd.Output() + if err != nil { + log.Fatal().Err(err).Msg(L("failed to strip text part from CA certificate")) + } + return out +} diff --git a/mgradm/shared/ssl/ssl_test.go b/mgradm/shared/ssl/ssl_test.go index e0983a498..21162ed44 100644 --- a/mgradm/shared/ssl/ssl_test.go +++ b/mgradm/shared/ssl/ssl_test.go @@ -7,6 +7,8 @@ package ssl import ( "strings" "testing" + + "github.com/uyuni-project/uyuni-tools/shared/test_utils" ) func TestReadCertificatesRootCa(t *testing.T) { @@ -171,7 +173,8 @@ func TestOrderCasChain2(t *testing.T) { } func TestGetRsaKey(t *testing.T) { - actual := string(GetRsaKey("testdata/RootCA.key", "secret")) + key := test_utils.ReadFile(t, "testdata/RootCA.key") + actual := string(GetRsaKey(key, "secret")) if !strings.HasPrefix(actual, "-----BEGIN PRIVATE KEY-----\nMIIEugIBADANBgkqhkiG9w0BAQEFAAS") || !strings.HasSuffix(actual, "DKY9SmW6QD+RJwbMc4M=\n-----END PRIVATE KEY-----\n") { t.Errorf("Unexpected generated RSA key: %s", actual) diff --git a/mgradm/shared/templates/migrateScriptTemplate.go b/mgradm/shared/templates/migrateScriptTemplate.go index 53eb0d44c..239d25990 100644 --- a/mgradm/shared/templates/migrateScriptTemplate.go +++ b/mgradm/shared/templates/migrateScriptTemplate.go @@ -113,6 +113,7 @@ grep '^db_name' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data grep '^db_port' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data $SSH {{ .SourceFqdn }} sh -c "systemctl list-unit-files | grep hub-xmlrpc-api | grep -q active && echo has_hubxmlrpc=true || echo has_hubxmlrpc=false" >>/var/lib/uyuni-tools/data +(test $($SSH {{ .SourceFqdn }} grep jdwp -r /etc/tomcat/conf.d/ /etc/rhn/taskomatic.conf | wc -l) -gt 0 && echo debug=true || echo debug=false) >>/var/lib/uyuni-tools/data echo "Altering configuration for domain resolution..." sed 's/report_db_host = {{ .SourceFqdn }}/report_db_host = localhost/' -i /etc/rhn/rhn.conf; @@ -127,14 +128,8 @@ sed 's/--add-modules java.annotation,com.sun.xml.bind://' -i /etc/tomcat/conf.d/ sed 's/-XX:-UseConcMarkSweepGC//' -i /etc/tomcat/conf.d/* test -f /etc/tomcat/conf.d/remote_debug.conf && sed 's/address=[^:]*:/address=*:/' -i /etc/tomcat/conf.d/remote_debug.conf -# Create a backup copy of the data to prepare DB upgrade. -# We need to upgrade the deployment before upgrading the database to get the SSL certificates ready. -# To avoid corrupting the database files, move them to where the upgrade script will expect them. -echo "Posgresql versions: image: $image_pg_version, current: $current_pg_version" -if test "$image_pg_version" != "$current_pg_version"; then - echo "Backing up the database files ..." - mv /var/lib/pgsql/data /var/lib/pgsql/data-pg$current_pg_version -fi +# Alter rhn.conf to ensure mirror is set to /mirror if set at all +sed 's/server.susemanager.fromdir =.*/server.susemanager.fromdir = \/mirror/' -i /etc/rhn/rhn.conf {{ if .Kubernetes }} echo 'server.no_ssl = 1' >> /etc/rhn/rhn.conf; @@ -165,7 +160,6 @@ if test "extractedSSL" != "1"; then # For third party certificates, the CA chain is in the certificate file. 
rsync -e "$SSH" --rsync-path='sudo rsync' -avz {{ .SourceFqdn }}:/etc/pki/tls/private/spacewalk.key /var/lib/uyuni-tools/ rsync -e "$SSH" --rsync-path='sudo rsync' -avz {{ .SourceFqdn }}:/etc/pki/tls/certs/spacewalk.crt /var/lib/uyuni-tools/ - fi echo "Removing useless ssl-build folder..." diff --git a/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go b/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go index 72bb628f2..3ff687c3b 100644 --- a/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go +++ b/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go @@ -12,6 +12,10 @@ import ( const postgresFinalizeScriptTemplate = `#!/bin/bash set -e +echo "Temporarily disable SSL in the posgresql configuration" +cp /var/lib/pgsql/data/postgresql.conf /var/lib/pgsql/data/postgresql.conf.bak +sed 's/^ssl/#ssl/' -i /var/lib/pgsql/data/postgresql.conf + {{ if .Migration }} echo "Adding database access for other containers..." db_user=$(sed -n '/^db_user/{s/^.*=[ \t]\+\(.*\)$/\1/ ; p}' /etc/rhn/rhn.conf) @@ -56,9 +60,12 @@ where not exists (select 1 from rhntaskorun r join rhntaskotemplate t on r.templ join rhntaskobunch b on t.bunch_id = b.id where b.name='update-system-overview-bunch' limit 1); EOT - echo "Stopping Postgresql..." su -s /bin/bash - postgres -c "/usr/share/postgresql/postgresql-script stop" + +echo "Enable SSL again" +cp /var/lib/pgsql/data/postgresql.conf.bak /var/lib/pgsql/data/postgresql.conf + echo "DONE" ` diff --git a/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go b/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go index aea38496e..bd193b6fa 100644 --- a/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go +++ b/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go @@ -22,20 +22,23 @@ test -d /usr/lib/postgresql$NEW_VERSION/bin echo "Testing presence of postgresql$OLD_VERSION..." test -d /usr/lib/postgresql$OLD_VERSION/bin -# Data have already been backed up at the end of the migration script -# Reset the potentially created new pgsql data -rm -rf /var/lib/pgsql/data +# Create a backup copy of the data to prepare DB upgrade. +echo "Backing up the database files ..." +mv /var/lib/pgsql/data /var/lib/pgsql/data-pg$OLD_VERSION + echo "Create new database directory..." mkdir -p /var/lib/pgsql/data chown -R postgres:postgres /var/lib/pgsql -echo "Enforce key permission" -chown -R postgres:postgres /etc/pki/tls/private/pg-spacewalk.key -chown -R postgres:postgres /etc/pki/tls/certs/spacewalk.crt + +if [ -e /etc/pki/tls/private/pg-spacewalk.key ]; then + echo "Enforce key permission" + chown -R postgres:postgres /etc/pki/tls/private/pg-spacewalk.key + chown -R postgres:postgres /etc/pki/tls/certs/spacewalk.crt +fi echo "Initialize new postgresql $NEW_VERSION database..." . /etc/sysconfig/postgresql 2>/dev/null # Load locale for SUSE PGHOME=$(getent passwd postgres | cut -d ":" -f6) -#. $PGHOME/.i18n 2>/dev/null # Load locale for Enterprise Linux if [ -z $POSTGRES_LANG ]; then POSTGRES_LANG="en_US.UTF-8" [ ! -z $LC_CTYPE ] && POSTGRES_LANG=$LC_CTYPE @@ -46,9 +49,15 @@ echo "Any suggested command from the console should be run using postgres user" su -s /bin/bash - postgres -c "initdb -D /var/lib/pgsql/data --locale=$POSTGRES_LANG" echo "Successfully initialized new postgresql $NEW_VERSION database." 
+echo "Temporarily disable SSL in the old posgresql configuration" +cp /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf.bak +sed 's/^ssl/#ssl/' -i /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf su -s /bin/bash - postgres -c "pg_upgrade --old-bindir=/usr/lib/postgresql$OLD_VERSION/bin --new-bindir=/usr/lib/postgresql$NEW_VERSION/bin --old-datadir=/var/lib/pgsql/data-pg$OLD_VERSION --new-datadir=/var/lib/pgsql/data $FAST_UPGRADE" +echo "Enable SSL again" +cp /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf.bak /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf + cp /var/lib/pgsql/data-pg$OLD_VERSION/pg_hba.conf /var/lib/pgsql/data cp /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf /var/lib/pgsql/data/ @@ -58,7 +67,6 @@ echo "DONE"` type PostgreSQLVersionUpgradeTemplateData struct { OldVersion string NewVersion string - Kubernetes bool } // Render will create PostgreSQL upgrade script. diff --git a/mgradm/shared/utils/cmd_utils.go b/mgradm/shared/utils/cmd_utils.go index 48f35b271..2d3cb7479 100644 --- a/mgradm/shared/utils/cmd_utils.go +++ b/mgradm/shared/utils/cmd_utils.go @@ -24,6 +24,31 @@ type HelmFlags struct { CertManager types.ChartFlags } +// VolumeFlags stores the persistent volume claims configuration. +type VolumesFlags struct { + // Class is the default storage class for all the persistent volume claims. + Class string + // Database is the configuration of the var-pgsql volume. + Database VolumeFlags + // Packages is the configuration of the var-spacewalk volume containing the synchronizede repositories. + Packages VolumeFlags + // Www is the configuration of the srv-www volume containing the imags and distributions. + Www VolumeFlags + // Cache is the configuration of the var-cache volume. + Cache VolumeFlags + // Mirror is the PersistentVolume name to use in case of a mirror setup. + // An empty value means no mirror will be used. + Mirror string +} + +// VolumeFlags is the configuration of one volume. +type VolumeFlags struct { + // Size is the requested size of the volume using kubernetes values like '100Gi'. + Size string + // Class is the storage class of the volume. + Class string +} + // SslCertFlags can store SSL Certs information. type SslCertFlags struct { Cnames []string `mapstructure:"cname"` @@ -74,6 +99,39 @@ func AddHelmInstallFlag(cmd *cobra.Command) { _ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-values", "helm") } +const volumesFlagsGroupId = "volumes" + +// AddVolumesFlags adds the Kubernetes volumes configuration parameters to the command. +func AddVolumesFlags(cmd *cobra.Command) { + cmd.Flags().String("volumes-class", "", L("Default storage class for all the volumes")) + cmd.Flags().String("volumes-mirror", "", + L("PersistentVolume name to use as a mirror. 
Empty means no mirror is used"), + ) + + _ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: volumesFlagsGroupId, Title: L("Volumes Configuration Flags")}) + _ = utils.AddFlagToHelpGroupID(cmd, "volumes-class", volumesFlagsGroupId) + _ = utils.AddFlagToHelpGroupID(cmd, "volumes-mirror", volumesFlagsGroupId) + + addVolumeFlags(cmd, "database", "var-pgsql", "50Gi") + addVolumeFlags(cmd, "packages", "var-spacewalk", "100Gi") + addVolumeFlags(cmd, "www", "srv-www", "100Gi") + addVolumeFlags(cmd, "cache", "var-cache", "10Gi") +} + +func addVolumeFlags(cmd *cobra.Command, name string, volumeName string, size string) { + sizeName := fmt.Sprintf("volumes-%s-size", name) + cmd.Flags().String( + sizeName, size, fmt.Sprintf(L("Requested size for the %s volume"), volumeName), + ) + _ = utils.AddFlagToHelpGroupID(cmd, sizeName, volumesFlagsGroupId) + + className := fmt.Sprintf("volumes-%s-class", name) + cmd.Flags().String( + className, "", fmt.Sprintf(L("Requested storage class for the %s volume"), volumeName), + ) + _ = utils.AddFlagToHelpGroupID(cmd, className, volumesFlagsGroupId) +} + // AddContainerImageFlags add container image flags to command. func AddContainerImageFlags( cmd *cobra.Command, diff --git a/mgradm/shared/utils/exec.go b/mgradm/shared/utils/exec.go index d3911e0fc..2f9f4dee1 100644 --- a/mgradm/shared/utils/exec.go +++ b/mgradm/shared/utils/exec.go @@ -48,11 +48,10 @@ func ExecCommand(logLevel zerolog.Level, cnx *shared.Connection, args ...string) } // GeneratePgsqlVersionUpgradeScript generates the PostgreSQL version upgrade script. -func GeneratePgsqlVersionUpgradeScript(scriptDir string, oldPgVersion string, newPgVersion string, kubernetes bool) (string, error) { +func GeneratePgsqlVersionUpgradeScript(scriptDir string, oldPgVersion string, newPgVersion string) (string, error) { data := templates.PostgreSQLVersionUpgradeTemplateData{ OldVersion: oldPgVersion, NewVersion: newPgVersion, - Kubernetes: kubernetes, } scriptName := "pgsqlVersionUpgrade.sh" diff --git a/mgrpxy/shared/kubernetes/deploy.go b/mgrpxy/shared/kubernetes/deploy.go index 8d208e5a6..3451a0fb4 100644 --- a/mgrpxy/shared/kubernetes/deploy.go +++ b/mgrpxy/shared/kubernetes/deploy.go @@ -86,7 +86,7 @@ func Deploy(imageFlags *utils.ProxyImageFlags, helmFlags *HelmFlags, configDir s "--set", "images.proxy-tftpd="+imageFlags.GetContainerImage("tftpd"), "--set", "repository="+imageFlags.Registry, "--set", "version="+imageFlags.Tag, - "--set", "pullPolicy="+kubernetes.GetPullPolicy(imageFlags.PullPolicy)) + "--set", "pullPolicy="+string(kubernetes.GetPullPolicy(imageFlags.PullPolicy))) helmParams = append(helmParams, helmArgs...) diff --git a/shared/kubernetes/apply.go b/shared/kubernetes/apply.go new file mode 100644 index 000000000..04889de25 --- /dev/null +++ b/shared/kubernetes/apply.go @@ -0,0 +1,62 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "os" + "path" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/printers" +) + +// Apply runs kubectl apply for the provided objects. +// +// The message should be a user-friendly localized message to provide in case of error. 
+func Apply[T runtime.Object](objects []T, message string) error { + tempDir, err := utils.TempDir() + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + // Write the object definitions to a temporary YAML file and apply it + definitionPath := path.Join(tempDir, "definition.yaml") + if err := YamlFile(objects, definitionPath); err != nil { + return err + } + + if err := utils.RunCmdStdMapping(zerolog.DebugLevel, "kubectl", "apply", "-f", definitionPath); err != nil { + return utils.Errorf(err, message) + } + return nil +} + +// YamlFile generates a YAML file from a list of kubernetes objects. +func YamlFile[T runtime.Object](objects []T, path string) error { + printer := printers.YAMLPrinter{} + file, err := os.Create(path) + if err != nil { + return utils.Errorf(err, L("failed to create %s YAML file"), path) + } + defer func() { + if err := file.Close(); err != nil { + log.Error().Err(err).Msgf(L("failed to close %s YAML file"), path) + } + }() + + for _, obj := range objects { + err = printer.PrintObj(obj, file) + if err != nil { + return utils.Errorf(err, L("failed to write the object to file")) + } + } + + return nil +} diff --git a/shared/kubernetes/converters.go b/shared/kubernetes/converters.go new file mode 100644 index 000000000..e4cc663c7 --- /dev/null +++ b/shared/kubernetes/converters.go @@ -0,0 +1,43 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "github.com/uyuni-project/uyuni-tools/shared/types" + core "k8s.io/api/core/v1" +) + +// ConvertVolumeMounts converts the internal volume mounts into Kubernetes' ones. +func ConvertVolumeMounts(mounts []types.VolumeMount) []core.VolumeMount { + res := []core.VolumeMount{} + + for _, mount := range mounts { + converted := core.VolumeMount{ + Name: mount.Name, + MountPath: mount.MountPath, + } + res = append(res, converted) + } + + return res +} + +// ConvertPortMaps converts the internal port maps to Kubernetes ContainerPorts. +func ConvertPortMaps(ports []types.PortMap) []core.ContainerPort { + res := []core.ContainerPort{} + + for _, port := range ports { + protocol := core.ProtocolTCP + if port.Protocol == "UDP" { + protocol = core.ProtocolUDP + } + converted := core.ContainerPort{ + ContainerPort: int32(port.Exposed), + Protocol: protocol, + } + res = append(res, converted) + } + return res +} diff --git a/shared/kubernetes/job.go b/shared/kubernetes/job.go new file mode 100644 index 000000000..ef207a8b7 --- /dev/null +++ b/shared/kubernetes/job.go @@ -0,0 +1,68 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "strings" + + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetScriptJob prepares the definition of a kubernetes job running a shell script from a template. 
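+// The rendered script is wrapped in a "sh -c" command, and the job is created with a zero BackoffLimit so a failed script is not retried.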
+func GetScriptJob( + namespace string, + name string, + image string, + pullPolicy string, + mounts []types.VolumeMount, + template utils.Template, +) (*batch.Job, error) { + var maxFailures int32 = 0 + + // Convert our mounts to Kubernetes objects + volumeMounts := ConvertVolumeMounts(mounts) + volumes := CreateVolumes(mounts) + + // Prepare the script + scriptBuilder := new(strings.Builder) + if err := template.Render(scriptBuilder); err != nil { + return nil, err + } + + // Create the job object running the script wrapped as a sh command + job := batch.Job{ + TypeMeta: meta.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: batch.JobSpec{ + Template: core.PodTemplateSpec{ + Spec: core.PodSpec{ + Containers: []core.Container{ + { + Name: "runner", + Image: image, + ImagePullPolicy: GetPullPolicy(pullPolicy), + Command: []string{"sh", "-c", scriptBuilder.String()}, + VolumeMounts: volumeMounts, + }, + }, + Volumes: volumes, + RestartPolicy: core.RestartPolicyNever, + }, + }, + BackoffLimit: &maxFailures, + }, + } + + return &job, nil +} diff --git a/shared/kubernetes/pvc.go b/shared/kubernetes/pvc.go new file mode 100644 index 000000000..24ae40794 --- /dev/null +++ b/shared/kubernetes/pvc.go @@ -0,0 +1,238 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// CreatePersistentVolumeClaims creates all the PVCs described by the mounts. +func CreatePersistentVolumeClaims( + namespace string, + mounts []types.VolumeMount, +) error { + pvcs := GetPersistentVolumeClaims( + namespace, + "", + core.ReadWriteOnce, + false, + mounts, + ) + return Apply(pvcs, L("failed to run the persistent volume claims")) +} + +// Contains the data extracted from the PV to create the linked PVC for it. +type pvData struct { + ClaimRef struct { + Name string + Namespace string + } + StorageClass string + AccessModes []core.PersistentVolumeAccessMode + Size string +} + +// CreatePersistentVolumeClaimForVolume creates a PVC bound to a specific Volume. 
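+// The volume is expected to carry a claimRef matching both the claim name and the target namespace, as checked below.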
diff --git a/shared/kubernetes/pvc.go b/shared/kubernetes/pvc.go
new file mode 100644
index 000000000..24ae40794
--- /dev/null
+++ b/shared/kubernetes/pvc.go
@@ -0,0 +1,238 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// CreatePersistentVolumeClaims creates all the PVCs described by the mounts.
+func CreatePersistentVolumeClaims(
+	namespace string,
+	mounts []types.VolumeMount,
+) error {
+	pvcs := GetPersistentVolumeClaims(
+		namespace,
+		"",
+		core.ReadWriteOnce,
+		false,
+		mounts,
+	)
+	return Apply(pvcs, L("failed to create the persistent volume claims"))
+}
+
+// pvData contains the data extracted from the PV to create the linked PVC for it.
+type pvData struct {
+	ClaimRef struct {
+		Name      string
+		Namespace string
+	}
+	StorageClass string
+	AccessModes  []core.PersistentVolumeAccessMode
+	Size         string
+}
+
+// CreatePersistentVolumeClaimForVolume creates a PVC bound to a specific Volume.
+func CreatePersistentVolumeClaimForVolume(
+	namespace string,
+	volumeName string,
+) error {
+	// Get the PV storage class and claimRef
+	out, err := utils.RunCmdOutput(zerolog.DebugLevel,
+		"kubectl", "get", "pv", volumeName, "-n", namespace,
+		"-o", `jsonpath={"{\"claimRef\": "}{.spec.claimRef}, "storageClass": "{.spec.storageClassName}", `+
+			`"accessModes": {.spec.accessModes}, "size": "{.spec.capacity.storage}{"\"}"}`,
+	)
+	if err != nil {
+		return err
+	}
+	var pv pvData
+	if err := json.Unmarshal(out, &pv); err != nil {
+		return utils.Errorf(err, L("failed to parse pv data"))
+	}
+
+	// Ensure the claimRef of the volume is for our PVC
+	if pv.ClaimRef.Name != volumeName || pv.ClaimRef.Namespace != namespace {
+		return fmt.Errorf(L("the %[1]s volume should reference the %[1]s claim in the %[2]s namespace"), volumeName, namespace)
+	}
+
+	// Create the PVC object
+	pvc := newPersistentVolumeClaim(namespace, volumeName, pv.StorageClass, pv.Size, pv.AccessModes, false)
+
+	return Apply([]runtime.Object{&pvc}, L("failed to create the persistent volume claim"))
+}
+
+// GetPersistentVolumeClaims creates the PVC objects matching a list of volume mounts.
+func GetPersistentVolumeClaims(
+	namespace string,
+	storageClass string,
+	accessMode core.PersistentVolumeAccessMode,
+	matchPvByLabel bool,
+	mounts []types.VolumeMount,
+) []*core.PersistentVolumeClaim {
+	var claims []*core.PersistentVolumeClaim
+
+	for _, mount := range mounts {
+		size := mount.Size
+		if size == "" {
+			log.Warn().Msgf(L("no size defined for PersistentVolumeClaim %s, using 10Mi as default"), mount.Name)
+			size = "10Mi"
+		}
+		pvc := newPersistentVolumeClaim(
+			namespace, mount.Name, storageClass, size, []core.PersistentVolumeAccessMode{accessMode}, matchPvByLabel,
+		)
+		claims = append(claims, &pvc)
+	}
+
+	return claims
+}
+
+// newPersistentVolumeClaim creates a PVC from a few common values.
+func newPersistentVolumeClaim(
+	namespace string,
+	name string,
+	storageClass string,
+	size string,
+	accessModes []core.PersistentVolumeAccessMode,
+	matchPvByLabel bool,
+) core.PersistentVolumeClaim {
+	pvc := core.PersistentVolumeClaim{
+		TypeMeta: v1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "PersistentVolumeClaim",
+		},
+		ObjectMeta: v1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: core.PersistentVolumeClaimSpec{
+			AccessModes: accessModes,
+			Resources: core.VolumeResourceRequirements{
+				Requests: core.ResourceList{"storage": resource.MustParse(size)},
+			},
+		},
+	}
+
+	if storageClass != "" {
+		pvc.Spec.StorageClassName = &storageClass
+	}
+
+	if matchPvByLabel {
+		pvc.Spec.Selector = &v1.LabelSelector{
+			MatchLabels: map[string]string{"data": name},
+		}
+	}
+
+	return pvc
+}
+
+// createMount builds a volume mount with a name derived from the mount path.
+func createMount(mountPath string) core.VolumeMount {
+	pattern := regexp.MustCompile("[^a-zA-Z]+")
+	name := strings.Trim(pattern.ReplaceAllString(mountPath, "-"), "-")
+	return core.VolumeMount{
+		MountPath: mountPath,
+		Name:      name,
+	}
+}
+
+// CreateTmpfsMount creates a temporary volume and its mount.
+func CreateTmpfsMount(mountPath string, size string) (core.VolumeMount, core.Volume) {
+	mount := createMount(mountPath)
+
+	parsedSize := resource.MustParse(size)
+	volume := core.Volume{
+		Name: mount.Name,
+		VolumeSource: core.VolumeSource{
+			EmptyDir: &core.EmptyDirVolumeSource{
+				Medium:    core.StorageMediumMemory,
+				SizeLimit: &parsedSize,
+			},
+		},
+	}
+	return mount, volume
+}
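Review note: for illustration, a hedged sketch combining the claim and tmpfs helpers when assembling a pod. The pod variable, namespace, and scratch path are hypothetical:

    // Create the PVCs for the persistent mounts first...
    if err := kubernetes.CreatePersistentVolumeClaims("uyuni", utils.PgsqlRequiredVolumeMounts); err != nil {
        return err
    }
    // ...then add an in-memory scratch space to a hypothetical pod definition.
    mount, volume := kubernetes.CreateTmpfsMount("/run/scratch", "256Mi")
    pod.Spec.Volumes = append(pod.Spec.Volumes, volume)
    pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, mount)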
+// CreateHostPathMount creates the mount and volume for a host path.
+// This is not secure and is tied to availability on the node; only use when needed.
+func CreateHostPathMount(
+	mountPath string,
+	hostPath string,
+	sourceType core.HostPathType,
+) (core.VolumeMount, core.Volume) {
+	mount := createMount(mountPath)
+
+	volume := core.Volume{
+		Name: mount.Name,
+		VolumeSource: core.VolumeSource{
+			HostPath: &core.HostPathVolumeSource{
+				Path: hostPath,
+				Type: &sourceType,
+			},
+		},
+	}
+	return mount, volume
+}
+
+// CreateSecretVolume creates the volume for a secret.
+func CreateSecretVolume(name string, secretName string) core.Volume {
+	volume := core.Volume{
+		Name: name,
+		VolumeSource: core.VolumeSource{
+			Secret: &core.SecretVolumeSource{
+				SecretName: secretName,
+			},
+		},
+	}
+
+	return volume
+}
+
+// CreateConfigVolume creates the volume for a ConfigMap.
+func CreateConfigVolume(name string, configMapName string) core.Volume {
+	volume := core.Volume{
+		Name: name,
+		VolumeSource: core.VolumeSource{
+			ConfigMap: &core.ConfigMapVolumeSource{
+				LocalObjectReference: core.LocalObjectReference{
+					Name: configMapName,
+				},
+			},
+		},
+	}
+
+	return volume
+}
+
+// CreateVolumes creates PVC-based volumes matching the internal volume mounts.
+func CreateVolumes(mounts []types.VolumeMount) []core.Volume {
+	volumes := []core.Volume{}
+
+	for _, mount := range mounts {
+		volume := core.Volume{
+			Name: mount.Name,
+			VolumeSource: core.VolumeSource{
+				PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
+					ClaimName: mount.Name,
+				},
+			},
+		}
+		volumes = append(volumes, volume)
+	}
+
+	return volumes
+}
diff --git a/shared/kubernetes/utils.go b/shared/kubernetes/utils.go
index 844793f43..76d0c7f21 100644
--- a/shared/kubernetes/utils.go
+++ b/shared/kubernetes/utils.go
@@ -17,6 +17,7 @@ import (
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
 	"github.com/uyuni-project/uyuni-tools/shared/types"
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
 )
 
 // ServerApp represent the server app name.
@@ -234,12 +235,12 @@ func addNamespace(args []string, namespace string) []string {
 	return args
 }
 
-// GetPullPolicy return pullpolicy in lower case, if exists.
-func GetPullPolicy(name string) string {
-	policies := map[string]string{
-		"always":       "Always",
-		"never":        "Never",
-		"ifnotpresent": "IfNotPresent",
+// GetPullPolicy returns the Kubernetes PullPolicy value if it exists.
+func GetPullPolicy(name string) core.PullPolicy {
+	policies := map[string]core.PullPolicy{
+		"always":       core.PullAlways,
+		"never":        core.PullNever,
+		"ifnotpresent": core.PullIfNotPresent,
 	}
 	policy := policies[strings.ToLower(name)]
 	if policy == "" {
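Review note: with the typed return value, the helper now slots directly into container definitions; a small sketch (the image variable is hypothetical):

    container := core.Container{
        Name:            "runner",
        Image:           image,
        ImagePullPolicy: kubernetes.GetPullPolicy("ifnotpresent"), // core.PullIfNotPresent
    }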
diff --git a/shared/kubernetes/waiters.go b/shared/kubernetes/waiters.go
new file mode 100644
index 000000000..68f06acc5
--- /dev/null
+++ b/shared/kubernetes/waiters.go
@@ -0,0 +1,73 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/rs/zerolog"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+)
+
+// WaitForSecret waits for a secret to be available.
+func WaitForSecret(namespace string, secret string) {
+	for {
+		if err := utils.RunCmd("kubectl", "get", "-n", namespace, "secret", secret); err == nil {
+			break
+		}
+		time.Sleep(1 * time.Second)
+	}
+}
+
+// WaitForJob waits up to timeout seconds for a job to complete.
+//
+// If the timeout value is 0, the job is waited for indefinitely.
+func WaitForJob(namespace string, name string, timeout int) error {
+	for i := 0; ; i++ {
+		status, err := jobStatus(namespace, name)
+		if err != nil {
+			return err
+		}
+		if status == "error" {
+			return fmt.Errorf(
+				L("%[1]s job failed, run kubectl logs -n %[2]s -ljob-name=%[1]s for details"),
+				name, namespace,
+			)
+		}
+		if status == "success" {
+			return nil
+		}
+
+		if timeout > 0 && i == timeout {
+			return fmt.Errorf(L("%[1]s job failed to complete within %[2]d seconds"), name, timeout)
+		}
+		time.Sleep(1 * time.Second)
+	}
+}
+
+func jobStatus(namespace string, name string) (string, error) {
+	out, err := utils.RunCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "job", "-n", namespace, name,
+		"-o", "jsonpath={.status.succeeded},{.status.failed}",
+	)
+	if err != nil {
+		return "", utils.Errorf(err, L("failed to get %s job status"), name)
+	}
+	results := strings.SplitN(strings.TrimSpace(string(out)), ",", 2)
+	if len(results) != 2 {
+		return "", fmt.Errorf(L("invalid job status response: '%s'"), string(out))
+	}
+	if results[0] == "1" {
+		return "success", nil
+	} else if results[1] == "1" {
+		return "error", nil
+	}
+	return "", nil
+}
diff --git a/shared/types/deployment.go b/shared/types/deployment.go
index 67606f33e..2bf35596f 100644
--- a/shared/types/deployment.go
+++ b/shared/types/deployment.go
@@ -7,6 +7,8 @@ package types
 type VolumeMount struct {
 	MountPath string `json:"mountPath,omitempty"`
 	Name      string `json:"name,omitempty"`
+	Size      string `json:"size,omitempty"`
+	Class     string `json:"class,omitempty"`
 }
 
 // Container type used for mapping pod definition structure.
diff --git a/shared/utils/exec.go b/shared/utils/exec.go
index ffd5dc61d..57c8fe682 100644
--- a/shared/utils/exec.go
+++ b/shared/utils/exec.go
@@ -5,6 +5,8 @@
 package utils
 
 import (
+	"bytes"
+	"errors"
 	"fmt"
 	"os/exec"
 	"strings"
@@ -64,11 +66,18 @@ func RunCmdOutput(logLevel zerolog.Level, command string, args ...string) ([]byt
 		s.Start() // Start the spinner
 	}
 	localLogger.Debug().Msgf("Running: %s %s", command, strings.Join(args, " "))
-	output, err := exec.Command(command, args...).Output()
+	cmd := exec.Command(command, args...)
+	var errBuf bytes.Buffer
+	cmd.Stderr = &errBuf
+	output, err := cmd.Output()
 	if logLevel != zerolog.Disabled {
 		s.Stop()
 	}
 	localLogger.Trace().Msgf("Command output: %s, error: %s", output, err)
+	message := strings.TrimSpace(errBuf.String())
+	if err != nil && message != "" {
+		err = errors.New(message)
+	}
 	return output, err
 }
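Review note: with the exec.go change, a failing command's stderr becomes the returned error message instead of a bare "exit status N". A hedged illustration; the error text shown is an example, not guaranteed kubectl output:

    out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "get", "pod", "no-such-pod")
    if err != nil {
        // err.Error() now carries kubectl's own diagnostic, e.g.
        //   Error from server (NotFound): pods "no-such-pod" not found
        return utils.Errorf(err, L("failed to get pod"))
    }
    _ = out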
diff --git a/shared/utils/inspector.go b/shared/utils/inspector.go
index addf963c6..99f45a3fb 100644
--- a/shared/utils/inspector.go
+++ b/shared/utils/inspector.go
@@ -83,6 +83,13 @@ func ReadInspectData[T any](dataFile string) (*T, error) {
 		return nil, Errorf(err, L("cannot read file %s"), dataFile)
 	}
 
+	return ReadInspectDataString[T](data)
+}
+
+// ReadInspectDataString returns an unmarshalled object of type T from the given inspection data.
+//
+// This function is most likely to be used by inspector implementations, but can also be used directly.
+func ReadInspectDataString[T any](data []byte) (*T, error) {
 	viper.SetConfigType("env")
 	if err := viper.MergeConfig(bytes.NewBuffer(data)); err != nil {
 		return nil, Errorf(err, L("cannot read config"))
diff --git a/shared/utils/utils.go b/shared/utils/utils.go
index d942857cb..4bc0dde09 100644
--- a/shared/utils/utils.go
+++ b/shared/utils/utils.go
@@ -42,6 +42,7 @@ type InspectResult struct {
 	CommonInspectData `mapstructure:",squash"`
 	Timezone          string
 	HasHubXmlrpcApi   bool `mapstructure:"has_hubxmlrpc"`
+	Debug             bool `mapstructure:"debug"`
 }
 
 func checkValueSize(value string, min int, max int) bool {
@@ -287,6 +288,15 @@ func UninstallFile(path string, dryRun bool) {
 	}
 }
 
+// TempDir creates a temporary directory.
+func TempDir() (string, error) {
+	tempDir, err := os.MkdirTemp("", "mgradm-*")
+	if err != nil {
+		return "", Errorf(err, L("failed to create temporary directory"))
+	}
+	return tempDir, nil
+}
+
 // GetRandomBase64 generates random base64-encoded data.
 func GetRandomBase64(size int) string {
 	data := make([]byte, size)
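Review note: a hedged sketch of consuming ReadInspectDataString together with the new Debug flag. The payload mimics the env-style key=value format the inspectors emit; apart from has_hubxmlrpc and debug shown above, the keys are illustrative assumptions:

    data := []byte("Timezone=UTC\nhas_hubxmlrpc=true\ndebug=true\n")
    result, err := utils.ReadInspectDataString[utils.InspectResult](data)
    if err != nil {
        return err
    }
    log.Info().Bool("debug", result.Debug).Msg("parsed inspection data")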
diff --git a/shared/utils/volumes.go b/shared/utils/volumes.go
index 1b8f48fb7..c172a2a0d 100644
--- a/shared/utils/volumes.go
+++ b/shared/utils/volumes.go
@@ -8,9 +8,9 @@ import "github.com/uyuni-project/uyuni-tools/shared/types"
 
 // PgsqlRequiredVolumeMounts represents volumes mount used by PostgreSQL.
 var PgsqlRequiredVolumeMounts = []types.VolumeMount{
-	{MountPath: "/etc/pki/tls", Name: "etc-tls"},
-	{MountPath: "/var/lib/pgsql", Name: "var-pgsql"},
-	{MountPath: "/etc/rhn", Name: "etc-rhn"},
+	{MountPath: "/etc/pki/tls", Name: "etc-tls", Size: "1Mi"},
+	{MountPath: "/var/lib/pgsql", Name: "var-pgsql", Size: "50Gi"},
+	{MountPath: "/etc/rhn", Name: "etc-rhn", Size: "1Mi"},
 	{MountPath: "/etc/pki/spacewalk-tls", Name: "tls-key"},
 }
 
@@ -29,18 +29,17 @@ var PgsqlRequiredVolumes = []types.Volume{
 	},
 }
 
-// EtcServerVolumeMounts represents volumes mounted in /etc folder.
-var EtcServerVolumeMounts = []types.VolumeMount{
-	{MountPath: "/etc/apache2", Name: "etc-apache2"},
-	{MountPath: "/etc/systemd/system/multi-user.target.wants", Name: "etc-systemd-multi"},
-	{MountPath: "/etc/systemd/system/sockets.target.wants", Name: "etc-systemd-sockets"},
-	{MountPath: "/etc/salt", Name: "etc-salt"},
-	{MountPath: "/etc/rhn", Name: "etc-rhn"},
-	{MountPath: "/etc/tomcat", Name: "etc-tomcat"},
-	{MountPath: "/etc/cobbler", Name: "etc-cobbler"},
-	{MountPath: "/etc/sysconfig", Name: "etc-sysconfig"},
-	{MountPath: "/etc/postfix", Name: "etc-postfix"},
-	{MountPath: "/etc/sssd", Name: "etc-sssd"},
+// etcServerVolumeMounts represents volumes mounted in the /etc folder.
+var etcServerVolumeMounts = []types.VolumeMount{
+	{MountPath: "/etc/apache2", Name: "etc-apache2", Size: "1Mi"},
+	{MountPath: "/etc/systemd/system/multi-user.target.wants", Name: "etc-systemd-multi", Size: "1Mi"},
+	{MountPath: "/etc/systemd/system/sockets.target.wants", Name: "etc-systemd-sockets", Size: "1Mi"},
+	{MountPath: "/etc/salt", Name: "etc-salt", Size: "1Mi"},
+	{MountPath: "/etc/tomcat", Name: "etc-tomcat", Size: "1Mi"},
+	{MountPath: "/etc/cobbler", Name: "etc-cobbler", Size: "1Mi"},
+	{MountPath: "/etc/sysconfig", Name: "etc-sysconfig", Size: "20Mi"},
+	{MountPath: "/etc/postfix", Name: "etc-postfix", Size: "1Mi"},
+	{MountPath: "/etc/sssd", Name: "etc-sssd", Size: "1Mi"},
 }
 
 // EtcServerVolumes represents volumes used for configuration.
@@ -57,26 +56,26 @@ var EtcServerVolumes = []types.Volume{
 	{Name: "etc-sssd", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-sssd"}},
 }
 
-var etcAndPgsqlVolumeMounts = append(PgsqlRequiredVolumeMounts, EtcServerVolumeMounts[:]...)
+var etcAndPgsqlVolumeMounts = append(PgsqlRequiredVolumeMounts, etcServerVolumeMounts[:]...)
 var etcAndPgsqlVolumes = append(PgsqlRequiredVolumes, EtcServerVolumes[:]...)
 
 // ServerVolumeMounts should match the volumes mapping from the container definition in both
 // the helm chart and the systemctl services definitions.
 var ServerVolumeMounts = append([]types.VolumeMount{
-	{MountPath: "/var/lib/cobbler", Name: "var-cobbler"},
-	{MountPath: "/var/lib/salt", Name: "var-salt"},
-	{MountPath: "/var/cache", Name: "var-cache"},
-	{MountPath: "/var/spacewalk", Name: "var-spacewalk"},
-	{MountPath: "/var/log", Name: "var-log"},
-	{MountPath: "/srv/salt", Name: "srv-salt"},
-	{MountPath: "/srv/www/", Name: "srv-www"},
-	{MountPath: "/srv/tftpboot", Name: "srv-tftpboot"},
-	{MountPath: "/srv/formula_metadata", Name: "srv-formulametadata"},
-	{MountPath: "/srv/pillar", Name: "srv-pillar"},
-	{MountPath: "/srv/susemanager", Name: "srv-susemanager"},
-	{MountPath: "/srv/spacewalk", Name: "srv-spacewalk"},
-	{MountPath: "/root", Name: "root"},
-	{MountPath: "/etc/pki/trust/anchors", Name: "ca-cert"},
+	{MountPath: "/var/lib/cobbler", Name: "var-cobbler", Size: "10Mi"},
+	{MountPath: "/var/lib/salt", Name: "var-salt", Size: "10Mi"},
+	{MountPath: "/var/cache", Name: "var-cache", Size: "10Gi"},
+	{MountPath: "/var/spacewalk", Name: "var-spacewalk", Size: "100Gi"},
+	{MountPath: "/var/log", Name: "var-log", Size: "2Gi"},
+	{MountPath: "/srv/salt", Name: "srv-salt", Size: "10Mi"},
+	{MountPath: "/srv/www/", Name: "srv-www", Size: "100Gi"},
+	{MountPath: "/srv/tftpboot", Name: "srv-tftpboot", Size: "300Mi"},
+	{MountPath: "/srv/formula_metadata", Name: "srv-formulametadata", Size: "10Mi"},
+	{MountPath: "/srv/pillar", Name: "srv-pillar", Size: "10Mi"},
+	{MountPath: "/srv/susemanager", Name: "srv-susemanager", Size: "1Mi"},
+	{MountPath: "/srv/spacewalk", Name: "srv-spacewalk", Size: "10Mi"},
+	{MountPath: "/root", Name: "root", Size: "1Mi"},
+	{MountPath: "/etc/pki/trust/anchors/", Name: "ca-cert"},
 }, etcAndPgsqlVolumeMounts[:]...)
 
 // ServerVolumes match the volume with Persistent Volume Claim.