From ff14117f809e10fb8a0865c433b31cdde36a8e60 Mon Sep 17 00:00:00 2001 From: Vanessasaurus <814322+vsoch@users.noreply.github.com> Date: Wed, 26 Jul 2023 20:35:52 -0600 Subject: [PATCH] add goshare example (#195) * add goshare example * remove pre_command from test files * add missing start template this is working between two containers, but not from flux run Signed-off-by: vsoch --- api/v1alpha1/minicluster_types.go | 34 +- api/v1alpha1/swagger.json | 18 +- api/v1alpha1/zz_generated.deepcopy.go | 7 +- api/v1alpha1/zz_generated.openapi.go | 31 +- chart/templates/minicluster-crd.yaml | 27 +- .../flux-framework.org_miniclusters.yaml | 27 +- controllers/flux/containers.go | 16 + controllers/flux/job.go | 15 +- controllers/flux/minicluster.go | 27 +- controllers/flux/templates.go | 5 +- controllers/flux/templates/start.sh | 19 + controllers/flux/templates/wait.sh | 3 - controllers/flux/volumes.go | 10 + docs/development/debugging.md | 19 + .../custom-resource-definition.md | 17 +- docs/tutorials/index.md | 3 + examples/dist/flux-operator-arm.yaml | 27 +- examples/dist/flux-operator.yaml | 27 +- .../shared-process-space/README.md | 57 ++ .../shared-process-space/minicluster.yaml | 74 ++ .../tests/osu-benchmarks/minicluster.yaml | 7 +- examples/tests/snakemake/minicluster.yaml | 11 +- sdk/python/v1alpha1/.openapi-generator/FILES | 9 + .../v1alpha1/docs/MiniClusterContainer.md | 1 - sdk/python/v1alpha1/docs/MiniClusterSpec.md | 1 + sdk/python/v1alpha1/docs/SecurityContext.md | 1 + .../models/mini_cluster_container.py | 30 +- .../fluxoperator/models/mini_cluster_spec.py | 30 +- .../fluxoperator/models/security_context.py | 30 +- sdk/python/v1alpha1/test/test_mini_cluster.py | 330 +++++---- .../test/test_mini_cluster_archive.py | 21 +- .../test/test_mini_cluster_container.py | 140 ++-- .../test/test_mini_cluster_existing_volume.py | 31 +- .../v1alpha1/test/test_mini_cluster_list.py | 658 ++++++++++-------- .../v1alpha1/test/test_mini_cluster_spec.py | 535 +++++++------- 
.../v1alpha1/test/test_mini_cluster_status.py | 30 +- .../v1alpha1/test/test_mini_cluster_user.py | 21 +- .../v1alpha1/test/test_mini_cluster_volume.py | 50 +- 38 files changed, 1458 insertions(+), 941 deletions(-) create mode 100644 controllers/flux/templates/start.sh create mode 100644 examples/experimental/shared-process-space/README.md create mode 100755 examples/experimental/shared-process-space/minicluster.yaml diff --git a/api/v1alpha1/minicluster_types.go b/api/v1alpha1/minicluster_types.go index 607844e3..b7d7f7b8 100644 --- a/api/v1alpha1/minicluster_types.go +++ b/api/v1alpha1/minicluster_types.go @@ -68,6 +68,10 @@ type MiniClusterSpec struct { // +optional Archive MiniClusterArchive `json:"archive"` + // Share process namespace? + // +optional + ShareProcessNamespace bool `json:"shareProcessNamespace"` + // Customization to Flux Restful API // There should only be one container to run flux with runFlux // +optional @@ -548,13 +552,6 @@ type MiniClusterContainer struct { // +optional ExistingVolumes map[string]MiniClusterExistingVolume `json:"existingVolumes"` - // Special command to run at beginning of script, directly after asFlux - // is defined as sudo -u flux -E (so you can change that if desired.) - // This is only valid if FluxRunner is set (that writes a wait.sh script) - // This is for the indexed job pods and the certificate generation container. - // +optional - PreCommand string `json:"preCommand"` - // Lifecycle can handle post start commands, etc. 
// +optional LifeCycle LifeCycle `json:"lifeCycle"` @@ -578,6 +575,10 @@ type SecurityContext struct { // Privileged container // +optional Privileged bool `json:"privileged,omitempty"` + + // Capabilities to add + // +optional + AddCapabilities []string `json:"addCapabilities,omitempty"` } type LifeCycle struct { @@ -680,6 +681,17 @@ func (f *MiniCluster) MultiUser() bool { return len(f.Spec.Users) > 0 } +// Determine if a MiniCluster container has custom commands +// if we have custom commands and a command entrypoint we can support additional custom logic +func (c *MiniClusterContainer) HasCommands() bool { + return c.Commands.Pre != "" || c.Commands.BrokerPre != "" || c.Commands.WorkerPre != "" || c.Commands.Init != "" || c.Commands.Post != "" +} + +// Determine if we should generate a start.sh entrypoint for a sidecar +func (c *MiniClusterContainer) GenerateEntrypoint() bool { + return c.HasCommands() && !c.RunFlux && c.Command != "" +} + // Return a lookup of all container existing volumes (for the higher level Pod) // Volumes are unique by name. func (f *MiniCluster) ExistingContainerVolumes() map[string]MiniClusterExistingVolume { @@ -766,7 +778,7 @@ func (f *MiniCluster) Validate() bool { fmt.Printf("😥️ Service containers always require a name.\n") return false } - if service.PreCommand != "" || service.Commands.Pre != "" || + if service.Commands.Pre != "" || service.Commands.BrokerPre != "" || service.Commands.WorkerPre != "" { fmt.Printf("😥️ Services do not support Commands.\n") return false @@ -829,6 +841,12 @@ func (f *MiniCluster) Validate() bool { fmt.Printf("🤓 %s.Command %s\n", name, container.Command) fmt.Printf("🤓 %s.FluxRunner %t\n", name, container.RunFlux) + // A non-flux runner container with any commands also needs a command + // Don't allow the user to specify commands without a main command! + if !container.RunFlux && container.HasCommands() && container.Command == "" { + fmt.Printf("😥️ %s has commands, but not a main entrypoint command. 
Both are required to customize entrypoint logic.\n
in.Resources.DeepCopyInto(&out.Resources) out.Commands = in.Commands - out.SecurityContext = in.SecurityContext + in.SecurityContext.DeepCopyInto(&out.SecurityContext) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MiniClusterContainer. @@ -586,6 +586,11 @@ func (in *Secret) DeepCopy() *Secret { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { *out = *in + if in.AddCapabilities != nil { + in, out := &in.AddCapabilities, &out.AddCapabilities + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. diff --git a/api/v1alpha1/zz_generated.openapi.go b/api/v1alpha1/zz_generated.openapi.go index f7fbcf30..4edeb516 100644 --- a/api/v1alpha1/zz_generated.openapi.go +++ b/api/v1alpha1/zz_generated.openapi.go @@ -829,14 +829,6 @@ func schema__api_v1alpha1__MiniClusterContainer(ref common.ReferenceCallback) co }, }, }, - "preCommand": { - SchemaProps: spec.SchemaProps{ - Description: "Special command to run at beginning of script, directly after asFlux is defined as sudo -u flux -E (so you can change that if desired.) This is only valid if FluxRunner is set (that writes a wait.sh script) This is for the indexed job pods and the certificate generation container.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, "lifeCycle": { SchemaProps: spec.SchemaProps{ Description: "Lifecycle can handle post start commands, etc.", @@ -1117,6 +1109,14 @@ func schema__api_v1alpha1__MiniClusterSpec(ref common.ReferenceCallback) common. 
Ref: ref("./api/v1alpha1/.MiniClusterArchive"), }, }, + "shareProcessNamespace": { + SchemaProps: spec.SchemaProps{ + Description: "Share process namespace?", + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, "fluxRestful": { SchemaProps: spec.SchemaProps{ Description: "Customization to Flux Restful API There should only be one container to run flux with runFlux", @@ -1560,6 +1560,21 @@ func schema__api_v1alpha1__SecurityContext(ref common.ReferenceCallback) common. Format: "", }, }, + "addCapabilities": { + SchemaProps: spec.SchemaProps{ + Description: "Capabilities to add", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, }, }, diff --git a/chart/templates/minicluster-crd.yaml b/chart/templates/minicluster-crd.yaml index b38ab798..052ee7fb 100644 --- a/chart/templates/minicluster-crd.yaml +++ b/chart/templates/minicluster-crd.yaml @@ -187,13 +187,6 @@ spec: type: integer type: array x-kubernetes-list-type: atomic - preCommand: - description: Special command to run at beginning of script, directly - after asFlux is defined as sudo -u flux -E (so you can change - that if desired.) This is only valid if FluxRunner is set (that - writes a wait.sh script) This is for the indexed job pods and - the certificate generation container. 
- type: string pullAlways: default: false description: Allow the user to dictate pulling By default we pull @@ -242,6 +235,11 @@ spec: securityContext: description: Security Context https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: + addCapabilities: + description: Capabilities to add + items: + type: string + type: array privileged: description: Privileged container type: boolean @@ -617,13 +615,6 @@ spec: type: integer type: array x-kubernetes-list-type: atomic - preCommand: - description: Special command to run at beginning of script, directly - after asFlux is defined as sudo -u flux -E (so you can change - that if desired.) This is only valid if FluxRunner is set (that - writes a wait.sh script) This is for the indexed job pods and - the certificate generation container. - type: string pullAlways: default: false description: Allow the user to dictate pulling By default we pull @@ -672,6 +663,11 @@ spec: securityContext: description: Security Context https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: + addCapabilities: + description: Capabilities to add + items: + type: string + type: array privileged: description: Privileged container type: boolean @@ -697,6 +693,9 @@ spec: type: object type: array x-kubernetes-list-type: atomic + shareProcessNamespace: + description: Share process namespace? 
+ type: boolean size: default: 1 description: Size (number of job pods to run, size of minicluster in diff --git a/config/crd/bases/flux-framework.org_miniclusters.yaml b/config/crd/bases/flux-framework.org_miniclusters.yaml index ea0a10eb..4afcc7f2 100644 --- a/config/crd/bases/flux-framework.org_miniclusters.yaml +++ b/config/crd/bases/flux-framework.org_miniclusters.yaml @@ -187,13 +187,6 @@ spec: type: integer type: array x-kubernetes-list-type: atomic - preCommand: - description: Special command to run at beginning of script, - directly after asFlux is defined as sudo -u flux -E (so you - can change that if desired.) This is only valid if FluxRunner - is set (that writes a wait.sh script) This is for the indexed - job pods and the certificate generation container. - type: string pullAlways: default: false description: Allow the user to dictate pulling By default we @@ -243,6 +236,11 @@ spec: securityContext: description: Security Context https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: + addCapabilities: + description: Capabilities to add + items: + type: string + type: array privileged: description: Privileged container type: boolean @@ -622,13 +620,6 @@ spec: type: integer type: array x-kubernetes-list-type: atomic - preCommand: - description: Special command to run at beginning of script, - directly after asFlux is defined as sudo -u flux -E (so you - can change that if desired.) This is only valid if FluxRunner - is set (that writes a wait.sh script) This is for the indexed - job pods and the certificate generation container. 
- type: string pullAlways: default: false description: Allow the user to dictate pulling By default we @@ -678,6 +669,11 @@ spec: securityContext: description: Security Context https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: + addCapabilities: + description: Capabilities to add + items: + type: string + type: array privileged: description: Privileged container type: boolean @@ -704,6 +700,9 @@ spec: type: object type: array x-kubernetes-list-type: atomic + shareProcessNamespace: + description: Share process namespace? + type: boolean size: default: 1 description: Size (number of job pods to run, size of minicluster diff --git a/controllers/flux/containers.go b/controllers/flux/containers.go index 871b26bd..2014cc24 100644 --- a/controllers/flux/containers.go +++ b/controllers/flux/containers.go @@ -51,6 +51,13 @@ func (r *MiniClusterReconciler) getContainers( containerName = defaultName } + // A container not running flux can only have pre/post sections + // in a custom script if we know the entrypoint. 
+ if container.GenerateEntrypoint() { + startScript := fmt.Sprintf("/flux_operator/start-%d.sh", i) + command = []string{"/bin/bash", startScript, container.Command} + } + // Prepare lifescycle commands for the container lifecycle := r.createContainerLifecycle(container) @@ -81,8 +88,17 @@ func (r *MiniClusterReconciler) getContainers( if err != nil { return containers, err } + + addCaps := []corev1.Capability{} + for _, cap := range container.SecurityContext.AddCapabilities { + addCaps = append(addCaps, corev1.Capability(cap)) + } + securityContext := corev1.SecurityContext{ Privileged: &container.SecurityContext.Privileged, + Capabilities: &corev1.Capabilities{ + Add: addCaps, + }, } newContainer := corev1.Container{ diff --git a/controllers/flux/job.go b/controllers/flux/job.go index d8c46990..376e8ce3 100644 --- a/controllers/flux/job.go +++ b/controllers/flux/job.go @@ -61,13 +61,14 @@ func (r *MiniClusterReconciler) newMiniClusterJob( }, Spec: corev1.PodSpec{ // matches the service - Subdomain: cluster.Spec.Network.HeadlessName, - SetHostnameAsFQDN: &setAsFQDN, - Volumes: getVolumes(cluster), - RestartPolicy: corev1.RestartPolicyOnFailure, - ImagePullSecrets: getImagePullSecrets(cluster), - ServiceAccountName: cluster.Spec.Pod.ServiceAccountName, - NodeSelector: cluster.Spec.Pod.NodeSelector, + Subdomain: cluster.Spec.Network.HeadlessName, + ShareProcessNamespace: &cluster.Spec.ShareProcessNamespace, + SetHostnameAsFQDN: &setAsFQDN, + Volumes: getVolumes(cluster), + RestartPolicy: corev1.RestartPolicyOnFailure, + ImagePullSecrets: getImagePullSecrets(cluster), + ServiceAccountName: cluster.Spec.Pod.ServiceAccountName, + NodeSelector: cluster.Spec.Pod.NodeSelector, }}, }, } diff --git a/controllers/flux/minicluster.go b/controllers/flux/minicluster.go index 9f3f8fb1..9de8dde6 100644 --- a/controllers/flux/minicluster.go +++ b/controllers/flux/minicluster.go @@ -54,7 +54,7 @@ func (r *MiniClusterReconciler) ensureMiniCluster( return result, err } - // Add 
initial config map with entrypoint scripts (wait.sh, start.sh, empty update_hosts.sh) + // Add initial config map with entrypoint scripts (wait.sh, start.sh, etc.) _, result, err = r.getConfigMap(ctx, cluster, "entrypoint", cluster.Name+entrypointSuffix) if err != nil { return result, err @@ -348,13 +348,25 @@ func (r *MiniClusterReconciler) getConfigMap( // meaning a Flux Runner. for i, container := range cluster.Spec.Containers { if container.RunFlux { + r.log.Info("✨ Generating main flux container wait.sh entrypoint ✨") waitScriptID := fmt.Sprintf("wait-%d", i) - waitScript, err := generateWaitScript(cluster, i) + waitScript, err := generateEntrypointScript(cluster, i, "wait-sh", waitToStartTemplate) if err != nil { return existing, ctrl.Result{}, err } data[waitScriptID] = waitScript } + + // Custom logic for a sidecar container alongside flux + if container.GenerateEntrypoint() { + r.log.Info("✨ Generating sidecar container start.sh entrypoint ✨") + startScriptID := fmt.Sprintf("start-%d", i) + startScript, err := generateEntrypointScript(cluster, i, "start-sh", sidecarStartTemplate) + if err != nil { + return existing, ctrl.Result{}, err + } + data[startScriptID] = startScript + } } } @@ -487,8 +499,13 @@ func getRequiredRanks(cluster *api.MiniCluster) string { return generateRange(cluster.Spec.Size, 0) } -// generateWaitScript generates the main script to start everything up! -func generateWaitScript(cluster *api.MiniCluster, containerIndex int) (string, error) { +// generateEntrypointScript generates an entrypoint script to start everything up! 
+func generateEntrypointScript( + cluster *api.MiniCluster, + containerIndex int, + templateName string, + templateScriptName string, +) (string, error) { container := cluster.Spec.Containers[containerIndex] mainHost := fmt.Sprintf("%s-0", cluster.Name) @@ -534,7 +551,7 @@ func generateWaitScript(cluster *api.MiniCluster, containerIndex int) (string, e Batch: batchCommand, RequiredRanks: requiredRanks, } - t, err := template.New("wait-sh").Parse(waitToStartTemplate) + t, err := template.New(templateName).Parse(templateScriptName) if err != nil { return "", err } diff --git a/controllers/flux/templates.go b/controllers/flux/templates.go index 0e8ceaf5..c9c0d57b 100644 --- a/controllers/flux/templates.go +++ b/controllers/flux/templates.go @@ -25,7 +25,10 @@ var brokerConfigJobManagerPlugin string //go:embed templates/wait.sh var waitToStartTemplate string -// WaitTemplate populates wait.sh +//go:embed templates/start.sh +var sidecarStartTemplate string + +// WaitTemplate populates wait.sh and start.sh type WaitTemplate struct { FluxToken string // Token to log into the UI, should be consistent across containers FluxUser string // Username for Flux Restful API diff --git a/controllers/flux/templates/start.sh b/controllers/flux/templates/start.sh new file mode 100644 index 00000000..684a1928 --- /dev/null +++ b/controllers/flux/templates/start.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +# A custom startscript can be supported for a non flux runner given that +# the container also provides the entrypoint command to run. To be consitent, +# we provide the same blocks of commands as we do to wait.sh. 
+ +# If any initCommand logic is defined +{{ .Container.Commands.Init}} {{ if .Spec.Logging.Quiet }}> /dev/null{{ end }} + +# If we are not in strict, don't set strict mode +{{ if .Spec.Logging.Strict }}set -eEu -o pipefail{{ end }} + +{{ .Container.Commands.BrokerPre}} {{ if .Spec.Logging.Quiet }}> /dev/null 2>&1{{ end }} +{{ .Container.Commands.WorkerPre}} {{ if .Spec.Logging.Quiet }}> /dev/null 2>&1{{ end }} +{{ .Container.Commands.Pre}} {{ if .Spec.Logging.Quiet }}> /dev/null 2>&1{{ end }} + +{{ .Container.Command }} + +{{ .Container.Commands.Post}} diff --git a/controllers/flux/templates/wait.sh b/controllers/flux/templates/wait.sh index 9043dcc3..ed6831d1 100644 --- a/controllers/flux/templates/wait.sh +++ b/controllers/flux/templates/wait.sh @@ -45,9 +45,6 @@ asFlux="${asSudo} -E HOME=/home/${fluxuser}"{{ else }} asFlux="sudo -u ${fluxuser} -E PYTHONPATH=$PYTHONPATH -E PATH=$PATH -E LD_LIBRARY_PATH=${LD_LIBRARY_PATH} -E HOME=/home/${fluxuser}" {{ end }} -# If any preCommand logic is defined -{{ .Container.PreCommand}} {{ if .Spec.Logging.Quiet }}> /dev/null 2>&1{{ end }} - # And pre command logic that isn't passed to the certificate generator {{ .Container.Commands.Pre}} {{ if .Spec.Logging.Quiet }}> /dev/null 2>&1{{ end }} diff --git a/controllers/flux/volumes.go b/controllers/flux/volumes.go index ac1c071d..39de5406 100644 --- a/controllers/flux/volumes.go +++ b/controllers/flux/volumes.go @@ -81,6 +81,16 @@ func getVolumes(cluster *api.MiniCluster) []corev1.Volume { } runnerStartScripts = append(runnerStartScripts, startScript) } + + // A non flux container can also handle custom logic, if command is provided + if container.GenerateEntrypoint() { + startScript := corev1.KeyToPath{ + Key: fmt.Sprintf("start-%d", i), + Path: fmt.Sprintf("start-%d.sh", i), + Mode: &makeExecutable, + } + runnerStartScripts = append(runnerStartScripts, startScript) + } } // If we have Multi User mode, we need to set permission 0644 diff --git a/docs/development/debugging.md 
b/docs/development/debugging.md index c691742d..33942356 100644 --- a/docs/development/debugging.md +++ b/docs/development/debugging.md @@ -24,6 +24,25 @@ One time I messed something up and my metrics server was still running (and I co $ kill $(lsof -t -i:8080) ``` +### Workers completed too early + +If you see that your workers are entering state `Completed` and not connecting to the main broker, first try adding a worker pre command block to sleep (and ensure they start after the main broker): + +```yaml +commands: + workerPre: sleep 60 +``` +This will fix the error if you have a lot of setup logic and there is some race for the workers and lead broker starting. If that doesn't work, +then look at the logs of the lead broker (index 0) and see if there is an obvious error message. If so, this would result in the behavior we see here. +If that doesn't give insight, then likely the lead broker is still not working, but it's hard to see, and your best bet is to set `interactive: true` +under the main spec, and then shell into the lead broker container, connect via flux proxy (and that usually looks like this): + +```bash +$ sudo -u flux -E $(env) -E HOME=/home/flux flux proxy local:///run/flux/local bash +``` + +And then try running your command to look for obvious issues. + ### CRD should be installed If you see something like: diff --git a/docs/getting_started/custom-resource-definition.md b/docs/getting_started/custom-resource-definition.md index c6cc5ebc..c043d350 100644 --- a/docs/getting_started/custom-resource-definition.md +++ b/docs/getting_started/custom-resource-definition.md @@ -966,9 +966,7 @@ asFlux="sudo -u flux -E PYTHONPATH=$PYTHONPATH -E PATH=$PATH -E HOME=/home/flux" #### commands -A special "commands" section is available for commands that you want to run in the broker and workers containers, -but not during certificate generation. As an example, if you print extra output to the certificate generator, -it will mangle the certificate output. 
Instead, you could write debug statements in this section. +A special "commands" section is available for commands that you want to run in the broker and workers containers. ##### init @@ -1000,6 +998,19 @@ containers: ls /workdir ``` +##### workerPre and brokerPre + +This is akin to pre, but only for the broker OR the workers. + +``` +containers: + - image: my-flux-image + ... + commands: + preWorker: echo hello I am a worker + preBroker: echo hello I am the lead broker +``` + ##### post The "post" command is run for in the entrypoint after everything finishes up. diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md index c6ed2b0b..5c221209 100644 --- a/docs/tutorials/index.md +++ b/docs/tutorials/index.md @@ -33,6 +33,9 @@ The following tutorials are provided from their respective directories (and are - [K3s](https://github.com/flux-framework/flux-operator/tree/main/examples/nested/k3s/basic): instiatiate k3s inside Flux, and deploy an app. +#### Process Namespace + + - [shared-process-space](https://github.com/flux-framework/flux-operator/tree/main/examples/experimental/shared-process-space): Allow flux to execute a command into another container ### Machine Learning diff --git a/examples/dist/flux-operator-arm.yaml b/examples/dist/flux-operator-arm.yaml index 3a38cfbd..f119b610 100644 --- a/examples/dist/flux-operator-arm.yaml +++ b/examples/dist/flux-operator-arm.yaml @@ -193,13 +193,6 @@ spec: type: integer type: array x-kubernetes-list-type: atomic - preCommand: - description: Special command to run at beginning of script, - directly after asFlux is defined as sudo -u flux -E (so you - can change that if desired.) This is only valid if FluxRunner - is set (that writes a wait.sh script) This is for the indexed - job pods and the certificate generation container. 
- type: string pullAlways: default: false description: Allow the user to dictate pulling By default we @@ -249,6 +242,11 @@ spec: securityContext: description: Security Context https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: + addCapabilities: + description: Capabilities to add + items: + type: string + type: array privileged: description: Privileged container type: boolean @@ -628,13 +626,6 @@ spec: type: integer type: array x-kubernetes-list-type: atomic - preCommand: - description: Special command to run at beginning of script, - directly after asFlux is defined as sudo -u flux -E (so you - can change that if desired.) This is only valid if FluxRunner - is set (that writes a wait.sh script) This is for the indexed - job pods and the certificate generation container. - type: string pullAlways: default: false description: Allow the user to dictate pulling By default we @@ -684,6 +675,11 @@ spec: securityContext: description: Security Context https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: + addCapabilities: + description: Capabilities to add + items: + type: string + type: array privileged: description: Privileged container type: boolean @@ -710,6 +706,9 @@ spec: type: object type: array x-kubernetes-list-type: atomic + shareProcessNamespace: + description: Share process namespace? + type: boolean size: default: 1 description: Size (number of job pods to run, size of minicluster diff --git a/examples/dist/flux-operator.yaml b/examples/dist/flux-operator.yaml index f6d50b1e..1abb1573 100644 --- a/examples/dist/flux-operator.yaml +++ b/examples/dist/flux-operator.yaml @@ -193,13 +193,6 @@ spec: type: integer type: array x-kubernetes-list-type: atomic - preCommand: - description: Special command to run at beginning of script, - directly after asFlux is defined as sudo -u flux -E (so you - can change that if desired.) 
This is only valid if FluxRunner - is set (that writes a wait.sh script) This is for the indexed - job pods and the certificate generation container. - type: string pullAlways: default: false description: Allow the user to dictate pulling By default we @@ -249,6 +242,11 @@ spec: securityContext: description: Security Context https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: + addCapabilities: + description: Capabilities to add + items: + type: string + type: array privileged: description: Privileged container type: boolean @@ -628,13 +626,6 @@ spec: type: integer type: array x-kubernetes-list-type: atomic - preCommand: - description: Special command to run at beginning of script, - directly after asFlux is defined as sudo -u flux -E (so you - can change that if desired.) This is only valid if FluxRunner - is set (that writes a wait.sh script) This is for the indexed - job pods and the certificate generation container. - type: string pullAlways: default: false description: Allow the user to dictate pulling By default we @@ -684,6 +675,11 @@ spec: securityContext: description: Security Context https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: + addCapabilities: + description: Capabilities to add + items: + type: string + type: array privileged: description: Privileged container type: boolean @@ -710,6 +706,9 @@ spec: type: object type: array x-kubernetes-list-type: atomic + shareProcessNamespace: + description: Share process namespace? 
+ type: boolean size: default: 1 description: Size (number of job pods to run, size of minicluster diff --git a/examples/experimental/shared-process-space/README.md b/examples/experimental/shared-process-space/README.md new file mode 100644 index 00000000..da28c75a --- /dev/null +++ b/examples/experimental/shared-process-space/README.md @@ -0,0 +1,57 @@ +# Testing Communication between Containers + +We are going to test running this application in the context of a [shared process namespace between containers in a pod](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/). + +## Go Experiment + +Create a cluster, and install the Flux Operator + +```bash +kind create cluster +kubectl apply -f ../../dist/flux-operator-dev.yaml +``` + +Create the interactive Minicluster. The [goshare](https://github.com/converged-computing/goshare) client and server will +be installed to two containers. The server has the application we want to run, and the client has flux. + +```bash +$ kubectl create namespace flux-operator +$ kubectl apply -f minicluster.yaml +``` + +We will test this interactively for now. In the future we will want to: + +- install the client/server depending on container +- find the correct PID for the running server based on matching some name or similar +- start the client with the common socket path. + +Wait until your pods are all running: + +```bash +$ kubectl get pods -n flux-operator +``` +```console +NAME READY STATUS RESTARTS AGE +flux-sample-0-k5ccg 2/2 Running 0 7m36s +flux-sample-1-bb8ks 2/2 Running 0 7m36s +flux-sample-2-5cwk4 2/2 Running 0 7m36s +flux-sample-3-jggrg 2/2 Running 0 7m36s +``` + +You can then watch the logs of a server container to see the command being run. 
+```bash +$ kubectl logs -n flux-operator flux-sample-0-wpsnj -c server +``` +```console +task: [build] GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o bin/client cmd/client/client.go +🟦️ service: 2023/07/26 22:42:52 server.go:38: starting service at socket /dinosaur.sock +🟦️ service: 2023/07/26 22:42:52 server.go:50: creating a new service to listen at /dinosaur.sock +🟦️ service: 2023/07/26 22:43:57 command.go:26: start new stream request +🟦️ service: 2023/07/26 22:43:57 command.go:54: Received command echo hello world +🟦️ service: 2023/07/26 22:43:57 command.go:67: send new pid=3025 +🟦️ service: 2023/07/26 22:43:57 command.go:70: Process started with PID: 3025 +🟦️ service: 2023/07/26 22:43:57 command.go:76: send final output: hello world +``` + +Note that this experiment has it running twice - once outside of flux, and one with flux run. +The latter doesn't seem to work, at least I haven't figured out why it works outside flux but not within it. \ No newline at end of file diff --git a/examples/experimental/shared-process-space/minicluster.yaml b/examples/experimental/shared-process-space/minicluster.yaml new file mode 100755 index 00000000..257c903a --- /dev/null +++ b/examples/experimental/shared-process-space/minicluster.yaml @@ -0,0 +1,74 @@ +apiVersion: flux-framework.org/v1alpha1 +kind: MiniCluster +metadata: + name: flux-sample + namespace: flux-operator +spec: + size: 4 + shareProcessNamespace: true + + # This allows us to see zeromq and debug logging + flux: + logLevel: 7 + + logging: + zeromq: true + + # This is a list because a pod can support multiple containers + containers: + + # The client container (issuing commands) has flux + - image: ghcr.io/flux-framework/flux-restful-api + runFlux: true + # This is what we can run in interactive mode + command: goshare-cli -s /proc/$(cat /goshare.pid)/root/dinosaur.sock echo hello world + commands: + # users in both containers need to be same + runFluxAsRoot: true + pre: | + wget 
https://github.com/converged-computing/goshare/releases/download/2023-07-26-rc2/client + chmod +x ./client + mv ./client /bin/goshare-cli + + # This block ensures we have the pid of the running client before continuing + # I hope we can improve on this! + sleep 20 + while true + do + echo "Looking for PID for goshare-srv" + pid=$(ps aux | grep -i "goshare-srv -s /dinosaur.sock" | grep -Eiv "flux_operator" | grep -Eiv "grep" | awk -v OFS=, '{print $1, $2}' | head -n 1 | jq -R 'split(",") | {user: .[0], pid: .[1]}' | jq -r .pid) + if [[ "${pid}" != "" ]]; then + echo "Found PID ${pid} for goshare-srv" + break + fi + sleep 3 + done + + # Keep this around if we want it + echo "${pid}" > /goshare.pid + export GOSHARE_PID=$pid + + # Extra sleep to allow socket to start first + sleep 20 + + # This works running outside of flux + echo "Running hello world" + goshare-cli -s /proc/$GOSHARE_PID/root/dinosaur.sock echo hello world + + echo "Running lammps" + goshare-cli -s /proc/$GOSHARE_PID/root/dinosaur.sock mpirun lmp -v x 1 -v y 1 -v z 1 -in in.reaxc.hns -nocite + + securityContext: + addCapabilities: + - SYS_PTRACE + + - image: ghcr.io/rse-ops/vanilla-lammps:tag-latest + command: exec goshare-srv -s /dinosaur.sock + name: server + + # The server expects to receive commands + commands: + pre: | + wget https://github.com/converged-computing/goshare/releases/download/2023-07-26-rc2/server + chmod +x ./server + mv ./server /bin/goshare-srv \ No newline at end of file diff --git a/examples/tests/osu-benchmarks/minicluster.yaml b/examples/tests/osu-benchmarks/minicluster.yaml index 055facc5..e45112ce 100644 --- a/examples/tests/osu-benchmarks/minicluster.yaml +++ b/examples/tests/osu-benchmarks/minicluster.yaml @@ -21,9 +21,10 @@ spec: - image: ghcr.io/rse-ops/osu-microbench:app-latest # custom preCommand logic (run at start of script) - preCommand: | - source /etc/profile.d/z10_spack_environment.sh - asFlux="sudo -u flux -E PYTHONPATH=$PYTHONPATH" + command: + pre: | + source 
/etc/profile.d/z10_spack_environment.sh + asFlux="sudo -u flux -E PYTHONPATH=$PYTHONPATH" # While these aren't set, these are the workdir and command for a benchmark workingDir: /opt/osu-benchmark/build.openmpi/libexec/osu-micro-benchmarks/mpi/one-sided diff --git a/examples/tests/snakemake/minicluster.yaml b/examples/tests/snakemake/minicluster.yaml index 10aa246c..63c63ca7 100644 --- a/examples/tests/snakemake/minicluster.yaml +++ b/examples/tests/snakemake/minicluster.yaml @@ -44,8 +44,9 @@ spec: # Give the command directly to flux start, instead of wrapping in flux submit launcher: true - preCommand: | - # Ensure the cache targets our flux user home - asFlux="sudo -u flux -E PYTHONPATH=$PYTHONPATH -E PATH=$PATH -E HOME=/home/flux" - # Add the flux user beforehand, and ensure we own the working directory with data - sudo adduser --disabled-password --uid 1000 --gecos "" flux > /dev/null 2>&1 + commands: + pre: | + # Ensure the cache targets our flux user home + asFlux="sudo -u flux -E PYTHONPATH=$PYTHONPATH -E PATH=$PATH -E HOME=/home/flux" + # Add the flux user beforehand, and ensure we own the working directory with data + sudo adduser --disabled-password --uid 1000 --gecos "" flux > /dev/null 2>&1 diff --git a/sdk/python/v1alpha1/.openapi-generator/FILES b/sdk/python/v1alpha1/.openapi-generator/FILES index 45fe2d73..770b549d 100644 --- a/sdk/python/v1alpha1/.openapi-generator/FILES +++ b/sdk/python/v1alpha1/.openapi-generator/FILES @@ -63,4 +63,13 @@ setup.cfg setup.py test-requirements.txt test/__init__.py +test/test_mini_cluster.py +test/test_mini_cluster_archive.py +test/test_mini_cluster_container.py +test/test_mini_cluster_existing_volume.py +test/test_mini_cluster_list.py +test/test_mini_cluster_spec.py +test/test_mini_cluster_status.py +test/test_mini_cluster_user.py +test/test_mini_cluster_volume.py tox.ini diff --git a/sdk/python/v1alpha1/docs/MiniClusterContainer.md b/sdk/python/v1alpha1/docs/MiniClusterContainer.md index 07837727..5ee9294b 100644 
--- a/sdk/python/v1alpha1/docs/MiniClusterContainer.md +++ b/sdk/python/v1alpha1/docs/MiniClusterContainer.md @@ -20,7 +20,6 @@ Name | Type | Description | Notes **logs** | **str** | Log output directory | [optional] [default to ''] **name** | **str** | Container name is only required for non flux runners | [optional] [default to ''] **ports** | **list[int]** | Ports to be exposed to other containers in the cluster We take a single list of integers and map to the same | [optional] -**pre_command** | **str** | Special command to run at beginning of script, directly after asFlux is defined as sudo -u flux -E (so you can change that if desired.) This is only valid if FluxRunner is set (that writes a wait.sh script) This is for the indexed job pods and the certificate generation container. | [optional] [default to ''] **pull_always** | **bool** | Allow the user to dictate pulling By default we pull if not present. Setting this to true will indicate to pull always | [optional] [default to False] **resources** | [**ContainerResources**](ContainerResources.md) | | [optional] **run_flux** | **bool** | Main container to run flux (only should be one) | [optional] [default to False] diff --git a/sdk/python/v1alpha1/docs/MiniClusterSpec.md b/sdk/python/v1alpha1/docs/MiniClusterSpec.md index 208a69aa..35bbcc1b 100644 --- a/sdk/python/v1alpha1/docs/MiniClusterSpec.md +++ b/sdk/python/v1alpha1/docs/MiniClusterSpec.md @@ -18,6 +18,7 @@ Name | Type | Description | Notes **network** | [**Network**](Network.md) | | [optional] **pod** | [**PodSpec**](PodSpec.md) | | [optional] **services** | [**list[MiniClusterContainer]**](MiniClusterContainer.md) | Services are one or more service containers to bring up alongside the MiniCluster. | [optional] +**share_process_namespace** | **bool** | Share process namespace? 
| [optional] [default to False] **size** | **int** | Size (number of job pods to run, size of minicluster in pods) This is also the minimum number required to start Flux | [optional] [default to 1] **tasks** | **int** | Total number of CPUs being run across entire cluster | [optional] [default to 1] **users** | [**list[MiniClusterUser]**](MiniClusterUser.md) | Users of the MiniCluster | [optional] diff --git a/sdk/python/v1alpha1/docs/SecurityContext.md b/sdk/python/v1alpha1/docs/SecurityContext.md index 15c49b8b..66b4b0cb 100644 --- a/sdk/python/v1alpha1/docs/SecurityContext.md +++ b/sdk/python/v1alpha1/docs/SecurityContext.md @@ -4,6 +4,7 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**add_capabilities** | **list[str]** | Capabilities to add | [optional] **privileged** | **bool** | Privileged container | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdk/python/v1alpha1/fluxoperator/models/mini_cluster_container.py b/sdk/python/v1alpha1/fluxoperator/models/mini_cluster_container.py index f8fb02ae..0152d7ed 100644 --- a/sdk/python/v1alpha1/fluxoperator/models/mini_cluster_container.py +++ b/sdk/python/v1alpha1/fluxoperator/models/mini_cluster_container.py @@ -49,7 +49,6 @@ class MiniClusterContainer(object): 'logs': 'str', 'name': 'str', 'ports': 'list[int]', - 'pre_command': 'str', 'pull_always': 'bool', 'resources': 'ContainerResources', 'run_flux': 'bool', @@ -76,7 +75,6 @@ class MiniClusterContainer(object): 'logs': 'logs', 'name': 'name', 'ports': 'ports', - 'pre_command': 'preCommand', 'pull_always': 'pullAlways', 'resources': 'resources', 'run_flux': 'runFlux', @@ -86,7 +84,7 @@ class MiniClusterContainer(object): 'working_dir': 'workingDir' } - def __init__(self, batch=False, batch_raw=False, command='', commands=None, cores=0, diagnostics=False, 
environment=None, existing_volumes=None, flux_user=None, image='ghcr.io/rse-ops/accounting:app-latest', image_pull_secret='', launcher=False, life_cycle=None, logs='', name='', ports=None, pre_command='', pull_always=False, resources=None, run_flux=False, secrets=None, security_context=None, volumes=None, working_dir='', local_vars_configuration=None): # noqa: E501 + def __init__(self, batch=False, batch_raw=False, command='', commands=None, cores=0, diagnostics=False, environment=None, existing_volumes=None, flux_user=None, image='ghcr.io/rse-ops/accounting:app-latest', image_pull_secret='', launcher=False, life_cycle=None, logs='', name='', ports=None, pull_always=False, resources=None, run_flux=False, secrets=None, security_context=None, volumes=None, working_dir='', local_vars_configuration=None): # noqa: E501 """MiniClusterContainer - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration.get_default_copy() @@ -108,7 +106,6 @@ def __init__(self, batch=False, batch_raw=False, command='', commands=None, core self._logs = None self._name = None self._ports = None - self._pre_command = None self._pull_always = None self._resources = None self._run_flux = None @@ -150,8 +147,6 @@ def __init__(self, batch=False, batch_raw=False, command='', commands=None, core self.name = name if ports is not None: self.ports = ports - if pre_command is not None: - self.pre_command = pre_command if pull_always is not None: self.pull_always = pull_always if resources is not None: @@ -529,29 +524,6 @@ def ports(self, ports): self._ports = ports - @property - def pre_command(self): - """Gets the pre_command of this MiniClusterContainer. # noqa: E501 - - Special command to run at beginning of script, directly after asFlux is defined as sudo -u flux -E (so you can change that if desired.) 
This is only valid if FluxRunner is set (that writes a wait.sh script) This is for the indexed job pods and the certificate generation container. # noqa: E501 - - :return: The pre_command of this MiniClusterContainer. # noqa: E501 - :rtype: str - """ - return self._pre_command - - @pre_command.setter - def pre_command(self, pre_command): - """Sets the pre_command of this MiniClusterContainer. - - Special command to run at beginning of script, directly after asFlux is defined as sudo -u flux -E (so you can change that if desired.) This is only valid if FluxRunner is set (that writes a wait.sh script) This is for the indexed job pods and the certificate generation container. # noqa: E501 - - :param pre_command: The pre_command of this MiniClusterContainer. # noqa: E501 - :type pre_command: str - """ - - self._pre_command = pre_command - @property def pull_always(self): """Gets the pull_always of this MiniClusterContainer. # noqa: E501 diff --git a/sdk/python/v1alpha1/fluxoperator/models/mini_cluster_spec.py b/sdk/python/v1alpha1/fluxoperator/models/mini_cluster_spec.py index d9361a85..c4e6c28b 100644 --- a/sdk/python/v1alpha1/fluxoperator/models/mini_cluster_spec.py +++ b/sdk/python/v1alpha1/fluxoperator/models/mini_cluster_spec.py @@ -46,6 +46,7 @@ class MiniClusterSpec(object): 'network': 'Network', 'pod': 'PodSpec', 'services': 'list[MiniClusterContainer]', + 'share_process_namespace': 'bool', 'size': 'int', 'tasks': 'int', 'users': 'list[MiniClusterUser]', @@ -66,13 +67,14 @@ class MiniClusterSpec(object): 'network': 'network', 'pod': 'pod', 'services': 'services', + 'share_process_namespace': 'shareProcessNamespace', 'size': 'size', 'tasks': 'tasks', 'users': 'users', 'volumes': 'volumes' } - def __init__(self, archive=None, cleanup=False, containers=None, deadline_seconds=31500000, flux=None, flux_restful=None, interactive=False, job_labels=None, logging=None, max_size=None, network=None, pod=None, services=None, size=1, tasks=1, users=None, volumes=None, 
local_vars_configuration=None): # noqa: E501 + def __init__(self, archive=None, cleanup=False, containers=None, deadline_seconds=31500000, flux=None, flux_restful=None, interactive=False, job_labels=None, logging=None, max_size=None, network=None, pod=None, services=None, share_process_namespace=False, size=1, tasks=1, users=None, volumes=None, local_vars_configuration=None): # noqa: E501 """MiniClusterSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration.get_default_copy() @@ -91,6 +93,7 @@ def __init__(self, archive=None, cleanup=False, containers=None, deadline_second self._network = None self._pod = None self._services = None + self._share_process_namespace = None self._size = None self._tasks = None self._users = None @@ -122,6 +125,8 @@ def __init__(self, archive=None, cleanup=False, containers=None, deadline_second self.pod = pod if services is not None: self.services = services + if share_process_namespace is not None: + self.share_process_namespace = share_process_namespace if size is not None: self.size = size if tasks is not None: @@ -420,6 +425,29 @@ def services(self, services): self._services = services + @property + def share_process_namespace(self): + """Gets the share_process_namespace of this MiniClusterSpec. # noqa: E501 + + Share process namespace? # noqa: E501 + + :return: The share_process_namespace of this MiniClusterSpec. # noqa: E501 + :rtype: bool + """ + return self._share_process_namespace + + @share_process_namespace.setter + def share_process_namespace(self, share_process_namespace): + """Sets the share_process_namespace of this MiniClusterSpec. + + Share process namespace? # noqa: E501 + + :param share_process_namespace: The share_process_namespace of this MiniClusterSpec. # noqa: E501 + :type share_process_namespace: bool + """ + + self._share_process_namespace = share_process_namespace + @property def size(self): """Gets the size of this MiniClusterSpec. 
# noqa: E501 diff --git a/sdk/python/v1alpha1/fluxoperator/models/security_context.py b/sdk/python/v1alpha1/fluxoperator/models/security_context.py index ee7ba85c..475d284d 100644 --- a/sdk/python/v1alpha1/fluxoperator/models/security_context.py +++ b/sdk/python/v1alpha1/fluxoperator/models/security_context.py @@ -33,25 +33,53 @@ class SecurityContext(object): and the value is json key in definition. """ openapi_types = { + 'add_capabilities': 'list[str]', 'privileged': 'bool' } attribute_map = { + 'add_capabilities': 'addCapabilities', 'privileged': 'privileged' } - def __init__(self, privileged=None, local_vars_configuration=None): # noqa: E501 + def __init__(self, add_capabilities=None, privileged=None, local_vars_configuration=None): # noqa: E501 """SecurityContext - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration.get_default_copy() self.local_vars_configuration = local_vars_configuration + self._add_capabilities = None self._privileged = None self.discriminator = None + if add_capabilities is not None: + self.add_capabilities = add_capabilities if privileged is not None: self.privileged = privileged + @property + def add_capabilities(self): + """Gets the add_capabilities of this SecurityContext. # noqa: E501 + + Capabilities to add # noqa: E501 + + :return: The add_capabilities of this SecurityContext. # noqa: E501 + :rtype: list[str] + """ + return self._add_capabilities + + @add_capabilities.setter + def add_capabilities(self, add_capabilities): + """Sets the add_capabilities of this SecurityContext. + + Capabilities to add # noqa: E501 + + :param add_capabilities: The add_capabilities of this SecurityContext. # noqa: E501 + :type add_capabilities: list[str] + """ + + self._add_capabilities = add_capabilities + @property def privileged(self): """Gets the privileged of this SecurityContext. 
# noqa: E501 diff --git a/sdk/python/v1alpha1/test/test_mini_cluster.py b/sdk/python/v1alpha1/test/test_mini_cluster.py index a70e682d..86d75624 100644 --- a/sdk/python/v1alpha1/test/test_mini_cluster.py +++ b/sdk/python/v1alpha1/test/test_mini_cluster.py @@ -19,7 +19,6 @@ from fluxoperator.models.mini_cluster import MiniCluster # noqa: E501 from fluxoperator.rest import ApiException - class TestMiniCluster(unittest.TestCase): """MiniCluster unit test stubs""" @@ -31,161 +30,200 @@ def tearDown(self): def make_instance(self, include_optional): """Test MiniCluster - include_option is a boolean, when False only required - params are included, when True both required and - optional params are included""" + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ # model = fluxoperator.models.mini_cluster.MiniCluster() # noqa: E501 - if include_optional: + if include_optional : return MiniCluster( - api_version="", - kind="", - metadata=None, - spec=fluxoperator.models.mini_cluster_spec.MiniClusterSpec( - archive=fluxoperator.models.mini_cluster_archive.MiniClusterArchive( - path="", - ), - cleanup=True, - containers=[ + api_version = '', + kind = '', + metadata = None, + spec = fluxoperator.models.mini_cluster_spec.MiniClusterSpec( + archive = fluxoperator.models.mini_cluster_archive.MiniClusterArchive( + path = '', ), + cleanup = True, + containers = [ fluxoperator.models.mini_cluster_container.MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - commands=fluxoperator.models.commands.Commands( - broker_pre="", - init="", - post="", - pre="", - prefix="", - run_flux_as_root=True, - worker_pre="", - ), - cores=56, - diagnostics=True, - environment={"key": ""}, - existing_volumes={ - "key": fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( - claim_name="", - path="", - read_only=True, - ) - }, - flux_user=fluxoperator.models.flux_user.FluxUser( - 
name="flux", - uid=56, - ), - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - life_cycle=fluxoperator.models.life_cycle.LifeCycle( - post_start_exec="", - pre_stop_exec="", - ), - logs="", - name="", - ports=[56], - pre_command="", - pull_always=True, - resources=fluxoperator.models.container_resources.ContainerResources( - limits={"key": None}, - requests={"key": None}, - ), - run_flux=True, - security_context=fluxoperator.models.security_context.SecurityContext( - privileged=True, - ), - volumes={ - "key": fluxoperator.models.container_volume.ContainerVolume( - path="", - read_only=True, - ) - }, - working_dir="", - ) - ], - deadline_seconds=56, - flux=fluxoperator.models.flux_spec.FluxSpec( - connect_timeout="5s", - log_level=56, - option_flags="", - ), - flux_restful=fluxoperator.models.flux_restful.FluxRestful( - branch="main", - port=56, - secret_key="", - token="", - username="", - ), - interactive=True, - job_labels={"key": ""}, - logging=fluxoperator.models.logging_spec.LoggingSpec( - debug=True, - quiet=True, - strict=True, - timed=True, - zeromq=True, - ), - max_size=56, - pod=fluxoperator.models.pod_spec.PodSpec( - annotations={"key": ""}, - labels={"key": ""}, - node_selector={"key": ""}, - service_account_name="", - ), - services=[ + batch = True, + batch_raw = True, + command = '', + commands = fluxoperator.models.commands.Commands( + broker_pre = '', + init = '', + post = '', + pre = '', + prefix = '', + run_flux_as_root = True, + worker_pre = '', ), + cores = 56, + diagnostics = True, + environment = { + 'key' : '' + }, + existing_volumes = { + 'key' : fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( + claim_name = '', + config_map_name = '', + items = { + 'key' : '' + }, + path = '', + read_only = True, + secret_name = '', ) + }, + flux_user = fluxoperator.models.flux_user.FluxUser( + name = 'flux', + uid = 56, ), + image = 'ghcr.io/rse-ops/accounting:app-latest', + 
image_pull_secret = '', + launcher = True, + life_cycle = fluxoperator.models.life_cycle.LifeCycle( + post_start_exec = '', + pre_stop_exec = '', ), + logs = '', + name = '', + ports = [ + 56 + ], + pull_always = True, + resources = fluxoperator.models.container_resources.ContainerResources( + limits = { + 'key' : None + }, + requests = { + 'key' : None + }, ), + run_flux = True, + secrets = { + 'key' : fluxoperator.models.secret.Secret( + key = '', + name = '', ) + }, + security_context = fluxoperator.models.security_context.SecurityContext( + add_capabilities = [ + '' + ], + privileged = True, ), + volumes = { + 'key' : fluxoperator.models.container_volume.ContainerVolume( + path = '', + read_only = True, ) + }, + working_dir = '', ) + ], + deadline_seconds = 56, + flux = fluxoperator.models.flux_spec.FluxSpec( + broker_config = '', + bursting = fluxoperator.models.bursting.Bursting( + clusters = [ + fluxoperator.models.bursted_cluster.BurstedCluster( + name = '', + size = 56, ) + ], + hostlist = '', + lead_broker = fluxoperator.models.flux_broker.FluxBroker( + address = '', + name = '', + port = 56, + size = 56, ), ), + connect_timeout = '5s', + curve_cert = '', + curve_cert_secret = '', + install_root = '/usr', + log_level = 56, + minimal_service = True, + munge_secret = '', + option_flags = '', + wrap = '', ), + flux_restful = fluxoperator.models.flux_restful.FluxRestful( + branch = 'main', + port = 56, + secret_key = '', + token = '', + username = '', ), + interactive = True, + job_labels = { + 'key' : '' + }, + logging = fluxoperator.models.logging_spec.LoggingSpec( + debug = True, + quiet = True, + strict = True, + timed = True, + zeromq = True, ), + max_size = 56, + network = fluxoperator.models.network.Network( + headless_name = 'flux-service', ), + pod = fluxoperator.models.pod_spec.PodSpec( + annotations = { + 'key' : '' + }, + labels = { + 'key' : '' + }, + node_selector = { + 'key' : '' + }, + service_account_name = '', ), + services = [ 
fluxoperator.models.mini_cluster_container.MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - cores=56, - diagnostics=True, - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - logs="", - name="", - pre_command="", - pull_always=True, - run_flux=True, - working_dir="", - ) - ], - size=56, - tasks=56, - users=[ + batch = True, + batch_raw = True, + command = '', + cores = 56, + diagnostics = True, + image = 'ghcr.io/rse-ops/accounting:app-latest', + image_pull_secret = '', + launcher = True, + logs = '', + name = '', + pull_always = True, + run_flux = True, + working_dir = '', ) + ], + share_process_namespace = True, + size = 56, + tasks = 56, + users = [ fluxoperator.models.mini_cluster_user.MiniClusterUser( - name="", - password="", - ) - ], - volumes={ - "key": fluxoperator.models.mini_cluster_volume.MiniClusterVolume( - attributes={"key": ""}, - capacity="5Gi", - claim_annotations={"key": ""}, - delete=True, - driver="", - path="", - secret="", - secret_namespace="default", - storage_class="hostpath", - volume_handle="", - ) - }, - ), - status=fluxoperator.models.mini_cluster_status.MiniClusterStatus( - conditions=[None], - jobid="", - maximum_size=56, - ), + name = '', + password = '', ) + ], + volumes = { + 'key' : fluxoperator.models.mini_cluster_volume.MiniClusterVolume( + attributes = { + 'key' : '' + }, + capacity = '5Gi', + claim_annotations = { + 'key' : '' + }, + delete = True, + driver = '', + path = '', + secret = '', + secret_namespace = 'default', + storage_class = 'hostpath', + volume_handle = '', ) + }, ), + status = fluxoperator.models.mini_cluster_status.MiniClusterStatus( + conditions = [ + None + ], + jobid = '', + maximum_size = 56, + selector = '', + size = 56, ) ) - else: - return MiniCluster() + else : + return MiniCluster( + ) def testMiniCluster(self): """Test MiniCluster""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = 
self.make_instance(include_optional=True) - -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/sdk/python/v1alpha1/test/test_mini_cluster_archive.py b/sdk/python/v1alpha1/test/test_mini_cluster_archive.py index 020859a4..c1136870 100644 --- a/sdk/python/v1alpha1/test/test_mini_cluster_archive.py +++ b/sdk/python/v1alpha1/test/test_mini_cluster_archive.py @@ -19,7 +19,6 @@ from fluxoperator.models.mini_cluster_archive import MiniClusterArchive # noqa: E501 from fluxoperator.rest import ApiException - class TestMiniClusterArchive(unittest.TestCase): """MiniClusterArchive unit test stubs""" @@ -31,20 +30,22 @@ def tearDown(self): def make_instance(self, include_optional): """Test MiniClusterArchive - include_option is a boolean, when False only required - params are included, when True both required and - optional params are included""" + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ # model = fluxoperator.models.mini_cluster_archive.MiniClusterArchive() # noqa: E501 - if include_optional: - return MiniClusterArchive(path="") - else: - return MiniClusterArchive() + if include_optional : + return MiniClusterArchive( + path = '' + ) + else : + return MiniClusterArchive( + ) def testMiniClusterArchive(self): """Test MiniClusterArchive""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/sdk/python/v1alpha1/test/test_mini_cluster_container.py b/sdk/python/v1alpha1/test/test_mini_cluster_container.py index 2c59c74b..a66545c1 100644 --- a/sdk/python/v1alpha1/test/test_mini_cluster_container.py +++ b/sdk/python/v1alpha1/test/test_mini_cluster_container.py @@ -16,12 +16,9 @@ import datetime import fluxoperator -from fluxoperator.models.mini_cluster_container import ( - MiniClusterContainer, 
-) # noqa: E501 +from fluxoperator.models.mini_cluster_container import MiniClusterContainer # noqa: E501 from fluxoperator.rest import ApiException - class TestMiniClusterContainer(unittest.TestCase): """MiniClusterContainer unit test stubs""" @@ -33,74 +30,87 @@ def tearDown(self): def make_instance(self, include_optional): """Test MiniClusterContainer - include_option is a boolean, when False only required - params are included, when True both required and - optional params are included""" + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ # model = fluxoperator.models.mini_cluster_container.MiniClusterContainer() # noqa: E501 - if include_optional: + if include_optional : return MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - commands=fluxoperator.models.commands.Commands( - broker_pre="", - init="", - post="", - pre="", - prefix="", - run_flux_as_root=True, - worker_pre="", - ), - cores=56, - diagnostics=True, - environment={"key": ""}, - existing_volumes={ - "key": fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( - claim_name="", - path="", - read_only=True, - ) - }, - flux_user=fluxoperator.models.flux_user.FluxUser( - name="flux", - uid=56, - ), - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - life_cycle=fluxoperator.models.life_cycle.LifeCycle( - post_start_exec="", - pre_stop_exec="", - ), - logs="", - name="", - ports=[56], - pre_command="", - pull_always=True, - resources=fluxoperator.models.container_resources.ContainerResources( - limits={"key": None}, - requests={"key": None}, - ), - run_flux=True, - security_context=fluxoperator.models.security_context.SecurityContext( - privileged=True, - ), - volumes={ - "key": fluxoperator.models.container_volume.ContainerVolume( - path="", - read_only=True, - ) - }, - working_dir="", + batch = True, + batch_raw = True, + command = 
'', + commands = fluxoperator.models.commands.Commands( + broker_pre = '', + init = '', + post = '', + pre = '', + prefix = '', + run_flux_as_root = True, + worker_pre = '', ), + cores = 56, + diagnostics = True, + environment = { + 'key' : '' + }, + existing_volumes = { + 'key' : fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( + claim_name = '', + config_map_name = '', + items = { + 'key' : '' + }, + path = '', + read_only = True, + secret_name = '', ) + }, + flux_user = fluxoperator.models.flux_user.FluxUser( + name = 'flux', + uid = 56, ), + image = 'ghcr.io/rse-ops/accounting:app-latest', + image_pull_secret = '', + launcher = True, + life_cycle = fluxoperator.models.life_cycle.LifeCycle( + post_start_exec = '', + pre_stop_exec = '', ), + logs = '', + name = '', + ports = [ + 56 + ], + pull_always = True, + resources = fluxoperator.models.container_resources.ContainerResources( + limits = { + 'key' : None + }, + requests = { + 'key' : None + }, ), + run_flux = True, + secrets = { + 'key' : fluxoperator.models.secret.Secret( + key = '', + name = '', ) + }, + security_context = fluxoperator.models.security_context.SecurityContext( + add_capabilities = [ + '' + ], + privileged = True, ), + volumes = { + 'key' : fluxoperator.models.container_volume.ContainerVolume( + path = '', + read_only = True, ) + }, + working_dir = '' ) - else: - return MiniClusterContainer() + else : + return MiniClusterContainer( + ) def testMiniClusterContainer(self): """Test MiniClusterContainer""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/sdk/python/v1alpha1/test/test_mini_cluster_existing_volume.py b/sdk/python/v1alpha1/test/test_mini_cluster_existing_volume.py index e867d5ec..88f63e62 100644 --- a/sdk/python/v1alpha1/test/test_mini_cluster_existing_volume.py +++ 
b/sdk/python/v1alpha1/test/test_mini_cluster_existing_volume.py @@ -16,12 +16,9 @@ import datetime import fluxoperator -from fluxoperator.models.mini_cluster_existing_volume import ( - MiniClusterExistingVolume, -) # noqa: E501 +from fluxoperator.models.mini_cluster_existing_volume import MiniClusterExistingVolume # noqa: E501 from fluxoperator.rest import ApiException - class TestMiniClusterExistingVolume(unittest.TestCase): """MiniClusterExistingVolume unit test stubs""" @@ -33,23 +30,29 @@ def tearDown(self): def make_instance(self, include_optional): """Test MiniClusterExistingVolume - include_option is a boolean, when False only required - params are included, when True both required and - optional params are included""" + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ # model = fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume() # noqa: E501 - if include_optional: - return MiniClusterExistingVolume(claim_name="", path="", read_only=True) - else: + if include_optional : return MiniClusterExistingVolume( - claim_name="", - path="", + claim_name = '', + config_map_name = '', + items = { + 'key' : '' + }, + path = '', + read_only = True, + secret_name = '' ) + else : + return MiniClusterExistingVolume( + ) def testMiniClusterExistingVolume(self): """Test MiniClusterExistingVolume""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/sdk/python/v1alpha1/test/test_mini_cluster_list.py b/sdk/python/v1alpha1/test/test_mini_cluster_list.py index 3fed3e86..206169a1 100644 --- a/sdk/python/v1alpha1/test/test_mini_cluster_list.py +++ b/sdk/python/v1alpha1/test/test_mini_cluster_list.py @@ -19,7 +19,6 @@ from fluxoperator.models.mini_cluster_list import MiniClusterList # noqa: E501 from 
fluxoperator.rest import ApiException - class TestMiniClusterList(unittest.TestCase): """MiniClusterList unit test stubs""" @@ -31,313 +30,388 @@ def tearDown(self): def make_instance(self, include_optional): """Test MiniClusterList - include_option is a boolean, when False only required - params are included, when True both required and - optional params are included""" + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ # model = fluxoperator.models.mini_cluster_list.MiniClusterList() # noqa: E501 - if include_optional: + if include_optional : return MiniClusterList( - api_version="", - items=[ + api_version = '', + items = [ fluxoperator.models.mini_cluster.MiniCluster( - api_version="", - kind="", - metadata=None, - spec=fluxoperator.models.mini_cluster_spec.MiniClusterSpec( - archive=fluxoperator.models.mini_cluster_archive.MiniClusterArchive( - path="", - ), - cleanup=True, - containers=[ + api_version = '', + kind = '', + metadata = None, + spec = fluxoperator.models.mini_cluster_spec.MiniClusterSpec( + archive = fluxoperator.models.mini_cluster_archive.MiniClusterArchive( + path = '', ), + cleanup = True, + containers = [ fluxoperator.models.mini_cluster_container.MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - commands=fluxoperator.models.commands.Commands( - broker_pre="", - init="", - post="", - pre="", - prefix="", - run_flux_as_root=True, - worker_pre="", - ), - cores=56, - diagnostics=True, - environment={"key": ""}, - existing_volumes={ - "key": fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( - claim_name="", - path="", - read_only=True, - ) - }, - flux_user=fluxoperator.models.flux_user.FluxUser( - name="flux", - uid=56, - ), - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - life_cycle=fluxoperator.models.life_cycle.LifeCycle( - post_start_exec="", - pre_stop_exec="", - 
), - logs="", - name="", - ports=[56], - pre_command="", - pull_always=True, - resources=fluxoperator.models.container_resources.ContainerResources( - limits={"key": None}, - requests={"key": None}, - ), - run_flux=True, - security_context=fluxoperator.models.security_context.SecurityContext( - privileged=True, - ), - volumes={ - "key": fluxoperator.models.container_volume.ContainerVolume( - path="", - read_only=True, - ) - }, - working_dir="", - ) - ], - deadline_seconds=56, - flux=fluxoperator.models.flux_spec.FluxSpec( - connect_timeout="5s", - log_level=56, - option_flags="", - ), - flux_restful=fluxoperator.models.flux_restful.FluxRestful( - branch="main", - port=56, - secret_key="", - token="", - username="", - ), - interactive=True, - job_labels={"key": ""}, - logging=fluxoperator.models.logging_spec.LoggingSpec( - debug=True, - quiet=True, - strict=True, - timed=True, - zeromq=True, - ), - max_size=56, - pod=fluxoperator.models.pod_spec.PodSpec( - annotations={"key": ""}, - labels={"key": ""}, - node_selector={"key": ""}, - service_account_name="", - ), - services=[ + batch = True, + batch_raw = True, + command = '', + commands = fluxoperator.models.commands.Commands( + broker_pre = '', + init = '', + post = '', + pre = '', + prefix = '', + run_flux_as_root = True, + worker_pre = '', ), + cores = 56, + diagnostics = True, + environment = { + 'key' : '' + }, + existing_volumes = { + 'key' : fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( + claim_name = '', + config_map_name = '', + items = { + 'key' : '' + }, + path = '', + read_only = True, + secret_name = '', ) + }, + flux_user = fluxoperator.models.flux_user.FluxUser( + name = 'flux', + uid = 56, ), + image = 'ghcr.io/rse-ops/accounting:app-latest', + image_pull_secret = '', + launcher = True, + life_cycle = fluxoperator.models.life_cycle.LifeCycle( + post_start_exec = '', + pre_stop_exec = '', ), + logs = '', + name = '', + ports = [ + 56 + ], + pull_always = True, + resources 
= fluxoperator.models.container_resources.ContainerResources( + limits = { + 'key' : None + }, + requests = { + 'key' : None + }, ), + run_flux = True, + secrets = { + 'key' : fluxoperator.models.secret.Secret( + key = '', + name = '', ) + }, + security_context = fluxoperator.models.security_context.SecurityContext( + add_capabilities = [ + '' + ], + privileged = True, ), + volumes = { + 'key' : fluxoperator.models.container_volume.ContainerVolume( + path = '', + read_only = True, ) + }, + working_dir = '', ) + ], + deadline_seconds = 56, + flux = fluxoperator.models.flux_spec.FluxSpec( + broker_config = '', + bursting = fluxoperator.models.bursting.Bursting( + clusters = [ + fluxoperator.models.bursted_cluster.BurstedCluster( + name = '', + size = 56, ) + ], + hostlist = '', + lead_broker = fluxoperator.models.flux_broker.FluxBroker( + address = '', + name = '', + port = 56, + size = 56, ), ), + connect_timeout = '5s', + curve_cert = '', + curve_cert_secret = '', + install_root = '/usr', + log_level = 56, + minimal_service = True, + munge_secret = '', + option_flags = '', + wrap = '', ), + flux_restful = fluxoperator.models.flux_restful.FluxRestful( + branch = 'main', + port = 56, + secret_key = '', + token = '', + username = '', ), + interactive = True, + job_labels = { + 'key' : '' + }, + logging = fluxoperator.models.logging_spec.LoggingSpec( + debug = True, + quiet = True, + strict = True, + timed = True, + zeromq = True, ), + max_size = 56, + network = fluxoperator.models.network.Network( + headless_name = 'flux-service', ), + pod = fluxoperator.models.pod_spec.PodSpec( + annotations = { + 'key' : '' + }, + labels = { + 'key' : '' + }, + node_selector = { + 'key' : '' + }, + service_account_name = '', ), + services = [ fluxoperator.models.mini_cluster_container.MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - cores=56, - diagnostics=True, - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - logs="", 
- name="", - pre_command="", - pull_always=True, - run_flux=True, - working_dir="", - ) - ], - size=56, - tasks=56, - users=[ + batch = True, + batch_raw = True, + command = '', + cores = 56, + diagnostics = True, + image = 'ghcr.io/rse-ops/accounting:app-latest', + image_pull_secret = '', + launcher = True, + logs = '', + name = '', + pull_always = True, + run_flux = True, + working_dir = '', ) + ], + share_process_namespace = True, + size = 56, + tasks = 56, + users = [ fluxoperator.models.mini_cluster_user.MiniClusterUser( - name="", - password="", - ) - ], - volumes={ - "key": fluxoperator.models.mini_cluster_volume.MiniClusterVolume( - attributes={"key": ""}, - capacity="5Gi", - claim_annotations={"key": ""}, - delete=True, - driver="", - path="", - secret="", - secret_namespace="default", - storage_class="hostpath", - volume_handle="", - ) - }, - ), - status=fluxoperator.models.mini_cluster_status.MiniClusterStatus( - conditions=[None], - jobid="", - maximum_size=56, - ), - ) - ], - kind="", - metadata=None, + name = '', + password = '', ) + ], + volumes = { + 'key' : fluxoperator.models.mini_cluster_volume.MiniClusterVolume( + attributes = { + 'key' : '' + }, + capacity = '5Gi', + claim_annotations = { + 'key' : '' + }, + delete = True, + driver = '', + path = '', + secret = '', + secret_namespace = 'default', + storage_class = 'hostpath', + volume_handle = '', ) + }, ), + status = fluxoperator.models.mini_cluster_status.MiniClusterStatus( + conditions = [ + None + ], + jobid = '', + maximum_size = 56, + selector = '', + size = 56, ), ) + ], + kind = '', + metadata = None ) - else: + else : return MiniClusterList( - items=[ + items = [ fluxoperator.models.mini_cluster.MiniCluster( - api_version="", - kind="", - metadata=None, - spec=fluxoperator.models.mini_cluster_spec.MiniClusterSpec( - archive=fluxoperator.models.mini_cluster_archive.MiniClusterArchive( - path="", - ), - cleanup=True, - containers=[ + api_version = '', + kind = '', + metadata = None, + 
spec = fluxoperator.models.mini_cluster_spec.MiniClusterSpec( + archive = fluxoperator.models.mini_cluster_archive.MiniClusterArchive( + path = '', ), + cleanup = True, + containers = [ fluxoperator.models.mini_cluster_container.MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - commands=fluxoperator.models.commands.Commands( - broker_pre="", - init="", - post="", - pre="", - prefix="", - run_flux_as_root=True, - worker_pre="", - ), - cores=56, - diagnostics=True, - environment={"key": ""}, - existing_volumes={ - "key": fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( - claim_name="", - path="", - read_only=True, - ) - }, - flux_user=fluxoperator.models.flux_user.FluxUser( - name="flux", - uid=56, - ), - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - life_cycle=fluxoperator.models.life_cycle.LifeCycle( - post_start_exec="", - pre_stop_exec="", - ), - logs="", - name="", - ports=[56], - pre_command="", - pull_always=True, - resources=fluxoperator.models.container_resources.ContainerResources( - limits={"key": None}, - requests={"key": None}, - ), - run_flux=True, - security_context=fluxoperator.models.security_context.SecurityContext( - privileged=True, - ), - volumes={ - "key": fluxoperator.models.container_volume.ContainerVolume( - path="", - read_only=True, - ) - }, - working_dir="", - ) - ], - deadline_seconds=56, - flux=fluxoperator.models.flux_spec.FluxSpec( - connect_timeout="5s", - log_level=56, - option_flags="", - ), - flux_restful=fluxoperator.models.flux_restful.FluxRestful( - branch="main", - port=56, - secret_key="", - token="", - username="", - ), - interactive=True, - job_labels={"key": ""}, - logging=fluxoperator.models.logging_spec.LoggingSpec( - debug=True, - quiet=True, - strict=True, - timed=True, - zeromq=True, - ), - max_size=56, - pod=fluxoperator.models.pod_spec.PodSpec( - annotations={"key": ""}, - labels={"key": ""}, - node_selector={"key": ""}, - 
service_account_name="", - ), - services=[ + batch = True, + batch_raw = True, + command = '', + commands = fluxoperator.models.commands.Commands( + broker_pre = '', + init = '', + post = '', + pre = '', + prefix = '', + run_flux_as_root = True, + worker_pre = '', ), + cores = 56, + diagnostics = True, + environment = { + 'key' : '' + }, + existing_volumes = { + 'key' : fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( + claim_name = '', + config_map_name = '', + items = { + 'key' : '' + }, + path = '', + read_only = True, + secret_name = '', ) + }, + flux_user = fluxoperator.models.flux_user.FluxUser( + name = 'flux', + uid = 56, ), + image = 'ghcr.io/rse-ops/accounting:app-latest', + image_pull_secret = '', + launcher = True, + life_cycle = fluxoperator.models.life_cycle.LifeCycle( + post_start_exec = '', + pre_stop_exec = '', ), + logs = '', + name = '', + ports = [ + 56 + ], + pull_always = True, + resources = fluxoperator.models.container_resources.ContainerResources( + limits = { + 'key' : None + }, + requests = { + 'key' : None + }, ), + run_flux = True, + secrets = { + 'key' : fluxoperator.models.secret.Secret( + key = '', + name = '', ) + }, + security_context = fluxoperator.models.security_context.SecurityContext( + add_capabilities = [ + '' + ], + privileged = True, ), + volumes = { + 'key' : fluxoperator.models.container_volume.ContainerVolume( + path = '', + read_only = True, ) + }, + working_dir = '', ) + ], + deadline_seconds = 56, + flux = fluxoperator.models.flux_spec.FluxSpec( + broker_config = '', + bursting = fluxoperator.models.bursting.Bursting( + clusters = [ + fluxoperator.models.bursted_cluster.BurstedCluster( + name = '', + size = 56, ) + ], + hostlist = '', + lead_broker = fluxoperator.models.flux_broker.FluxBroker( + address = '', + name = '', + port = 56, + size = 56, ), ), + connect_timeout = '5s', + curve_cert = '', + curve_cert_secret = '', + install_root = '/usr', + log_level = 56, + minimal_service = True, 
+ munge_secret = '', + option_flags = '', + wrap = '', ), + flux_restful = fluxoperator.models.flux_restful.FluxRestful( + branch = 'main', + port = 56, + secret_key = '', + token = '', + username = '', ), + interactive = True, + job_labels = { + 'key' : '' + }, + logging = fluxoperator.models.logging_spec.LoggingSpec( + debug = True, + quiet = True, + strict = True, + timed = True, + zeromq = True, ), + max_size = 56, + network = fluxoperator.models.network.Network( + headless_name = 'flux-service', ), + pod = fluxoperator.models.pod_spec.PodSpec( + annotations = { + 'key' : '' + }, + labels = { + 'key' : '' + }, + node_selector = { + 'key' : '' + }, + service_account_name = '', ), + services = [ fluxoperator.models.mini_cluster_container.MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - cores=56, - diagnostics=True, - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - logs="", - name="", - pre_command="", - pull_always=True, - run_flux=True, - working_dir="", - ) - ], - size=56, - tasks=56, - users=[ + batch = True, + batch_raw = True, + command = '', + cores = 56, + diagnostics = True, + image = 'ghcr.io/rse-ops/accounting:app-latest', + image_pull_secret = '', + launcher = True, + logs = '', + name = '', + pull_always = True, + run_flux = True, + working_dir = '', ) + ], + share_process_namespace = True, + size = 56, + tasks = 56, + users = [ fluxoperator.models.mini_cluster_user.MiniClusterUser( - name="", - password="", - ) - ], - volumes={ - "key": fluxoperator.models.mini_cluster_volume.MiniClusterVolume( - attributes={"key": ""}, - capacity="5Gi", - claim_annotations={"key": ""}, - delete=True, - driver="", - path="", - secret="", - secret_namespace="default", - storage_class="hostpath", - volume_handle="", - ) - }, - ), - status=fluxoperator.models.mini_cluster_status.MiniClusterStatus( - conditions=[None], - jobid="", - maximum_size=56, - ), - ) - ], - ) + name = '', + password = '', ) + ], + 
volumes = { + 'key' : fluxoperator.models.mini_cluster_volume.MiniClusterVolume( + attributes = { + 'key' : '' + }, + capacity = '5Gi', + claim_annotations = { + 'key' : '' + }, + delete = True, + driver = '', + path = '', + secret = '', + secret_namespace = 'default', + storage_class = 'hostpath', + volume_handle = '', ) + }, ), + status = fluxoperator.models.mini_cluster_status.MiniClusterStatus( + conditions = [ + None + ], + jobid = '', + maximum_size = 56, + selector = '', + size = 56, ), ) + ], + ) def testMiniClusterList(self): """Test MiniClusterList""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/sdk/python/v1alpha1/test/test_mini_cluster_spec.py b/sdk/python/v1alpha1/test/test_mini_cluster_spec.py index cebf8d93..79afc91c 100644 --- a/sdk/python/v1alpha1/test/test_mini_cluster_spec.py +++ b/sdk/python/v1alpha1/test/test_mini_cluster_spec.py @@ -19,7 +19,6 @@ from fluxoperator.models.mini_cluster_spec import MiniClusterSpec # noqa: E501 from fluxoperator.rest import ApiException - class TestMiniClusterSpec(unittest.TestCase): """MiniClusterSpec unit test stubs""" @@ -31,251 +30,319 @@ def tearDown(self): def make_instance(self, include_optional): """Test MiniClusterSpec - include_option is a boolean, when False only required - params are included, when True both required and - optional params are included""" + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ # model = fluxoperator.models.mini_cluster_spec.MiniClusterSpec() # noqa: E501 - if include_optional: + if include_optional : return MiniClusterSpec( - archive=fluxoperator.models.mini_cluster_archive.MiniClusterArchive( - path="", - ), - cleanup=True, - containers=[ + archive = fluxoperator.models.mini_cluster_archive.MiniClusterArchive( + path = 
'', ), + cleanup = True, + containers = [ fluxoperator.models.mini_cluster_container.MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - commands=fluxoperator.models.commands.Commands( - broker_pre="", - init="", - post="", - pre="", - prefix="", - run_flux_as_root=True, - worker_pre="", - ), - cores=56, - diagnostics=True, - environment={"key": ""}, - existing_volumes={ - "key": fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( - claim_name="", - path="", - read_only=True, - ) - }, - flux_user=fluxoperator.models.flux_user.FluxUser( - name="flux", - uid=56, - ), - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - life_cycle=fluxoperator.models.life_cycle.LifeCycle( - post_start_exec="", - pre_stop_exec="", - ), - logs="", - name="", - ports=[56], - pre_command="", - pull_always=True, - resources=fluxoperator.models.container_resources.ContainerResources( - limits={"key": None}, - requests={"key": None}, - ), - run_flux=True, - security_context=fluxoperator.models.security_context.SecurityContext( - privileged=True, - ), - volumes={ - "key": fluxoperator.models.container_volume.ContainerVolume( - path="", - read_only=True, - ) - }, - working_dir="", - ) - ], - deadline_seconds=56, - flux=fluxoperator.models.flux_spec.FluxSpec( - connect_timeout="5s", - log_level=56, - option_flags="", - ), - flux_restful=fluxoperator.models.flux_restful.FluxRestful( - branch="main", - port=56, - secret_key="", - token="", - username="", - ), - interactive=True, - job_labels={"key": ""}, - logging=fluxoperator.models.logging_spec.LoggingSpec( - debug=True, - quiet=True, - strict=True, - timed=True, - zeromq=True, - ), - max_size=56, - pod=fluxoperator.models.pod_spec.PodSpec( - annotations={"key": ""}, - labels={"key": ""}, - node_selector={"key": ""}, - resources={"key": None}, - service_account_name="", - ), - services=[ + batch = True, + batch_raw = True, + command = '', + commands = 
fluxoperator.models.commands.Commands( + broker_pre = '', + init = '', + post = '', + pre = '', + prefix = '', + run_flux_as_root = True, + worker_pre = '', ), + cores = 56, + diagnostics = True, + environment = { + 'key' : '' + }, + existing_volumes = { + 'key' : fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( + claim_name = '', + config_map_name = '', + items = { + 'key' : '' + }, + path = '', + read_only = True, + secret_name = '', ) + }, + flux_user = fluxoperator.models.flux_user.FluxUser( + name = 'flux', + uid = 56, ), + image = 'ghcr.io/rse-ops/accounting:app-latest', + image_pull_secret = '', + launcher = True, + life_cycle = fluxoperator.models.life_cycle.LifeCycle( + post_start_exec = '', + pre_stop_exec = '', ), + logs = '', + name = '', + ports = [ + 56 + ], + pull_always = True, + resources = fluxoperator.models.container_resources.ContainerResources( + limits = { + 'key' : None + }, + requests = { + 'key' : None + }, ), + run_flux = True, + secrets = { + 'key' : fluxoperator.models.secret.Secret( + key = '', + name = '', ) + }, + security_context = fluxoperator.models.security_context.SecurityContext( + add_capabilities = [ + '' + ], + privileged = True, ), + volumes = { + 'key' : fluxoperator.models.container_volume.ContainerVolume( + path = '', + read_only = True, ) + }, + working_dir = '', ) + ], + deadline_seconds = 56, + flux = fluxoperator.models.flux_spec.FluxSpec( + broker_config = '', + bursting = fluxoperator.models.bursting.Bursting( + clusters = [ + fluxoperator.models.bursted_cluster.BurstedCluster( + name = '', + size = 56, ) + ], + hostlist = '', + lead_broker = fluxoperator.models.flux_broker.FluxBroker( + address = '', + name = '', + port = 56, + size = 56, ), ), + connect_timeout = '5s', + curve_cert = '', + curve_cert_secret = '', + install_root = '/usr', + log_level = 56, + minimal_service = True, + munge_secret = '', + option_flags = '', + wrap = '', ), + flux_restful = 
fluxoperator.models.flux_restful.FluxRestful( + branch = 'main', + port = 56, + secret_key = '', + token = '', + username = '', ), + interactive = True, + job_labels = { + 'key' : '' + }, + logging = fluxoperator.models.logging_spec.LoggingSpec( + debug = True, + quiet = True, + strict = True, + timed = True, + zeromq = True, ), + max_size = 56, + network = fluxoperator.models.network.Network( + headless_name = 'flux-service', ), + pod = fluxoperator.models.pod_spec.PodSpec( + annotations = { + 'key' : '' + }, + labels = { + 'key' : '' + }, + node_selector = { + 'key' : '' + }, + resources = { + 'key' : None + }, + service_account_name = '', ), + services = [ fluxoperator.models.mini_cluster_container.MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - commands=fluxoperator.models.commands.Commands( - broker_pre="", - init="", - post="", - pre="", - prefix="", - run_flux_as_root=True, - worker_pre="", - ), - cores=56, - diagnostics=True, - environment={"key": ""}, - existing_volumes={ - "key": fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( - claim_name="", - path="", - read_only=True, - ) - }, - flux_user=fluxoperator.models.flux_user.FluxUser( - name="flux", - uid=56, - ), - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - life_cycle=fluxoperator.models.life_cycle.LifeCycle( - post_start_exec="", - pre_stop_exec="", - ), - logs="", - name="", - ports=[56], - pre_command="", - pull_always=True, - resources=fluxoperator.models.container_resources.ContainerResources( - limits={"key": None}, - requests={"key": None}, - ), - run_flux=True, - security_context=fluxoperator.models.security_context.SecurityContext( - privileged=True, - ), - volumes={ - "key": fluxoperator.models.container_volume.ContainerVolume( - path="", - read_only=True, - ) - }, - working_dir="", - ) - ], - size=56, - tasks=56, - users=[ + batch = True, + batch_raw = True, + command = '', + commands = 
fluxoperator.models.commands.Commands( + broker_pre = '', + init = '', + post = '', + pre = '', + prefix = '', + run_flux_as_root = True, + worker_pre = '', ), + cores = 56, + diagnostics = True, + environment = { + 'key' : '' + }, + existing_volumes = { + 'key' : fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( + claim_name = '', + config_map_name = '', + items = { + 'key' : '' + }, + path = '', + read_only = True, + secret_name = '', ) + }, + flux_user = fluxoperator.models.flux_user.FluxUser( + name = 'flux', + uid = 56, ), + image = 'ghcr.io/rse-ops/accounting:app-latest', + image_pull_secret = '', + launcher = True, + life_cycle = fluxoperator.models.life_cycle.LifeCycle( + post_start_exec = '', + pre_stop_exec = '', ), + logs = '', + name = '', + ports = [ + 56 + ], + pull_always = True, + resources = fluxoperator.models.container_resources.ContainerResources( + limits = { + 'key' : None + }, + requests = { + 'key' : None + }, ), + run_flux = True, + secrets = { + 'key' : fluxoperator.models.secret.Secret( + key = '', + name = '', ) + }, + security_context = fluxoperator.models.security_context.SecurityContext( + add_capabilities = [ + '' + ], + privileged = True, ), + volumes = { + 'key' : fluxoperator.models.container_volume.ContainerVolume( + path = '', + read_only = True, ) + }, + working_dir = '', ) + ], + share_process_namespace = True, + size = 56, + tasks = 56, + users = [ fluxoperator.models.mini_cluster_user.MiniClusterUser( - name="", - password="", - ) - ], - volumes={ - "key": fluxoperator.models.mini_cluster_volume.MiniClusterVolume( - annotations={"key": ""}, - attributes={"key": ""}, - capacity="5Gi", - claim_annotations={"key": ""}, - delete=True, - driver="", - labels={"key": ""}, - path="", - secret="", - secret_namespace="default", - storage_class="hostpath", - volume_handle="", - ) - }, + name = '', + password = '', ) + ], + volumes = { + 'key' : fluxoperator.models.mini_cluster_volume.MiniClusterVolume( + 
annotations = { + 'key' : '' + }, + attributes = { + 'key' : '' + }, + capacity = '5Gi', + claim_annotations = { + 'key' : '' + }, + delete = True, + driver = '', + labels = { + 'key' : '' + }, + path = '', + secret = '', + secret_namespace = 'default', + storage_class = 'hostpath', + volume_handle = '', ) + } ) - else: + else : return MiniClusterSpec( - containers=[ + containers = [ fluxoperator.models.mini_cluster_container.MiniClusterContainer( - batch=True, - batch_raw=True, - command="", - commands=fluxoperator.models.commands.Commands( - broker_pre="", - init="", - post="", - pre="", - prefix="", - run_flux_as_root=True, - worker_pre="", - ), - cores=56, - diagnostics=True, - environment={"key": ""}, - existing_volumes={ - "key": fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( - claim_name="", - path="", - read_only=True, - ) - }, - flux_user=fluxoperator.models.flux_user.FluxUser( - name="flux", - uid=56, - ), - image="ghcr.io/rse-ops/accounting:app-latest", - image_pull_secret="", - launcher=True, - life_cycle=fluxoperator.models.life_cycle.LifeCycle( - post_start_exec="", - pre_stop_exec="", - ), - logs="", - name="", - ports=[56], - pre_command="", - pull_always=True, - resources=fluxoperator.models.container_resources.ContainerResources( - limits={"key": None}, - requests={"key": None}, - ), - run_flux=True, - security_context=fluxoperator.models.security_context.SecurityContext( - privileged=True, - ), - volumes={ - "key": fluxoperator.models.container_volume.ContainerVolume( - path="", - read_only=True, - ) - }, - working_dir="", - ) - ], - ) + batch = True, + batch_raw = True, + command = '', + commands = fluxoperator.models.commands.Commands( + broker_pre = '', + init = '', + post = '', + pre = '', + prefix = '', + run_flux_as_root = True, + worker_pre = '', ), + cores = 56, + diagnostics = True, + environment = { + 'key' : '' + }, + existing_volumes = { + 'key' : 
fluxoperator.models.mini_cluster_existing_volume.MiniClusterExistingVolume( + claim_name = '', + config_map_name = '', + items = { + 'key' : '' + }, + path = '', + read_only = True, + secret_name = '', ) + }, + flux_user = fluxoperator.models.flux_user.FluxUser( + name = 'flux', + uid = 56, ), + image = 'ghcr.io/rse-ops/accounting:app-latest', + image_pull_secret = '', + launcher = True, + life_cycle = fluxoperator.models.life_cycle.LifeCycle( + post_start_exec = '', + pre_stop_exec = '', ), + logs = '', + name = '', + ports = [ + 56 + ], + pull_always = True, + resources = fluxoperator.models.container_resources.ContainerResources( + limits = { + 'key' : None + }, + requests = { + 'key' : None + }, ), + run_flux = True, + secrets = { + 'key' : fluxoperator.models.secret.Secret( + key = '', + name = '', ) + }, + security_context = fluxoperator.models.security_context.SecurityContext( + add_capabilities = [ + '' + ], + privileged = True, ), + volumes = { + 'key' : fluxoperator.models.container_volume.ContainerVolume( + path = '', + read_only = True, ) + }, + working_dir = '', ) + ], + ) def testMiniClusterSpec(self): """Test MiniClusterSpec""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/sdk/python/v1alpha1/test/test_mini_cluster_status.py b/sdk/python/v1alpha1/test/test_mini_cluster_status.py index ece80446..2444d30d 100644 --- a/sdk/python/v1alpha1/test/test_mini_cluster_status.py +++ b/sdk/python/v1alpha1/test/test_mini_cluster_status.py @@ -19,7 +19,6 @@ from fluxoperator.models.mini_cluster_status import MiniClusterStatus # noqa: E501 from fluxoperator.rest import ApiException - class TestMiniClusterStatus(unittest.TestCase): """MiniClusterStatus unit test stubs""" @@ -31,23 +30,32 @@ def tearDown(self): def make_instance(self, include_optional): """Test MiniClusterStatus - include_option is a 
boolean, when False only required - params are included, when True both required and - optional params are included""" + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ # model = fluxoperator.models.mini_cluster_status.MiniClusterStatus() # noqa: E501 - if include_optional: - return MiniClusterStatus(conditions=[None], jobid="", maximum_size=56) - else: + if include_optional : return MiniClusterStatus( - jobid="", - maximum_size=56, + conditions = [ + None + ], + jobid = '', + maximum_size = 56, + selector = '', + size = 56 ) + else : + return MiniClusterStatus( + jobid = '', + maximum_size = 56, + selector = '', + size = 56, + ) def testMiniClusterStatus(self): """Test MiniClusterStatus""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/sdk/python/v1alpha1/test/test_mini_cluster_user.py b/sdk/python/v1alpha1/test/test_mini_cluster_user.py index daebfd43..12887066 100644 --- a/sdk/python/v1alpha1/test/test_mini_cluster_user.py +++ b/sdk/python/v1alpha1/test/test_mini_cluster_user.py @@ -19,7 +19,6 @@ from fluxoperator.models.mini_cluster_user import MiniClusterUser # noqa: E501 from fluxoperator.rest import ApiException - class TestMiniClusterUser(unittest.TestCase): """MiniClusterUser unit test stubs""" @@ -31,22 +30,24 @@ def tearDown(self): def make_instance(self, include_optional): """Test MiniClusterUser - include_option is a boolean, when False only required - params are included, when True both required and - optional params are included""" + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ # model = fluxoperator.models.mini_cluster_user.MiniClusterUser() # noqa: E501 - if include_optional: - return 
MiniClusterUser(name="", password="") - else: + if include_optional : return MiniClusterUser( - name="", + name = '', + password = '' ) + else : + return MiniClusterUser( + name = '', + ) def testMiniClusterUser(self): """Test MiniClusterUser""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() diff --git a/sdk/python/v1alpha1/test/test_mini_cluster_volume.py b/sdk/python/v1alpha1/test/test_mini_cluster_volume.py index 432a8632..efe9b200 100644 --- a/sdk/python/v1alpha1/test/test_mini_cluster_volume.py +++ b/sdk/python/v1alpha1/test/test_mini_cluster_volume.py @@ -19,7 +19,6 @@ from fluxoperator.models.mini_cluster_volume import MiniClusterVolume # noqa: E501 from fluxoperator.rest import ApiException - class TestMiniClusterVolume(unittest.TestCase): """MiniClusterVolume unit test stubs""" @@ -31,35 +30,42 @@ def tearDown(self): def make_instance(self, include_optional): """Test MiniClusterVolume - include_option is a boolean, when False only required - params are included, when True both required and - optional params are included""" + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ # model = fluxoperator.models.mini_cluster_volume.MiniClusterVolume() # noqa: E501 - if include_optional: + if include_optional : return MiniClusterVolume( - annotations={"key": ""}, - attributes={"key": ""}, - capacity="5Gi", - claim_annotations={"key": ""}, - delete=True, - driver="", - labels={"key": ""}, - path="", - secret="", - secret_namespace="default", - storage_class="hostpath", - volume_handle="", + annotations = { + 'key' : '' + }, + attributes = { + 'key' : '' + }, + capacity = '5Gi', + claim_annotations = { + 'key' : '' + }, + delete = True, + driver = '', + labels = { + 'key' : '' + }, + path = '', + secret = '', + secret_namespace = 
'default', + storage_class = 'hostpath', + volume_handle = '' ) - else: + else : return MiniClusterVolume( - path="", - ) + path = '', + ) def testMiniClusterVolume(self): """Test MiniClusterVolume""" inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True) - -if __name__ == "__main__": +if __name__ == '__main__': unittest.main()