From ee2d9743551c82635d411da878389ffbd9f5f655 Mon Sep 17 00:00:00 2001 From: Jose Porrua Date: Fri, 30 Jun 2023 21:35:02 -0400 Subject: [PATCH 01/17] Modify SCC allowed capabilities for OCP 4.13 (#1389) --- cli/k8s_client/yaml_factory.go | 2 ++ cli/k8s_client/yaml_factory_test.go | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/cli/k8s_client/yaml_factory.go b/cli/k8s_client/yaml_factory.go index 91a98a1bf..77ef21ada 100644 --- a/cli/k8s_client/yaml_factory.go +++ b/cli/k8s_client/yaml_factory.go @@ -1364,6 +1364,8 @@ allowHostPID: true allowHostPorts: false allowPrivilegeEscalation: true allowPrivilegedContainer: true +allowedCapabilities: +- SYS_ADMIN allowedUnsafeSysctls: null defaultAddCapabilities: null fsGroup: diff --git a/cli/k8s_client/yaml_factory_test.go b/cli/k8s_client/yaml_factory_test.go index 6ed74c5dd..1c0f90d4e 100644 --- a/cli/k8s_client/yaml_factory_test.go +++ b/cli/k8s_client/yaml_factory_test.go @@ -981,7 +981,10 @@ func TestGetOpenShiftSCCYAML(t *testing.T) { AllowHostPorts: false, AllowPrivilegeEscalation: &allowPrivilegeEscalation, AllowPrivilegedContainer: true, - DefaultAddCapabilities: nil, + AllowedCapabilities: []v1.Capability{ + "SYS_ADMIN", + }, + DefaultAddCapabilities: nil, FSGroup: scc.FSGroupStrategyOptions{ Type: "RunAsAny", }, From c2683f925f81a1df203c8033cc70ffc64b1a0d81 Mon Sep 17 00:00:00 2001 From: reederc42 Date: Mon, 3 Jul 2023 14:44:20 -0700 Subject: [PATCH 02/17] Skips single platform retagging if images are not in default context --- Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index bc049bc6a..303ae6dd9 100644 --- a/Makefile +++ b/Makefile @@ -323,20 +323,24 @@ ifeq ($(BUILD_CLI),$(DOCKER_BUILDX_BUILD_CLI)) -@$(call buildx_create_instance,$(BUILDX_CONFIG_FILE)) endif @$(call build_images_for_platforms,$(call all_image_platforms,$(PLATFORMS)),$(BUILD_CLI),$(TRIDENT_TAG),$(BUILDX_OUTPUT)) -# if a single image platform is specified, retag 
image without platform +# if a single image platform is specified and the BUILD_CLI places images in the default context, retag image without platform ifeq (1,$(words $(call all_image_platforms,$(PLATFORMS)))) +ifneq (,$(if $(findstring $(DOCKER_BUILDX_BUILD_CLI),$(BUILD_CLI)),$(findstring load,$(BUILDX_OUTPUT)),true)) @$(DOCKER_CLI) tag $(call image_tag,$(TRIDENT_TAG),$(call all_image_platforms,$(PLATFORMS))) $(MANIFEST_TAG) endif +endif operator_images: operator_binaries ifeq ($(BUILD_CLI),$(DOCKER_BUILDX_BUILD_CLI)) -@$(call buildx_create_instance,$(BUILDX_CONFIG_FILE)) endif @$(call build_operator_images_for_platforms,$(call operator_image_platforms,$(PLATFORMS)),$(BUILD_CLI),$(OPERATOR_TAG),$(BUILDX_OUTPUT)) -# if a single operator image platform is specified, retag image without platform +# if a single operator image platform is specified and the BUILD_CLI places images in the default context, retag image without platform ifeq (1,$(words $(call operator_image_platforms,$(PLATFORMS)))) +ifneq (,$(if $(findstring $(DOCKER_BUILDX_BUILD_CLI),$(BUILD_CLI)),$(findstring load,$(BUILDX_OUTPUT)),true)) @$(DOCKER_CLI) tag $(call image_tag,$(OPERATOR_TAG),$(call operator_image_platforms,$(PLATFORMS))) $(OPERATOR_MANIFEST_TAG) endif +endif # creates multi-platform image manifest manifest: images From db499f3f98bed3c5274397ae6689a7e9a4dc963c Mon Sep 17 00:00:00 2001 From: Gaurav Bhatnagar <103039430+bhatnag@users.noreply.github.com> Date: Thu, 6 Jul 2023 14:55:05 +0530 Subject: [PATCH 03/17] Fixing NVMe bugs 1. Failure to attach a pod after trident controller reset 2. Volume publish info remains even after NVMe volume unstage is successful 3. 
Subsystem is deleted when one of the pod attached to same PVC is deleted in multi-node setup --- core/orchestrator_core.go | 7 + frontend/csi/node_server.go | 2 + .../mock_ontap/mock_api.go | 27 ++- .../mock_ontap/mock_ontap_rest_interface.go | 14 ++ persistent_store/crd/apis/netapp/v1/node.go | 2 + persistent_store/crd/apis/netapp/v1/types.go | 2 + storage_drivers/ontap/api/abstraction.go | 3 +- storage_drivers/ontap/api/abstraction_rest.go | 144 +++++++++++----- .../ontap/api/abstraction_rest_test.go | 160 +++++++++++++++--- storage_drivers/ontap/api/abstraction_zapi.go | 8 +- storage_drivers/ontap/api/ontap_rest.go | 37 +++- .../ontap/api/ontap_rest_interface.go | 1 + storage_drivers/ontap/ontap_san_nvme.go | 9 +- storage_drivers/ontap/ontap_san_nvme_test.go | 4 +- 14 files changed, 330 insertions(+), 90 deletions(-) diff --git a/core/orchestrator_core.go b/core/orchestrator_core.go index 91b7f6eed..f8ce83119 100644 --- a/core/orchestrator_core.go +++ b/core/orchestrator_core.go @@ -3271,9 +3271,16 @@ func (o *TridentOrchestrator) unpublishVolume(ctx context.Context, volumeName, n return fmt.Errorf(msg) } + // Get node attributes from the node ID + nodeInfo, err := o.GetNode(ctx, nodeName) + if err != nil { + Logc(ctx).WithError(err).WithField("Node info not found for node ", nodeName) + return err + } publishInfo := &utils.VolumePublishInfo{ HostName: nodeName, TridentUUID: o.uuid, + HostNQN: nodeInfo.NQN, } volume, ok := o.subordinateVolumes[volumeName] diff --git a/frontend/csi/node_server.go b/frontend/csi/node_server.go index 1f5997163..69ea67f5d 100644 --- a/frontend/csi/node_server.go +++ b/frontend/csi/node_server.go @@ -695,6 +695,8 @@ func (p *Plugin) nodeGetInfo(ctx context.Context) *utils.Node { nvmeNQN, err = p.nvmeHandler.GetHostNqn(ctx) if err != nil { Logc(ctx).WithError(err).Warn("Problem getting Host NQN.") + } else { + Logc(ctx).WithField("NQN", nvmeNQN).Debug("Discovered NQN.") } } else { Logc(ctx).Info("NVMe is not active on this host.") 
diff --git a/mocks/mock_storage_drivers/mock_ontap/mock_api.go b/mocks/mock_storage_drivers/mock_ontap/mock_api.go index 9e456fdf3..5dc1a05b1 100644 --- a/mocks/mock_storage_drivers/mock_ontap/mock_api.go +++ b/mocks/mock_storage_drivers/mock_ontap/mock_api.go @@ -992,17 +992,18 @@ func (mr *MockOntapAPIMockRecorder) NVMeEnsureNamespaceMapped(arg0, arg1, arg2 i } // NVMeEnsureNamespaceUnmapped mocks base method. -func (m *MockOntapAPI) NVMeEnsureNamespaceUnmapped(arg0 context.Context, arg1, arg2 string) error { +func (m *MockOntapAPI) NVMeEnsureNamespaceUnmapped(arg0 context.Context, arg1, arg2, arg3 string) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NVMeEnsureNamespaceUnmapped", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "NVMeEnsureNamespaceUnmapped", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 } // NVMeEnsureNamespaceUnmapped indicates an expected call of NVMeEnsureNamespaceUnmapped. -func (mr *MockOntapAPIMockRecorder) NVMeEnsureNamespaceUnmapped(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOntapAPIMockRecorder) NVMeEnsureNamespaceUnmapped(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeEnsureNamespaceUnmapped", reflect.TypeOf((*MockOntapAPI)(nil).NVMeEnsureNamespaceUnmapped), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeEnsureNamespaceUnmapped", reflect.TypeOf((*MockOntapAPI)(nil).NVMeEnsureNamespaceUnmapped), arg0, arg1, arg2, arg3) } // NVMeIsNamespaceMapped mocks base method. @@ -1094,6 +1095,20 @@ func (mr *MockOntapAPIMockRecorder) NVMeNamespaceSetSize(arg0, arg1, arg2 interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeNamespaceSetSize", reflect.TypeOf((*MockOntapAPI)(nil).NVMeNamespaceSetSize), arg0, arg1, arg2) } +// NVMeRemoveHostFromSubsystem mocks base method. 
+func (m *MockOntapAPI) NVMeRemoveHostFromSubsystem(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NVMeRemoveHostFromSubsystem", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// NVMeRemoveHostFromSubsystem indicates an expected call of NVMeRemoveHostFromSubsystem. +func (mr *MockOntapAPIMockRecorder) NVMeRemoveHostFromSubsystem(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeRemoveHostFromSubsystem", reflect.TypeOf((*MockOntapAPI)(nil).NVMeRemoveHostFromSubsystem), arg0, arg1, arg2) +} + // NVMeSubsystemAddNamespace mocks base method. func (m *MockOntapAPI) NVMeSubsystemAddNamespace(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() diff --git a/mocks/mock_storage_drivers/mock_ontap/mock_ontap_rest_interface.go b/mocks/mock_storage_drivers/mock_ontap/mock_ontap_rest_interface.go index 39c134de6..f0057935c 100644 --- a/mocks/mock_storage_drivers/mock_ontap/mock_ontap_rest_interface.go +++ b/mocks/mock_storage_drivers/mock_ontap/mock_ontap_rest_interface.go @@ -1156,6 +1156,20 @@ func (mr *MockRestClientInterfaceMockRecorder) NVMeNamespaceSize(arg0, arg1 inte return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeNamespaceSize", reflect.TypeOf((*MockRestClientInterface)(nil).NVMeNamespaceSize), arg0, arg1) } +// NVMeRemoveHostFromSubsystem mocks base method. +func (m *MockRestClientInterface) NVMeRemoveHostFromSubsystem(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NVMeRemoveHostFromSubsystem", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// NVMeRemoveHostFromSubsystem indicates an expected call of NVMeRemoveHostFromSubsystem. 
+func (mr *MockRestClientInterfaceMockRecorder) NVMeRemoveHostFromSubsystem(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NVMeRemoveHostFromSubsystem", reflect.TypeOf((*MockRestClientInterface)(nil).NVMeRemoveHostFromSubsystem), arg0, arg1, arg2) +} + // NVMeSubsystemAddNamespace mocks base method. func (m *MockRestClientInterface) NVMeSubsystemAddNamespace(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() diff --git a/persistent_store/crd/apis/netapp/v1/node.go b/persistent_store/crd/apis/netapp/v1/node.go index 3ec951f66..c62112c09 100644 --- a/persistent_store/crd/apis/netapp/v1/node.go +++ b/persistent_store/crd/apis/netapp/v1/node.go @@ -39,6 +39,7 @@ func (in *TridentNode) Apply(persistent *utils.Node) error { in.Name = persistent.Name in.IQN = persistent.IQN + in.NQN = persistent.NQN in.IPs = persistent.IPs in.Deleted = persistent.Deleted in.PublicationState = string(persistent.PublicationState) @@ -68,6 +69,7 @@ func (in *TridentNode) Persistent() (*utils.Node, error) { persistent := &utils.Node{ Name: in.Name, IQN: in.IQN, + NQN: in.NQN, IPs: in.IPs, NodePrep: &utils.NodePrep{}, HostInfo: &utils.HostSystem{}, diff --git a/persistent_store/crd/apis/netapp/v1/types.go b/persistent_store/crd/apis/netapp/v1/types.go index 31a673483..3d105fe92 100644 --- a/persistent_store/crd/apis/netapp/v1/types.go +++ b/persistent_store/crd/apis/netapp/v1/types.go @@ -362,6 +362,8 @@ type TridentNode struct { NodeName string `json:"name"` // IQN is the iqn of the node IQN string `json:"iqn,omitempty"` + // NQN is the nqn of the node + NQN string `json:"nqn,omitempty"` // IPs is a list of IP addresses for the TridentNode IPs []string `json:"ips,omitempty"` // NodePrep is the current status of node preparation for this node diff --git a/storage_drivers/ontap/api/abstraction.go b/storage_drivers/ontap/api/abstraction.go index 7bf4afc48..f748b397a 100644 --- 
a/storage_drivers/ontap/api/abstraction.go +++ b/storage_drivers/ontap/api/abstraction.go @@ -230,10 +230,11 @@ type OntapAPI interface { NVMeSubsystemAddNamespace(ctx context.Context, subsystemUUID, nsUUID string) error NVMeSubsystemRemoveNamespace(ctx context.Context, subsysUUID, nsUUID string) error NVMeAddHostToSubsystem(ctx context.Context, hostNQN, subsUUID string) error + NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsUUID string) error NVMeSubsystemGetNamespaceCount(ctx context.Context, subsysUUID string) (int64, error) NVMeIsNamespaceMapped(ctx context.Context, subsysUUID, nsUUID string) (bool, error) NVMeEnsureNamespaceMapped(ctx context.Context, subsystemUUID, nsUUID string) error - NVMeEnsureNamespaceUnmapped(ctx context.Context, subsytemUUID, nsUUID string) error + NVMeEnsureNamespaceUnmapped(ctx context.Context, hostNQN, subsytemUUID, nsUUID string) (bool, error) } type AggregateSpace interface { diff --git a/storage_drivers/ontap/api/abstraction_rest.go b/storage_drivers/ontap/api/abstraction_rest.go index 1f22ccfaa..e89789f66 100644 --- a/storage_drivers/ontap/api/abstraction_rest.go +++ b/storage_drivers/ontap/api/abstraction_rest.go @@ -143,7 +143,7 @@ func (d OntapAPIREST) ValidateAPIVersion(ctx context.Context) error { // Make sure we're using a valid ONTAP version ontapVersion, err := d.APIVersion(ctx) if err != nil { - return fmt.Errorf("could not determine Data ONTAP version: %v", err) + return fmt.Errorf("could not determine Data ONTAP version; %v", err) } Logc(ctx).WithField("ontapVersion", ontapVersion).Debug("ONTAP version.") @@ -534,14 +534,14 @@ func (d OntapAPIREST) FlexgroupCreate(ctx context.Context, volume Volume) error func (d OntapAPIREST) FlexgroupCloneSplitStart(ctx context.Context, cloneName string) error { if err := d.api.FlexgroupCloneSplitStart(ctx, cloneName); err != nil { - return fmt.Errorf("error splitting clone: %v", err) + return fmt.Errorf("error splitting clone; %v", err) } return nil } func (d 
OntapAPIREST) FlexgroupDisableSnapshotDirectoryAccess(ctx context.Context, volumeName string) error { if err := d.api.FlexGroupVolumeDisableSnapshotDirectoryAccess(ctx, volumeName); err != nil { - return fmt.Errorf("error disabling snapshot directory access: %v", err) + return fmt.Errorf("error disabling snapshot directory access; %v", err) } return nil @@ -571,7 +571,7 @@ func (d OntapAPIREST) FlexgroupSetComment( ctx context.Context, volumeNameInternal, volumeNameExternal, comment string, ) error { if err := d.api.FlexGroupSetComment(ctx, volumeNameInternal, comment); err != nil { - Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Modifying comment failed: %v", err) + Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Modifying comment failed; %v", err) return fmt.Errorf("volume %s modify failed: %v", volumeNameExternal, err) } return nil @@ -579,7 +579,7 @@ func (d OntapAPIREST) FlexgroupSetComment( func (d OntapAPIREST) FlexgroupSetQosPolicyGroupName(ctx context.Context, name string, qos QosPolicyGroup) error { if err := d.api.FlexgroupSetQosPolicyGroupName(ctx, name, qos); err != nil { - return fmt.Errorf("error setting quality of service policy: %v", err) + return fmt.Errorf("error setting quality of service policy; %v", err) } return nil @@ -588,14 +588,14 @@ func (d OntapAPIREST) FlexgroupSetQosPolicyGroupName(ctx context.Context, name s func (d OntapAPIREST) FlexgroupSnapshotCreate(ctx context.Context, snapshotName, sourceVolume string) error { volume, err := d.FlexgroupInfo(ctx, sourceVolume) if err != nil { - return fmt.Errorf("error looking up source volume %v: %v", sourceVolume, err) + return fmt.Errorf("error looking up source volume %v; %v", sourceVolume, err) } if volume == nil { return fmt.Errorf("error looking up source volume: %v", sourceVolume) } if err = d.api.SnapshotCreateAndWait(ctx, volume.UUID, snapshotName); err != nil { - return fmt.Errorf("could not create snapshot: %v", err) + return fmt.Errorf("could not 
create snapshot; %v", err) } return nil } @@ -603,7 +603,7 @@ func (d OntapAPIREST) FlexgroupSnapshotCreate(ctx context.Context, snapshotName, func (d OntapAPIREST) FlexgroupSnapshotList(ctx context.Context, sourceVolume string) (Snapshots, error) { volume, err := d.FlexgroupInfo(ctx, sourceVolume) if err != nil { - return nil, fmt.Errorf("error looking up source volume: %v", err) + return nil, fmt.Errorf("error looking up source volume; %v", err) } if volume == nil { return nil, fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -611,7 +611,7 @@ func (d OntapAPIREST) FlexgroupSnapshotList(ctx context.Context, sourceVolume st snapListResponse, err := d.api.SnapshotList(ctx, volume.UUID) if err != nil { - return nil, fmt.Errorf("error enumerating snapshots: %v", err) + return nil, fmt.Errorf("error enumerating snapshots; %v", err) } if snapListResponse == nil { return nil, fmt.Errorf("error enumerating snapshots") @@ -641,7 +641,7 @@ func (d OntapAPIREST) FlexgroupModifyUnixPermissions( err := d.api.FlexGroupModifyUnixPermissions(ctx, volumeNameInternal, unixPermissions) if err != nil { Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Could not import volume, "+ - "modifying unix permissions failed: %v", err) + "modifying unix permissions failed; %v", err) return fmt.Errorf("volume %s modify failed: %v", volumeNameExternal, err) } @@ -769,7 +769,7 @@ func (d OntapAPIREST) GetSVMAggregateAttributes(ctx context.Context) (aggrList m func (d OntapAPIREST) ExportPolicyDestroy(ctx context.Context, policy string) error { exportPolicyDestroyResult, err := d.api.ExportPolicyDestroy(ctx, policy) if err != nil { - return fmt.Errorf("error deleting export policy: %v", err) + return fmt.Errorf("error deleting export policy; %v", err) } if exportPolicyDestroyResult == nil { return fmt.Errorf("error deleting export policy") @@ -867,7 +867,7 @@ func (d OntapAPIREST) GetSVMAggregateSpace(ctx context.Context, aggregate string func (d OntapAPIREST) 
VolumeDisableSnapshotDirectoryAccess(ctx context.Context, name string) error { if err := d.api.VolumeDisableSnapshotDirectoryAccess(ctx, name); err != nil { - return fmt.Errorf("error disabling snapshot directory access: %v", err) + return fmt.Errorf("error disabling snapshot directory access; %v", err) } return nil @@ -896,7 +896,7 @@ func (d OntapAPIREST) VolumeSetComment( ctx context.Context, volumeNameInternal, volumeNameExternal, comment string, ) error { if err := d.api.VolumeSetComment(ctx, volumeNameInternal, comment); err != nil { - Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Modifying comment failed: %v", err) + Logc(ctx).WithField("originalName", volumeNameExternal).Errorf("Modifying comment failed; %v", err) return fmt.Errorf("volume %s modify failed: %v", volumeNameExternal, err) } return nil @@ -948,7 +948,7 @@ func (d OntapAPIREST) VolumeModifyUnixPermissions( err := d.api.VolumeModifyUnixPermissions(ctx, volumeNameInternal, unixPermissions) if err != nil { Logc(ctx).WithField("originalName", volumeNameExternal).Errorf( - "Could not import volume, modifying unix permissions failed: %v", err) + "Could not import volume, modifying unix permissions failed; %v", err) return fmt.Errorf("volume %s modify failed: %v", volumeNameExternal, err) } @@ -1016,7 +1016,7 @@ func (d OntapAPIREST) ExportRuleCreate(ctx context.Context, policyName, desiredP ruleResponse, err = d.api.ExportRuleCreate(ctx, policyName, desiredPolicyRule, protocol, []string{"any"}, []string{"any"}, []string{"any"}) if err != nil { - err = fmt.Errorf("error creating export rule: %v", err) + err = fmt.Errorf("error creating export rule; %v", err) Logc(ctx).WithFields(LogFields{ "ExportPolicy": policyName, "ClientMatch": desiredPolicyRule, @@ -1074,7 +1074,7 @@ func (d OntapAPIREST) ExportPolicyExists(ctx context.Context, policyName string) func (d OntapAPIREST) ExportRuleList(ctx context.Context, policyName string) (map[string]int, error) { ruleListResponse, err := 
d.api.ExportRuleList(ctx, policyName) if err != nil { - return nil, fmt.Errorf("error listing export policy rules: %v", err) + return nil, fmt.Errorf("error listing export policy rules; %v", err) } rules := make(map[string]int) @@ -1130,7 +1130,7 @@ func (d OntapAPIREST) QtreeCount(ctx context.Context, volumeName string) (int, e func (d OntapAPIREST) QtreeListByPrefix(ctx context.Context, prefix, volumePrefix string) (Qtrees, error) { qtreeList, err := d.api.QtreeList(ctx, prefix, volumePrefix) if err != nil { - msg := fmt.Sprintf("Error listing qtrees. %v", err) + msg := fmt.Sprintf("Error listing qtrees; %v", err) Logc(ctx).Errorf(msg) return nil, fmt.Errorf(msg) } @@ -1286,7 +1286,7 @@ func (d OntapAPIREST) VolumeSnapshotCreate(ctx context.Context, snapshotName, so } if err = d.api.SnapshotCreateAndWait(ctx, volume.UUID, snapshotName); err != nil { - return fmt.Errorf("could not create snapshot: %v", err) + return fmt.Errorf("could not create snapshot; %v", err) } return nil } @@ -1325,7 +1325,7 @@ func (d OntapAPIREST) pollVolumeExistence(ctx context.Context, volumeName string func (d OntapAPIREST) VolumeCloneCreate(ctx context.Context, cloneName, sourceName, snapshot string, async bool) error { err := d.api.VolumeCloneCreateAsync(ctx, cloneName, sourceName, snapshot) if err != nil { - return fmt.Errorf("error creating clone: %v", err) + return fmt.Errorf("error creating clone; %v", err) } return nil @@ -1415,7 +1415,7 @@ func (d OntapAPIREST) VolumeSnapshotInfo(ctx context.Context, snapshotName, sour volume, err := d.VolumeInfo(ctx, sourceVolume) if err != nil { - return emptyResult, fmt.Errorf("error looking up source volume: %v", err) + return emptyResult, fmt.Errorf("error looking up source volume; %v", err) } if volume == nil { return emptyResult, fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -1451,7 +1451,7 @@ func (d OntapAPIREST) VolumeSnapshotInfo(ctx context.Context, snapshotName, sour func (d OntapAPIREST) VolumeSnapshotList(ctx 
context.Context, sourceVolume string) (Snapshots, error) { volume, err := d.VolumeInfo(ctx, sourceVolume) if err != nil { - return nil, fmt.Errorf("error looking up source volume: %v", err) + return nil, fmt.Errorf("error looking up source volume; %v", err) } if volume == nil { return nil, fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -1459,7 +1459,7 @@ func (d OntapAPIREST) VolumeSnapshotList(ctx context.Context, sourceVolume strin snapListResponse, err := d.api.SnapshotList(ctx, volume.UUID) if err != nil { - return nil, fmt.Errorf("error enumerating snapshots: %v", err) + return nil, fmt.Errorf("error enumerating snapshots; %v", err) } if snapListResponse == nil { return nil, fmt.Errorf("error enumerating snapshots") @@ -1485,7 +1485,7 @@ func (d OntapAPIREST) VolumeSnapshotList(ctx context.Context, sourceVolume strin func (d OntapAPIREST) VolumeSetQosPolicyGroupName(ctx context.Context, name string, qos QosPolicyGroup) error { if err := d.api.VolumeSetQosPolicyGroupName(ctx, name, qos); err != nil { - return fmt.Errorf("error setting quality of service policy: %v", err) + return fmt.Errorf("error setting quality of service policy; %v", err) } return nil @@ -1493,7 +1493,7 @@ func (d OntapAPIREST) VolumeSetQosPolicyGroupName(ctx context.Context, name stri func (d OntapAPIREST) VolumeCloneSplitStart(ctx context.Context, cloneName string) error { if err := d.api.VolumeCloneSplitStart(ctx, cloneName); err != nil { - return fmt.Errorf("error splitting clone: %v", err) + return fmt.Errorf("error splitting clone; %v", err) } return nil } @@ -1502,7 +1502,7 @@ func (d OntapAPIREST) SnapshotRestoreVolume( ctx context.Context, snapshotName, sourceVolume string, ) error { if err := d.api.SnapshotRestoreVolume(ctx, snapshotName, sourceVolume); err != nil { - return fmt.Errorf("error restoring snapshot: %v", err) + return fmt.Errorf("error restoring snapshot; %v", err) } return nil @@ -1510,7 +1510,7 @@ func (d OntapAPIREST) SnapshotRestoreVolume( func 
(d OntapAPIREST) SnapshotRestoreFlexgroup(ctx context.Context, snapshotName, sourceVolume string) error { if err := d.api.SnapshotRestoreFlexgroup(ctx, snapshotName, sourceVolume); err != nil { - return fmt.Errorf("error restoring snapshot: %v", err) + return fmt.Errorf("error restoring snapshot; %v", err) } return nil @@ -1522,7 +1522,7 @@ func (d OntapAPIREST) SnapshotDeleteByNameAndStyle( // GET the snapshot by name snapshot, err := d.api.SnapshotGetByName(ctx, sourceVolumeUUID, snapshotName) if err != nil { - return fmt.Errorf("error checking for snapshot: %v", err) + return fmt.Errorf("error checking for snapshot; %v", err) } if snapshot == nil { return fmt.Errorf("error looking up snapshot: %v", snapshotName) @@ -1535,7 +1535,7 @@ func (d OntapAPIREST) SnapshotDeleteByNameAndStyle( // DELETE the snapshot snapshotDeleteResult, err := d.api.SnapshotDelete(ctx, sourceVolumeUUID, snapshotUUID) if err != nil { - return fmt.Errorf("error while deleting snapshot: %v", err) + return fmt.Errorf("error while deleting snapshot; %v", err) } if snapshotDeleteResult == nil { return fmt.Errorf("error while deleting snapshot: %v", snapshotName) @@ -1565,7 +1565,7 @@ func (d OntapAPIREST) SnapshotDeleteByNameAndStyle( func (d OntapAPIREST) FlexgroupSnapshotDelete(ctx context.Context, snapshotName, sourceVolume string) error { volume, err := d.FlexgroupInfo(ctx, sourceVolume) if err != nil { - return fmt.Errorf("error looking up source volume: %v", err) + return fmt.Errorf("error looking up source volume; %v", err) } if volume == nil { return fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -1578,7 +1578,7 @@ func (d OntapAPIREST) FlexgroupSnapshotDelete(ctx context.Context, snapshotName, func (d OntapAPIREST) VolumeSnapshotDelete(ctx context.Context, snapshotName, sourceVolume string) error { volume, err := d.VolumeInfo(ctx, sourceVolume) if err != nil { - return fmt.Errorf("error looking up source volume: %v", err) + return fmt.Errorf("error looking up 
source volume; %v", err) } if volume == nil { return fmt.Errorf("error looking up source volume: %v", sourceVolume) @@ -2441,7 +2441,7 @@ func (d OntapAPIREST) IscsiInterfaceGet(ctx context.Context, svm string) ([]stri var iSCSINodeNames []string interfaceResponse, err := d.api.IscsiInterfaceGet(ctx) if err != nil { - return nil, fmt.Errorf("could not get SVM iSCSI node name: %v", err) + return nil, fmt.Errorf("could not get SVM iSCSI node name; %v", err) } if interfaceResponse == nil || interfaceResponse.Payload == nil { return nil, nil @@ -2653,7 +2653,7 @@ func (d OntapAPIREST) GetSLMDataLifs(ctx context.Context, ips, reportingNodeName netInterfaces, err := d.api.NetworkIPInterfacesList(ctx) if err != nil { - return nil, fmt.Errorf("error checking network interfaces: %v", err) + return nil, fmt.Errorf("error checking network interfaces; %v", err) } if netInterfaces == nil || netInterfaces.Payload == nil { @@ -2806,7 +2806,7 @@ func (d OntapAPIREST) NVMeSubsystemAddNamespace(ctx context.Context, subsystemUU defer Logd(ctx, d.driverName, d.api.ClientConfig().DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< NVMeSubsystemAddNamespace") if err := d.api.NVMeSubsystemAddNamespace(ctx, subsystemUUID, nsUUID); err != nil { - return fmt.Errorf("error adding namespace to subsystem: %v", err) + return fmt.Errorf("error adding namespace to subsystem; %v", err) } return nil @@ -2824,7 +2824,7 @@ func (d OntapAPIREST) NVMeSubsystemRemoveNamespace(ctx context.Context, subsysUU defer Logd(ctx, d.driverName, d.api.ClientConfig().DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< NVMeSubsystemRemoveNamespace") if err := d.api.NVMeSubsystemRemoveNamespace(ctx, subsysUUID, nsUUID); err != nil { - return fmt.Errorf("error removing Namespace from subsystem map: %v", err) + return fmt.Errorf("error removing Namespace from subsystem map; %v", err) } return nil @@ -2906,11 +2906,43 @@ func (d OntapAPIREST) NVMeAddHostToSubsystem(ctx context.Context, hostNQN, subsy // Add 
new host to the subsystem if err := d.api.NVMeAddHostNqnToSubsystem(ctx, hostNQN, subsysUUID); err != nil { - return fmt.Errorf("failed to add host nqn to subsystem, %v", err) + return fmt.Errorf("failed to add host nqn to subsystem; %v", err) } return nil } +func (d OntapAPIREST) NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsysUUID string) error { + fields := LogFields{ + "Method": "NVMeRemoveHostToSubsystem", + "Type": "OntapAPIREST", + "subsystem uuid": subsysUUID, + } + Logd(ctx, d.driverName, d.api.ClientConfig().DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> NVMeRemoveHostToSubsystem") + defer Logd(ctx, d.driverName, d.api.ClientConfig().DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< NVMeRemoveHostToSubsystem") + + hosts, err := d.api.NVMeGetHostsOfSubsystem(ctx, subsysUUID) + if err != nil { + return err + } + + hostFound := false + for _, host := range hosts { + if host != nil && *host.Nqn == hostNQN { + hostFound = true + break + } + } + + if hostFound { + // Remove host from the subsystem + if err := d.api.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsysUUID); err != nil { + return fmt.Errorf("failed to remove host nqn from subsystem; %v", err) + } + } + + return nil +} + // NVMeSubsystemCreate Checks if the subsystem is already there or not. If not, creates a new one. 
func (d OntapAPIREST) NVMeSubsystemCreate(ctx context.Context, subsystemName string) (*NVMeSubsystem, error) { fields := LogFields{ @@ -2923,7 +2955,7 @@ func (d OntapAPIREST) NVMeSubsystemCreate(ctx context.Context, subsystemName str subsystem, err := d.api.NVMeSubsystemGetByName(ctx, subsystemName) if err != nil { - Logc(ctx).Infof("problem getting subsystem %v", err) + Logc(ctx).Infof("problem getting subsystem; %v", err) return nil, err } if subsystem == nil { @@ -2958,7 +2990,7 @@ func (d OntapAPIREST) NVMeEnsureNamespaceMapped(ctx context.Context, subsystemUU // map namespace to the subsystem isNameSpaceMapped, err := d.api.NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID) if err != nil { - return fmt.Errorf("Unable to get namespace subsystem mapping: err:%v", err) + return fmt.Errorf("Unable to get namespace subsystem mapping; %v", err) } // check if it is mapped already or not. if not mapped, add it to subsystem, else treat it as success @@ -2977,37 +3009,61 @@ func (d OntapAPIREST) NVMeEnsureNamespaceMapped(ctx context.Context, subsystemUU // a) removes the namespace from the subsystem // b) deletes the subsystem if no more namespaces are attached to it // If namespace is not mapped to subsystem, it is treated as success -func (d OntapAPIREST) NVMeEnsureNamespaceUnmapped(ctx context.Context, subsystemUUID, namespaceUUID string) error { +// The function also returns a bool value along with error. A true value denotes the subsystem is deleted +// successfully and Published info can be removed for the NVMe volume +func (d OntapAPIREST) NVMeEnsureNamespaceUnmapped(ctx context.Context, hostNQN, subsystemUUID, namespaceUUID string) (bool, error) { // check is namespace is mapped to the subsystem before attempting to remove it isNameSpaceMapped, err := d.api.NVMeIsNamespaceMapped(ctx, subsystemUUID, namespaceUUID) if err != nil { - return fmt.Errorf("Error getting namespace %s from subsystem %s. 
API returned error %v", namespaceUUID, subsystemUUID, err) + return false, fmt.Errorf("error getting namespace %s from subsystem %s; %v", namespaceUUID, subsystemUUID, err) } + // If namespace is not mapped, remove the published info if there is any if isNameSpaceMapped == false { Logc(ctx).Infof("Namespace %v is not mapped to subsystem %v", namespaceUUID, subsystemUUID) - return nil + return true, nil + } + + subsystemHosts, err := d.api.NVMeGetHostsOfSubsystem(ctx, subsystemUUID) + if err != nil { + return false, fmt.Errorf("error getting hosts mapped to subsystem with UUID %s; %v", subsystemUUID, err) + } + + if subsystemHosts == nil { + return false, fmt.Errorf("error getting hosts attached to subsystem %v", subsystemUUID) + } + + // In case of multiple hosts attached to a subsystem (e.g. in RWX case), do not delete the namespace, + // subsystem or the published info + if len(subsystemHosts) > 1 { + Logc(ctx).Infof("Multiple hosts are attached to this subsystem %v. Do not delete namespace or subsystem", subsystemUUID) + // Remove HostNQN from the subsystem using api call + if err := d.api.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID); err != nil { + Logc(ctx).Errorf("Remove host from subsystem failed; %v", err) + return false, err + } + return false, nil } // Unmap the namespace from the subsystem err = d.api.NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, namespaceUUID) if err != nil { - return fmt.Errorf("Error removing namespace %s from subsystem %s. API returned error %v", namespaceUUID, subsystemUUID, err) + return false, fmt.Errorf("error removing namespace %s from subsystem %s; %v", namespaceUUID, subsystemUUID, err) } // Get the number of namespaces present in the subsystem count, err := d.api.NVMeNamespaceCount(ctx, subsystemUUID) if err != nil { - return fmt.Errorf("Error getting namespace count for subsystem %s. 
API returned error %v", subsystemUUID, err) + return false, fmt.Errorf("error getting namespace count for subsystem %s; %v", subsystemUUID, err) } // Delete the subsystem if no. of namespaces is 0 if count == 0 { if err := d.api.NVMeSubsystemDelete(ctx, subsystemUUID); err != nil { - return fmt.Errorf("Error deleting subsystem %s. API returned error %v", subsystemUUID, err) + return false, fmt.Errorf("error deleting subsystem %s; %v", subsystemUUID, err) } } - return nil + return true, nil } func (d OntapAPIREST) NVMeNamespaceGetSize(ctx context.Context, namespacePath string) (int, error) { diff --git a/storage_drivers/ontap/api/abstraction_rest_test.go b/storage_drivers/ontap/api/abstraction_rest_test.go index 68a525216..d725f0318 100644 --- a/storage_drivers/ontap/api/abstraction_rest_test.go +++ b/storage_drivers/ontap/api/abstraction_rest_test.go @@ -542,6 +542,59 @@ func TestNVMeAddHostToSubsystem(t *testing.T) { assert.NoError(t, err) } +func TestNVMeRemoveHostFromSubsystem(t *testing.T) { + clientConfig := api.ClientConfig{ + DebugTraceFlags: map[string]bool{"method": true}, + } + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mock := mockapi.NewMockRestClientInterface(ctrl) + oapi, err := api.NewOntapAPIRESTFromRestClientInterface(mock) + assert.NoError(t, err) + + // case 1 : Error removing host from subsystem + hostNQN := "fakeNQN" + subsystemUUID := "fakesubsysUUID" + host1 := &models.NvmeSubsystemHost{} + + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return(nil, fmt.Errorf("Error while getting hosts for subsystem")) + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + + err = oapi.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID) + + assert.Error(t, err) + + // case 2 : host not found + Nqn := "wrongNQN" + host1.Nqn = &Nqn + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil) + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + + err = 
oapi.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID) + + assert.NoError(t, err) + + // case 3 : host found but failed to remove it + host1.Nqn = &hostNQN + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil) + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + mock.EXPECT().NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID).Return(fmt.Errorf("Error while removing host")) + + err = oapi.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID) + + assert.Error(t, err) + + // case 4 : Success- host found and removed it + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil) + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + mock.EXPECT().NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID).Return(nil) + + err = oapi.NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID) + + assert.NoError(t, err) +} + func TestNVMeSubsystemCreate(t *testing.T) { clientConfig := api.ClientConfig{ DebugTraceFlags: map[string]bool{"method": true}, @@ -669,62 +722,119 @@ func TestNVMeNamespaceUnmapped(t *testing.T) { subsystemUUID := "fakeSubsysUUID" nsUUID := "fakeNsUUID" + hostNQN := "fakeHostNQN" + host2NQN := "fakeHost2NQN" + host1 := &models.NvmeSubsystemHost{Nqn: &hostNQN} + host2 := &models.NvmeSubsystemHost{Nqn: &host2NQN} + var removePublishInfo bool // case 1: Error getting namespace from subsystem mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(false, fmt.Errorf("Error getting namespace subsystem mapping")) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(false, fmt.Errorf("Error getting namespace subsystem mapping")).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, false, 
removePublishInfo) assert.Error(t, err) // case 2: Namespace is not mapped mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(false, nil) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(false, nil).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, true, removePublishInfo) assert.NoError(t, err) - // case 3: Error removing namespace from subsystem + // case 3: Failed to get hosts of the subsystem mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil) - mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(fmt.Errorf("Error removing namespace from subsystem")) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return(nil, fmt.Errorf("failed to get hosts")).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, false, removePublishInfo) assert.Error(t, err) - // case 4: Error getting namespace count from subsystem + // case 4: hosts of the subsystem not returned mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil) - mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil) - mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), fmt.Errorf("Error getting namespace count from subsystem")) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, 
subsystemUUID).Return(nil, nil).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, false, removePublishInfo) assert.Error(t, err) - // case 5: Error deleting subsystem + // case 5: multiple hosts of the subsystem returned but error while removing host from subsystem mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil) - mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil) - mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), nil) - mock.EXPECT().NVMeSubsystemDelete(ctx, subsystemUUID).Return(fmt.Errorf("Error deleting subsystem")) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1, host2}, nil).Times(1) + mock.EXPECT().NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID).Return(fmt.Errorf("Error removing host from subsystem")).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, false, removePublishInfo) assert.Error(t, err) - // case 6: Success deleting subsystem + // case 6: multiple hosts of the subsystem returned and success while removing host from subsystem mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() - mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil) - mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil) - mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), nil) - mock.EXPECT().NVMeSubsystemDelete(ctx, subsystemUUID).Return(nil) + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, 
nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1, host2}, nil).Times(1) + mock.EXPECT().NVMeRemoveHostFromSubsystem(ctx, hostNQN, subsystemUUID).Return(nil).Times(1) + + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + + assert.Equal(t, false, removePublishInfo) + assert.NoError(t, err) + + // case 7: Error removing namespace from subsystem + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil).Times(1) + mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(fmt.Errorf("Error removing namespace from subsystem")).Times(1) + + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + + assert.Equal(t, false, removePublishInfo) + assert.Error(t, err) + + // case 8: Error getting namespace count from subsystem + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil).Times(1) + mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil).Times(1) + mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), fmt.Errorf("Error getting namespace count from subsystem")).Times(1) + + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + + assert.Equal(t, false, removePublishInfo) + assert.Error(t, err) + + // case 9: Error deleting subsystem + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + 
mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).Return(nil).Times(1) + mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), nil).Times(1) + mock.EXPECT().NVMeSubsystemDelete(ctx, subsystemUUID).Return(fmt.Errorf("Error deleting subsystem")).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil).Times(1) + + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + + assert.Equal(t, false, removePublishInfo) + assert.Error(t, err) + + // case 10: Success deleting subsystem + mock.EXPECT().ClientConfig().Return(clientConfig).AnyTimes() + mock.EXPECT().NVMeIsNamespaceMapped(ctx, subsystemUUID, nsUUID).Return(true, nil).Times(1) + mock.EXPECT().NVMeSubsystemRemoveNamespace(ctx, subsystemUUID, nsUUID).AnyTimes().Return(nil).Times(1) + mock.EXPECT().NVMeNamespaceCount(ctx, subsystemUUID).Return(int64(0), nil).Times(1) + mock.EXPECT().NVMeSubsystemDelete(ctx, subsystemUUID).Return(nil).Times(1) + mock.EXPECT().NVMeGetHostsOfSubsystem(ctx, subsystemUUID).Return([]*models.NvmeSubsystemHost{host1}, nil).Times(1) - err = oapi.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, nsUUID) + removePublishInfo, err = oapi.NVMeEnsureNamespaceUnmapped(ctx, hostNQN, subsystemUUID, nsUUID) + assert.Equal(t, true, removePublishInfo) assert.NoError(t, err) } diff --git a/storage_drivers/ontap/api/abstraction_zapi.go b/storage_drivers/ontap/api/abstraction_zapi.go index b00ac9d21..085d47ab7 100644 --- a/storage_drivers/ontap/api/abstraction_zapi.go +++ b/storage_drivers/ontap/api/abstraction_zapi.go @@ -2478,6 +2478,10 @@ func (d OntapAPIZAPI) NVMeAddHostToSubsystem(ctx context.Context, hostNQN, subsU return fmt.Errorf("ZAPI call is not supported yet") } +func (d OntapAPIZAPI) NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsUUID string) error { + return fmt.Errorf("ZAPI call is not supported yet") +} + func (d OntapAPIZAPI) 
NVMeIsNamespaceMapped(ctx context.Context, subsysUUID, nsUUID string) (bool, error) { return false, fmt.Errorf("ZAPI call is not supported yet") } @@ -2486,8 +2490,8 @@ func (d OntapAPIZAPI) NVMeEnsureNamespaceMapped(ctx context.Context, subsystemUU return fmt.Errorf("ZAPI call is not supported yet") } -func (d OntapAPIZAPI) NVMeEnsureNamespaceUnmapped(ctx context.Context, subsystemUUID, namespaceUUID string) error { - return fmt.Errorf("ZAPI call is not supported yet") +func (d OntapAPIZAPI) NVMeEnsureNamespaceUnmapped(ctx context.Context, hostNQN, subsystemUUID, namespaceUUID string) (bool, error) { + return false, fmt.Errorf("ZAPI call is not supported yet") } func (d OntapAPIZAPI) NVMeNamespaceGetSize(ctx context.Context, subsystemName string) (int, error) { diff --git a/storage_drivers/ontap/api/ontap_rest.go b/storage_drivers/ontap/api/ontap_rest.go index be168a6b4..a15ee71cc 100644 --- a/storage_drivers/ontap/api/ontap_rest.go +++ b/storage_drivers/ontap/api/ontap_rest.go @@ -248,7 +248,7 @@ func NewRestClientFromOntapConfig( apiREST, err := NewOntapAPIREST(restClient, ontapConfig.StorageDriverName) if err != nil { - return nil, fmt.Errorf("unable to get REST API client for ontap: %v", err) + return nil, fmt.Errorf("unable to get REST API client for ontap; %v", err) } return apiREST, nil @@ -1526,7 +1526,7 @@ func (c RestClient) SnapshotCreate( func (c RestClient) SnapshotCreateAndWait(ctx context.Context, volumeUUID, snapshotName string) error { snapshotCreateResult, err := c.SnapshotCreate(ctx, volumeUUID, snapshotName) if err != nil { - return fmt.Errorf("could not create snapshot: %v", err) + return fmt.Errorf("could not create snapshot; %v", err) } if snapshotCreateResult == nil { return fmt.Errorf("could not create snapshot: %v", "unexpected result") @@ -1707,7 +1707,7 @@ func (c RestClient) VolumeCloneCreate(ctx context.Context, cloneName, sourceVolu func (c RestClient) VolumeCloneCreateAsync(ctx context.Context, cloneName, sourceVolumeName, snapshot 
string) error { cloneCreateResult, err := c.createCloneNAS(ctx, cloneName, sourceVolumeName, snapshot) if err != nil { - return fmt.Errorf("could not create clone: %v", err) + return fmt.Errorf("could not create clone; %v", err) } if cloneCreateResult == nil { return fmt.Errorf("could not create clone: %v", "unexpected result") @@ -1992,7 +1992,7 @@ func (c RestClient) IgroupDestroy(ctx context.Context, initiatorGroupName string lunDeleteResult, err := c.api.San.IgroupDelete(params, c.authInfo) if err != nil { - return fmt.Errorf("could not delete igroup: %v", err) + return fmt.Errorf("could not delete igroup; %v", err) } if lunDeleteResult == nil { return fmt.Errorf("could not delete igroup: %v", "unexpected result") @@ -2366,7 +2366,7 @@ func (c RestClient) LunDelete( lunDeleteResult, err := c.api.San.LunDelete(params, c.authInfo) if err != nil { - return fmt.Errorf("could not delete lun: %v", err) + return fmt.Errorf("could not delete lun; %v", err) } if lunDeleteResult == nil { return fmt.Errorf("could not delete lun: %v", "unexpected result") @@ -2952,7 +2952,7 @@ func (c RestClient) NetInterfaceGetDataLIFs(ctx context.Context, protocol string lifResponse, err := c.api.Networking.NetworkIPInterfacesGet(params, c.authInfo) if err != nil { - return nil, fmt.Errorf("error checking network interfaces: %v", err) + return nil, fmt.Errorf("error checking network interfaces; %v", err) } if lifResponse == nil { return nil, fmt.Errorf("unexpected error checking network interfaces") @@ -5671,7 +5671,7 @@ func (c RestClient) NVMeNamespaceSetSize(ctx context.Context, nsUUID string, new nsModify, err := c.api.NvMe.NvmeNamespaceModify(params, c.authInfo) if err != nil { - return fmt.Errorf("namespace resize failed, %v", err) + return fmt.Errorf("namespace resize failed; %v", err) } if nsModify == nil { return fmt.Errorf("namespace resize failed") @@ -5782,7 +5782,7 @@ func (c RestClient) NVMeSubsystemRemoveNamespace(ctx context.Context, subsysUUID _, err := 
c.api.NvMe.NvmeSubsystemMapDelete(params, c.authInfo)
 	if err != nil {
-		return fmt.Errorf("error while deleting namespace from subsystem map: %v", err)
+		return fmt.Errorf("error while deleting namespace from subsystem map; %v", err)
 	}
 	return nil
 }
@@ -5961,7 +5961,7 @@ func (c RestClient) NVMeSubsystemDelete(ctx context.Context, subsysUUID string)
 
 	subsysDeleted, err := c.api.NvMe.NvmeSubsystemDelete(params, c.authInfo)
 	if err != nil {
-		return fmt.Errorf("issue while deleting the subsystem, %v", err)
+		return fmt.Errorf("issue while deleting the subsystem; %v", err)
 	}
 	if subsysDeleted == nil {
 		return fmt.Errorf("issue while deleting the subsystem")
@@ -5997,6 +5997,25 @@ func (c RestClient) NVMeAddHostNqnToSubsystem(ctx context.Context, hostNQN, subs
 	return fmt.Errorf("error while adding host to subsystem %v", hostAdded.Error())
 }
 
+// NVMeRemoveHostFromSubsystem removes the NQN of the host from the subsystem
+func (c RestClient) NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsUUID string) error {
+	params := nvme.NewNvmeSubsystemHostDeleteParamsWithTimeout(c.httpClient.Timeout)
+	params.Context = ctx
+	params.HTTPClient = c.httpClient
+	params.SubsystemUUID = subsUUID
+	params.Nqn = hostNQN
+
+	hostRemoved, err := c.api.NvMe.NvmeSubsystemHostDelete(params, c.authInfo)
+	if err != nil {
+		return fmt.Errorf("issue while removing host from subsystem; %v", err)
+	}
+	if hostRemoved.IsSuccess() {
+		return nil
+	}
+
+	return fmt.Errorf("error while removing host from subsystem; %v", hostRemoved.Error())
+}
+
 // NVMeGetHostsOfSubsystem retuns all the hosts connected to a subsystem
 func (c RestClient) NVMeGetHostsOfSubsystem(ctx context.Context, subsUUID string) ([]*models.NvmeSubsystemHost, error) {
 	params := nvme.NewNvmeSubsystemHostCollectionGetParamsWithTimeout(c.httpClient.Timeout)
diff --git a/storage_drivers/ontap/api/ontap_rest_interface.go b/storage_drivers/ontap/api/ontap_rest_interface.go
index e25e6731e..65c6a0d59 100644
--- 
a/storage_drivers/ontap/api/ontap_rest_interface.go +++ b/storage_drivers/ontap/api/ontap_rest_interface.go @@ -356,4 +356,5 @@ type RestClientInterface interface { NVMeSubsystemAddNamespace(ctx context.Context, subsystemUUID, nsUUID string) error // NVMeSubsystemRemoveNamespace ummaps a given namespace from a Subsystem with the specified subsystem UUID. NVMeSubsystemRemoveNamespace(ctx context.Context, subsysUUID, nsUUID string) error + NVMeRemoveHostFromSubsystem(ctx context.Context, hostNQN, subsystemUUID string) error } diff --git a/storage_drivers/ontap/ontap_san_nvme.go b/storage_drivers/ontap/ontap_san_nvme.go index 841926e2a..01a041f2f 100644 --- a/storage_drivers/ontap/ontap_san_nvme.go +++ b/storage_drivers/ontap/ontap_san_nvme.go @@ -798,6 +798,7 @@ func (d *NVMeStorageDriver) Unpublish( "name": name, "NVMeNamespaceUUID": volConfig.AccessInfo.NVMeNamespaceUUID, "NVMeSubsystemUUID": volConfig.AccessInfo.NVMeSubsystemUUID, + "hostNQN": publishInfo.HostNQN, } Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> Unpublish") defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< Unpublish") @@ -805,7 +806,13 @@ func (d *NVMeStorageDriver) Unpublish( subsystemUUID := volConfig.AccessInfo.NVMeSubsystemUUID namespaceUUID := volConfig.AccessInfo.NVMeNamespaceUUID - return d.API.NVMeEnsureNamespaceUnmapped(ctx, subsystemUUID, namespaceUUID) + removePublishInfo, err := d.API.NVMeEnsureNamespaceUnmapped(ctx, publishInfo.HostNQN, subsystemUUID, namespaceUUID) + if removePublishInfo { + volConfig.AccessInfo.NVMeTargetIPs = []string{} + volConfig.AccessInfo.NVMeSubsystemNQN = "" + volConfig.AccessInfo.NVMeSubsystemUUID = "" + } + return err } // CanSnapshot determines whether a snapshot as specified in the provided snapshot config may be taken. 
diff --git a/storage_drivers/ontap/ontap_san_nvme_test.go b/storage_drivers/ontap/ontap_san_nvme_test.go index 848a841f7..192e40a1d 100644 --- a/storage_drivers/ontap/ontap_san_nvme_test.go +++ b/storage_drivers/ontap/ontap_san_nvme_test.go @@ -988,7 +988,7 @@ func TestUnpublish(t *testing.T) { // case 1: NVMeEnsureNamespaceUnmapped returned error volConfig.AccessInfo.NVMeNamespaceUUID = "fakeUUID" tridentconfig.CurrentDriverContext = tridentconfig.ContextCSI - mock.EXPECT().NVMeEnsureNamespaceUnmapped(ctx, gomock.Any(), gomock.Any()).Return(fmt.Errorf("NVMeEnsureNamespaceUnmapped returned error")) + mock.EXPECT().NVMeEnsureNamespaceUnmapped(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(false, fmt.Errorf("NVMeEnsureNamespaceUnmapped returned error")) err := d.Unpublish(ctx, volConfig, publishInfo) @@ -998,7 +998,7 @@ func TestUnpublish(t *testing.T) { volConfig.AccessInfo.PublishEnforcement = true volConfig.AccessInfo.NVMeNamespaceUUID = "fakeUUID" tridentconfig.CurrentDriverContext = tridentconfig.ContextCSI - mock.EXPECT().NVMeEnsureNamespaceUnmapped(ctx, gomock.Any(), gomock.Any()).Return(nil) + mock.EXPECT().NVMeEnsureNamespaceUnmapped(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil) err = d.Unpublish(ctx, volConfig, publishInfo) From 205b6a872ad5910e08048a0999879cdada9323ab Mon Sep 17 00:00:00 2001 From: Prajwal V <91181612+prajwalv-netapp@users.noreply.github.com> Date: Fri, 7 Jul 2023 11:36:43 +0530 Subject: [PATCH 04/17] Support for RO clone in ONTAP-NAS --- storage_drivers/ontap/ontap_common.go | 51 ++++-- storage_drivers/ontap/ontap_common_test.go | 46 ++++- storage_drivers/ontap/ontap_nas.go | 52 ++++-- storage_drivers/ontap/ontap_nas_test.go | 186 ++++++++++++++++++++- 4 files changed, 302 insertions(+), 33 deletions(-) diff --git a/storage_drivers/ontap/ontap_common.go b/storage_drivers/ontap/ontap_common.go index 55996c1a5..25bd6f73a 100644 --- a/storage_drivers/ontap/ontap_common.go +++ b/storage_drivers/ontap/ontap_common.go 
@@ -2952,23 +2952,49 @@ func GetEncryptionValue(encryption string) (*bool, string, error) { return nil, "", nil } -// ConstructOntapNASSMBVolumePath returns windows compatible volume path for Ontap NAS. +// ConstructOntapNASVolumeAccessPath returns volume path for ONTAP NAS. // Function accepts parameters in following way: // 1.smbShare : This takes the value given in backend config, without path prefix. // 2.volumeName : This takes the value of volume's internal name, it is always prefixed with unix styled path separator. -// Example, ConstructOntapNASSMBVolumePath(ctx, "test_share", "/vol") -func ConstructOntapNASSMBVolumePath(ctx context.Context, smbShare, volumeName string) string { - Logc(ctx).Debug(">>>> smb.ConstructOntapNASSMBVolumePath") - defer Logc(ctx).Debug("<<<< smb.ConstructOntapNASSMBVolumePath") +// 3.volConfig : This takes value of volume configuration. +// 4.Protocol : This takes the value of NAS protocol (NFS/SMB). +// Example, ConstructOntapNASVolumeAccessPath(ctx, "test_share", "/vol" , volConfig, "nfs") +func ConstructOntapNASVolumeAccessPath( + ctx context.Context, smbShare, volumeName string, + volConfig *storage.VolumeConfig, protocol string, +) string { + Logc(ctx).Debug(">>>> smb.ConstructOntapNASVolumeAccessPath") + defer Logc(ctx).Debug("<<<< smb.ConstructOntapNASVolumeAccessPath") var completeVolumePath string - if smbShare != "" { - completeVolumePath = utils.WindowsPathSeparator + smbShare + volumeName - } else { - // If the user does not specify an SMB Share, Trident creates it with the same name as the flexvol volume name. 
- completeVolumePath = volumeName - } + var smbSharePath string + switch protocol { + case sa.NFS: + if volConfig.ReadOnlyClone { + return fmt.Sprintf("/%s/%s/%s", volConfig.CloneSourceVolumeInternal, ".snapshot", + volConfig.CloneSourceSnapshot) + } else if volumeName != utils.UnixPathSeparator+volConfig.InternalName && strings.HasPrefix(volumeName, + utils.UnixPathSeparator) { + // For managed import, return the original junction path + return volumeName + } + return fmt.Sprintf("/%s", volConfig.InternalName) + case sa.SMB: + if smbShare != "" { + smbSharePath = fmt.Sprintf("\\%s", smbShare) + } else { + // Set share path as empty, volume name contains the path prefix. + smbSharePath = "" + } + if volConfig.ReadOnlyClone { + completeVolumePath = fmt.Sprintf("%s\\%s\\%s\\%s", smbSharePath, volConfig.CloneSourceVolumeInternal, + "~snapshot", volConfig.CloneSourceSnapshot) + } else { + // If the user does not specify an SMB Share, Trident creates it with the same name as the flexvol volume name. + completeVolumePath = smbSharePath + volumeName + } + } // Replace unix styled path separator, if exists return strings.Replace(completeVolumePath, utils.UnixPathSeparator, utils.WindowsPathSeparator, -1) } @@ -3001,7 +3027,8 @@ func ConstructOntapNASFlexGroupSMBVolumePath(ctx context.Context, smbShare, volu // 3.volConfig : This takes the value of volume configuration. // 4. protocol: This takes the value of the protocol for which the path needs to be created. 
// Example, ConstructOntapNASQTreeVolumePath(ctx, test.smbShare, "flex-vol", volConfig, sa.SMB) -func ConstructOntapNASQTreeVolumePath(ctx context.Context, smbShare, flexvol string, +func ConstructOntapNASQTreeVolumePath( + ctx context.Context, smbShare, flexvol string, volConfig *storage.VolumeConfig, protocol string, ) (completeVolumePath string) { Logc(ctx).Debug(">>>> smb.ConstructOntapNASQTreeVolumePath") diff --git a/storage_drivers/ontap/ontap_common_test.go b/storage_drivers/ontap/ontap_common_test.go index ba1850b9f..f2691c280 100644 --- a/storage_drivers/ontap/ontap_common_test.go +++ b/storage_drivers/ontap/ontap_common_test.go @@ -2256,21 +2256,57 @@ func TestRestGetSLMLifs(t *testing.T) { assert.ElementsMatch(t, result, []string{"1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4"}) } -func TestConstructOntapNASSMBVolumePath(t *testing.T) { +func TestConstructOntapNASVolumeAccessPath(t *testing.T) { ctx := context.Background() + volConfig := &storage.VolumeConfig{ + InternalName: "vol", + } + tests := []struct { smbShare string + volName string + protocol string expectedPath string }{ - {"test_share", "\\test_sharevol"}, - {"", "vol"}, + {"test_share", "/vol", "smb", "\\test_share\\vol"}, + {"", "/vol", "smb", "\\vol"}, + {"", "/vol", "nfs", "/vol"}, + {"", "/vol1", "nfs", "/vol1"}, } for _, test := range tests { t.Run(test.smbShare, func(t *testing.T) { - result := ConstructOntapNASSMBVolumePath(ctx, test.smbShare, "vol") - assert.Equal(t, test.expectedPath, result, "unable to construct Ontap-NAS-QTree SMB volume path") + result := ConstructOntapNASVolumeAccessPath(ctx, test.smbShare, test.volName, volConfig, test.protocol) + assert.Equal(t, test.expectedPath, result, "unable to construct Ontap-NAS volume access path") + }) + } +} + +func TestConstructOntapNASVolumeAccessPath_ROClone(t *testing.T) { + ctx := context.Background() + + volConfig := &storage.VolumeConfig{ + InternalName: "vol", + ReadOnlyClone: true, + CloneSourceVolumeInternal: "sourceVol", + 
CloneSourceSnapshot: "snapshot-abcd-1234-wxyz", + } + + tests := []struct { + smbShare string + protocol string + expectedPath string + }{ + {"test_share", "smb", "\\test_share\\sourceVol\\~snapshot\\snapshot-abcd-1234-wxyz"}, + {"", "smb", "\\sourceVol\\~snapshot\\snapshot-abcd-1234-wxyz"}, + {"", "nfs", "/sourceVol/.snapshot/snapshot-abcd-1234-wxyz"}, + } + + for _, test := range tests { + t.Run(test.smbShare, func(t *testing.T) { + result := ConstructOntapNASVolumeAccessPath(ctx, test.smbShare, "/vol", volConfig, test.protocol) + assert.Equal(t, test.expectedPath, result, "unable to construct Ontap-NAS volume access path") }) } } diff --git a/storage_drivers/ontap/ontap_nas.go b/storage_drivers/ontap/ontap_nas.go index ad0901761..426c9e350 100644 --- a/storage_drivers/ontap/ontap_nas.go +++ b/storage_drivers/ontap/ontap_nas.go @@ -455,6 +455,14 @@ func (d *NASStorageDriver) CreateClone( storagePoolSplitOnCloneVal = storagePool.InternalAttributes()[SplitOnClone] } + if cloneVolConfig.ReadOnlyClone { + if !flexvol.SnapshotDir { + return fmt.Errorf("snapshot directory access is set to %t and readOnly clone is set to %t ", + flexvol.SnapshotDir, cloneVolConfig.ReadOnlyClone) + } + return nil + } + // If storagePoolSplitOnCloneVal is still unknown, set it to backend's default value if storagePoolSplitOnCloneVal == "" { storagePoolSplitOnCloneVal = d.GetConfig().SplitOnClone @@ -956,6 +964,8 @@ func (d *NASStorageDriver) CreatePrepare(ctx context.Context, volConfig *storage func (d *NASStorageDriver) CreateFollowup(ctx context.Context, volConfig *storage.VolumeConfig) error { var accessPath string + var flexvol *api.Volume + var err error fields := LogFields{ "Method": "CreateFollowup", @@ -976,9 +986,17 @@ func (d *NASStorageDriver) CreateFollowup(ctx context.Context, volConfig *storag } // Set correct junction path - flexvol, err := d.API.VolumeInfo(ctx, volConfig.InternalName) - if err != nil { - return err + // If it's a RO clone, get source volume + if 
volConfig.ReadOnlyClone { + flexvol, err = d.API.VolumeInfo(ctx, volConfig.CloneSourceVolumeInternal) + if err != nil { + return err + } + } else { + flexvol, err = d.API.VolumeInfo(ctx, volConfig.InternalName) + if err != nil { + return err + } } if flexvol.JunctionPath == "" { @@ -988,13 +1006,14 @@ func (d *NASStorageDriver) CreateFollowup(ctx context.Context, volConfig *storag // 2. During Create/CreateClone there is a failure and mount is not performed. if d.Config.NASType == sa.SMB { - volConfig.AccessInfo.SMBPath = ConstructOntapNASSMBVolumePath(ctx, d.Config.SMBShare, - volConfig.InternalName) + volConfig.AccessInfo.SMBPath = ConstructOntapNASVolumeAccessPath(ctx, d.Config.SMBShare, + volConfig.InternalName, volConfig, sa.SMB) // Overwriting mount path, mounting at root instead of admin share volConfig.AccessInfo.SMBPath = "/" + volConfig.InternalName accessPath = volConfig.AccessInfo.SMBPath } else { - volConfig.AccessInfo.NfsPath = "/" + volConfig.InternalName + volConfig.AccessInfo.NfsPath = ConstructOntapNASVolumeAccessPath(ctx, d.Config.SMBShare, + volConfig.InternalName, volConfig, sa.NFS) accessPath = volConfig.AccessInfo.NfsPath } @@ -1014,10 +1033,11 @@ func (d *NASStorageDriver) CreateFollowup(ctx context.Context, volConfig *storag } } else { if d.Config.NASType == sa.SMB { - volConfig.AccessInfo.SMBPath = ConstructOntapNASSMBVolumePath(ctx, d.Config.SMBShare, - flexvol.JunctionPath) + volConfig.AccessInfo.SMBPath = ConstructOntapNASVolumeAccessPath(ctx, d.Config.SMBShare, + flexvol.JunctionPath, volConfig, sa.SMB) } else { - volConfig.AccessInfo.NfsPath = flexvol.JunctionPath + volConfig.AccessInfo.NfsPath = ConstructOntapNASVolumeAccessPath(ctx, d.Config.SMBShare, + flexvol.JunctionPath, volConfig, sa.NFS) } } return nil @@ -1246,7 +1266,8 @@ func (d *NASStorageDriver) EstablishMirror( replicationSchedule = "" } - return establishMirror(ctx, localInternalVolumeName, remoteVolumeHandle, replicationPolicy, replicationSchedule, d.API) + return 
establishMirror(ctx, localInternalVolumeName, remoteVolumeHandle, replicationPolicy, replicationSchedule, + d.API) } // ReestablishMirror will attempt to resync a mirror relationship, if and only if the relationship existed previously @@ -1285,7 +1306,8 @@ func (d *NASStorageDriver) ReestablishMirror( replicationSchedule = "" } - return reestablishMirror(ctx, localInternalVolumeName, remoteVolumeHandle, replicationPolicy, replicationSchedule, d.API) + return reestablishMirror(ctx, localInternalVolumeName, remoteVolumeHandle, replicationPolicy, replicationSchedule, + d.API) } // PromoteMirror will break the mirror relationship and make the destination volume RW, @@ -1293,8 +1315,8 @@ func (d *NASStorageDriver) ReestablishMirror( func (d *NASStorageDriver) PromoteMirror( ctx context.Context, localInternalVolumeName, remoteVolumeHandle, snapshotName string, ) (bool, error) { - return promoteMirror(ctx, localInternalVolumeName, remoteVolumeHandle, snapshotName, d.GetConfig().ReplicationPolicy, - d.API) + return promoteMirror(ctx, localInternalVolumeName, remoteVolumeHandle, snapshotName, + d.GetConfig().ReplicationPolicy, d.API) } // GetMirrorStatus returns the current state of a mirror relationship @@ -1310,7 +1332,9 @@ func (d *NASStorageDriver) ReleaseMirror(ctx context.Context, localInternalVolum } // GetReplicationDetails returns the replication policy and schedule of a mirror relationship -func (d *NASStorageDriver) GetReplicationDetails(ctx context.Context, localInternalVolumeName, remoteVolumeHandle string) (string, string, string, error) { +func (d *NASStorageDriver) GetReplicationDetails( + ctx context.Context, localInternalVolumeName, remoteVolumeHandle string, +) (string, string, string, error) { return getReplicationDetails(ctx, localInternalVolumeName, remoteVolumeHandle, d.API) } diff --git a/storage_drivers/ontap/ontap_nas_test.go b/storage_drivers/ontap/ontap_nas_test.go index 03fec577f..77a65565c 100644 --- a/storage_drivers/ontap/ontap_nas_test.go 
+++ b/storage_drivers/ontap/ontap_nas_test.go @@ -9,6 +9,7 @@ import ( "os" "reflect" "testing" + "time" "github.com/RoaringBitmap/roaring" "github.com/golang/mock/gomock" @@ -673,8 +674,8 @@ func TestOntapNasStorageDriverVolumeClone(t *testing.T) { mockAPI.EXPECT().VolumeExists(ctx, "").Return(false, nil) mockAPI.EXPECT().VolumeCloneCreate(ctx, volConfig.InternalName, volConfig.CloneSourceVolumeInternal, volConfig.CloneSourceSnapshotInternal, false).Return(nil) - mockAPI.EXPECT().VolumeSetComment(ctx, volConfig.InternalName, volConfig.InternalName, - "flexvol").Return(nil) + mockAPI.EXPECT().VolumeSetComment(ctx, volConfig.InternalName, volConfig.InternalName, "flexvol"). + Return(nil) mockAPI.EXPECT().VolumeMount(ctx, volConfig.InternalName, "/"+volConfig.InternalName).Return(nil) if test.NasType == sa.SMB { @@ -690,6 +691,74 @@ func TestOntapNasStorageDriverVolumeClone(t *testing.T) { } } +func TestOntapNasStorageDriverVolumeClone_ROClone(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + pool1 := storage.NewStoragePool(nil, "pool1") + pool1.SetInternalAttributes(map[string]string{ + "tieringPolicy": "none", + }) + driver.physicalPools = map[string]storage.Pool{"pool1": pool1} + driver.Config.SplitOnClone = "false" + + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + CloneSourceSnapshotInternal: "flexvol", + ReadOnlyClone: true, + } + + flexVol := api.Volume{ + Name: "flexvol", + Comment: "flexvol", + SnapshotDir: true, + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") + mockAPI.EXPECT().VolumeInfo(ctx, volConfig.CloneSourceVolumeInternal).Return(&flexVol, nil) + + result := driver.CreateClone(ctx, nil, volConfig, pool1) + fmt.Println(result) + + assert.NoError(t, result, "received error") +} + +func TestOntapNasStorageDriverVolumeClone_ROClone_Failure(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + pool1 := storage.NewStoragePool(nil, "pool1") + 
pool1.SetInternalAttributes(map[string]string{ + "tieringPolicy": "none", + }) + driver.physicalPools = map[string]storage.Pool{"pool1": pool1} + driver.Config.SplitOnClone = "false" + + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + CloneSourceSnapshotInternal: "flexvol", + ReadOnlyClone: true, + } + + // Set snapshot directory visibility to false + flexVol := api.Volume{ + Name: "flexvol", + Comment: "flexvol", + SnapshotDir: false, + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") + + // Creating a readonly clone only results in the driver looking up volume information and no other calls to ONTAP. + mockAPI.EXPECT().VolumeInfo(ctx, volConfig.CloneSourceVolumeInternal).Return(&flexVol, nil) + + result := driver.CreateClone(ctx, nil, volConfig, pool1) + + assert.Error(t, result, "expected error") +} + func TestOntapNasStorageDriverVolumeClone_StoragePoolUnset(t *testing.T) { mockAPI, driver := newMockOntapNASDriver(t) volConfig := &storage.VolumeConfig{ @@ -988,6 +1057,30 @@ func TestOntapNasStorageDriverVolumeDestroy_SnapmirrorDeleteFail(t *testing.T) { assert.Error(t, result) } +func TestOntapNasStorageDriverVolumeDestroy_SnapmirrorReleaseFail(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + svmName := "SVM1" + volName := "testVol" + volNameInternal := volName + "Internal" + volConfig := &storage.VolumeConfig{ + Size: "1g", + Name: volName, + InternalName: volNameInternal, + Encryption: "false", + FileSystem: "xfs", + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return(svmName) + mockAPI.EXPECT().VolumeExists(ctx, volNameInternal).Return(true, nil) + mockAPI.EXPECT().SnapmirrorDeleteViaDestination(ctx, volNameInternal, svmName).Return(nil) + mockAPI.EXPECT().SnapmirrorRelease(ctx, volNameInternal, + svmName).Return(fmt.Errorf("error releaseing snapmirror")) + + result := driver.Destroy(ctx, volConfig) + + assert.Error(t, result) +} + func TestOntapNasStorageDriverVolumeDestroy_Fail(t 
*testing.T) { mockAPI, driver := newMockOntapNASDriver(t) svmName := "SVM1" @@ -1632,6 +1725,51 @@ func TestOntapNasStorageDriverCreateFollowup_WithJunctionPath_NASType_None(t *te assert.NoError(t, result) } +func TestOntapNasStorageDriverCreateFollowup_WithJunctionPath_ROClone_Success(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + InternalName: "vol1", + ReadOnlyClone: true, + CloneSourceVolumeInternal: "flexvol", + } + + flexVol := api.Volume{ + Name: "flexvol", + Comment: "flexvol", + JunctionPath: "/vol1", + AccessType: "rw", + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") + mockAPI.EXPECT().VolumeInfo(ctx, "flexvol").Return(&flexVol, nil) + + result := driver.CreateFollowup(ctx, volConfig) + + assert.NoError(t, result, "error occurred") +} + +func TestOntapNasStorageDriverCreateFollowup_WithJunctionPath_ROClone_Failure(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + InternalName: "vol1", + ReadOnlyClone: true, + CloneSourceVolumeInternal: "flexvol", + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") + mockAPI.EXPECT().VolumeInfo(ctx, "flexvol").Return(nil, api.ApiError("api error")) + + result := driver.CreateFollowup(ctx, volConfig) + + assert.Error(t, result, "expected error") +} + func TestOntapNasStorageDriverCreateFollowup_WithJunctionPath_NASType_SMB(t *testing.T) { mockAPI, driver := newMockOntapNASDriver(t) driver.Config.NASType = "smb" @@ -3198,3 +3336,47 @@ func TestOntapNasStorageDriverBackendName(t *testing.T) { assert.Equal(t, result, "myBackend") } + +func TestOntapNasStorageDriverUpdateMirror(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + mockAPI.EXPECT().SnapmirrorUpdate(ctx, "testVol", "testSnap") + + err := driver.UpdateMirror(ctx, "testVol", "testSnap") + assert.Error(t, err, 
"expected error") +} + +func TestOntapNasStorageDriverCheckMirrorTransferState(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + snapmirror := &api.Snapmirror{ + State: "snapmirrored", + RelationshipStatus: "idle", + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("fakesvm1") + mockAPI.EXPECT().SnapmirrorGet(ctx, "fakevolume1", "fakesvm1", "", "").Return(snapmirror, nil) + + result, err := driver.CheckMirrorTransferState(ctx, "fakevolume1") + + assert.Nil(t, result, "expected nil") + assert.Error(t, err, "expected error") +} + +func TestOntapStorageDriverGetMirrorTransferTime(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + + timeNow := time.Now() + snapmirror := &api.Snapmirror{ + State: "snapmirrored", + RelationshipStatus: "idle", + EndTransferTime: &timeNow, + } + + mockAPI.EXPECT().SVMName().AnyTimes().Return("fakesvm1") + mockAPI.EXPECT().SnapmirrorGet(ctx, "fakevolume1", "fakesvm1", "", "").Return(snapmirror, nil) + + result, err := driver.GetMirrorTransferTime(ctx, "fakevolume1") + assert.NotNil(t, result, "received nil") + assert.NoError(t, err, "received error") +} From 9b8f3df29d00d8f2e70e26683c0d2aba070c7e7b Mon Sep 17 00:00:00 2001 From: Rohit Arora <49132604+ntap-arorar@users.noreply.github.com> Date: Sat, 8 Jul 2023 01:47:48 -0400 Subject: [PATCH 05/17] Add LUN Serial Number Check This change ensures the LUN Serial Number is always retrieved and published during the ControllerVolumePublish workflow. The change also includes a fix to ensure ZAPI calls when getting a LUN gets LUN Serial Number. 
--- CHANGELOG.md | 8 +- storage_drivers/ontap/api/ontap_zapi.go | 3 +- storage_drivers/ontap/ontap_common.go | 60 ++-------- storage_drivers/ontap/ontap_common_test.go | 111 +++++------------- .../ontap/ontap_san_economy_test.go | 14 ++- storage_drivers/ontap/ontap_san_test.go | 44 +++++-- 6 files changed, 104 insertions(+), 136 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f1b20886..f5854a7dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,13 @@ [Releases](https://github.com/NetApp/trident/releases) -## Changes since v23.01.0 +## Changes since v23.04.0 + +**Fixes:** + +- Fixed ONTAP ZAPI request to ensure LUN serial number is queried when getting LUN attributes. + +## v23.04.0 - **IMPORTANT**: Force volume detach for ONTAP-SAN-* volumes is only supported with Kubernetes versions which have enabled the Non-Graceful Node Shutdown feature gate. Force detach must be enabled at install time via `--enable-force-detach` Trident installer flag. diff --git a/storage_drivers/ontap/api/ontap_zapi.go b/storage_drivers/ontap/api/ontap_zapi.go index 459009a05..7d10ed479 100644 --- a/storage_drivers/ontap/api/ontap_zapi.go +++ b/storage_drivers/ontap/api/ontap_zapi.go @@ -618,7 +618,8 @@ func (c Client) LunGet(path string) (*azgo.LunInfoType, error) { SetSize(0). SetCreationTimestamp(0). SetOnline(false). - SetMapped(false) + SetMapped(false). + SetSerialNumber("") desiredAttributes.SetLunInfo(*lunInfo) response, err := azgo.NewLunGetIterRequest(). diff --git a/storage_drivers/ontap/ontap_common.go b/storage_drivers/ontap/ontap_common.go index 25bd6f73a..bb37323c3 100644 --- a/storage_drivers/ontap/ontap_common.go +++ b/storage_drivers/ontap/ontap_common.go @@ -1,4 +1,4 @@ -// Copyright 2022 NetApp, Inc. All Rights Reserved. +// Copyright 2023 NetApp, Inc. All Rights Reserved. 
package ontap @@ -562,52 +562,6 @@ func GetOntapDriverRedactList() []string { return clone[:] } -// PopulateOntapLunMapping helper function to fill in volConfig with its LUN mapping values. -// This function assumes that the list of data LIFs has not changed since driver initialization and volume creation -func PopulateOntapLunMapping( - ctx context.Context, clientAPI api.OntapAPI, ips []string, volConfig *storage.VolumeConfig, lunID int, - lunPath, igroupName string, -) error { - var targetIQN string - targetIQN, err := clientAPI.IscsiNodeGetNameRequest(ctx) - if err != nil { - return fmt.Errorf("problem retrieving iSCSI services: %v", err) - } - - lunResponse, err := clientAPI.LunGetByName(ctx, lunPath) - if err != nil || lunResponse == nil { - return fmt.Errorf("problem retrieving LUN info: %v", err) - } - serial := lunResponse.SerialNumber - - filteredIPs, err := getISCSIDataLIFsForReportingNodes(ctx, clientAPI, ips, lunPath, igroupName, - volConfig.ImportNotManaged) - if err != nil { - return err - } - - if len(filteredIPs) == 0 { - Logc(ctx).Warn("Unable to find reporting ONTAP nodes for discovered dataLIFs.") - filteredIPs = ips - } - - volConfig.AccessInfo.IscsiTargetPortal = filteredIPs[0] - volConfig.AccessInfo.IscsiPortals = filteredIPs[1:] - volConfig.AccessInfo.IscsiTargetIQN = targetIQN - volConfig.AccessInfo.IscsiLunNumber = int32(lunID) - volConfig.AccessInfo.IscsiIgroup = igroupName - volConfig.AccessInfo.IscsiLunSerial = serial - Logc(ctx).WithFields(LogFields{ - "volume": volConfig.Name, - "volume_internal": volConfig.InternalName, - "targetIQN": volConfig.AccessInfo.IscsiTargetIQN, - "lunNumber": volConfig.AccessInfo.IscsiLunNumber, - "igroup": volConfig.AccessInfo.IscsiIgroup, - }).Debug("Mapped ONTAP LUN.") - - return nil -} - // getNodeSpecificIgroupName generates a distinct igroup name for node name. // Igroup names may collide if node names are over 59 characters. 
func getNodeSpecificIgroupName(nodeName, tridentUUID string) string { @@ -681,6 +635,17 @@ func PublishLUN( fstype = lunFSType } + // Get LUN Serial Number + lunResponse, err := clientAPI.LunGetByName(ctx, lunPath) + if err != nil || lunResponse == nil { + return fmt.Errorf("problem retrieving LUN info: %v", err) + } + serial := lunResponse.SerialNumber + + if serial == "" { + return fmt.Errorf("LUN '%v' serial number not found", lunPath) + } + if config.DriverContext == tridentconfig.ContextCSI { // Get the info about the targeted node var targetNode *utils.Node @@ -729,6 +694,7 @@ func PublishLUN( // Add fields needed by Attach publishInfo.IscsiLunNumber = int32(lunID) + publishInfo.IscsiLunSerial = serial publishInfo.IscsiTargetPortal = filteredIPs[0] publishInfo.IscsiPortals = filteredIPs[1:] publishInfo.IscsiTargetIQN = iSCSINodeName diff --git a/storage_drivers/ontap/ontap_common_test.go b/storage_drivers/ontap/ontap_common_test.go index f2691c280..a514258d7 100644 --- a/storage_drivers/ontap/ontap_common_test.go +++ b/storage_drivers/ontap/ontap_common_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 NetApp, Inc. All Rights Reserved. +// Copyright 2023 NetApp, Inc. All Rights Reserved. 
package ontap @@ -3033,84 +3033,6 @@ func TestGetDesiredExportPolicyRules(t *testing.T) { assert.NoError(t, err, "Found error when expected none") } -func TestPopulateOntapLunMapping(t *testing.T) { - ctx := context.Background() - mockCtrl := gomock.NewController(t) - mockAPI := mockapi.NewMockOntapAPI(mockCtrl) - inputIPs := []string{ - "1.1.1.1", "2.2.2.2", "3.3.3.3", - } - - volConfig := &storage.VolumeConfig{ - Name: "testVol", - InternalName: "testInternalVol", - ImportNotManaged: true, - } - - lunID := 5555 - - lunPath := "fakeLunPath" - - igroupName := "testIgroupName" - - dummyLun := &api.Lun{ - Comment: "dummyLun", - SerialNumber: "testSerialNumber", - } - reportingNodes := []string{"Node1"} - - error := fmt.Errorf("Error returned") - - // Test1: Positive flow - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", nil) - mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) - mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return(reportingNodes, nil) - mockAPI.EXPECT().GetSLMDataLifs(ctx, inputIPs, reportingNodes).Return([]string{"1.1.1.1"}, nil) - - err := PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.NoError(t, err) - assert.Equal(t, "1.1.1.1", volConfig.AccessInfo.IscsiTargetPortal) - assert.Equal(t, "testIQN", volConfig.AccessInfo.IscsiTargetIQN) - assert.Equal(t, int32(5555), volConfig.AccessInfo.IscsiLunNumber) - assert.Equal(t, "testIgroupName", volConfig.AccessInfo.IscsiIgroup) - assert.Equal(t, "testSerialNumber", volConfig.AccessInfo.IscsiLunSerial) - - // Test2: Error flow: IscsiNodeGetNameRequest returns error - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", error) - - err = PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.Error(t, err) - - // Test3: Error flow: LunGetByName returns error - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", nil) - 
mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, error) - - err = PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.Error(t, err) - - // Test4: Error flow: LunMapGetReportingNodes returns error - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", nil) - mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) - mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return(reportingNodes, error) - - err = PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.Error(t, err) - - // Test5: Positive flow: Unable to find reporting ONTAP nodes - mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Return("testIQN", nil) - mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) - mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return(reportingNodes, nil) - mockAPI.EXPECT().GetSLMDataLifs(ctx, inputIPs, reportingNodes).Return([]string{}, nil) - - err = PopulateOntapLunMapping(ctx, mockAPI, inputIPs, volConfig, lunID, lunPath, igroupName) - - assert.NoError(t, err) -} - func TestReconcileNASNodeAccess(t *testing.T) { ctx := context.Background() mockCtrl := gomock.NewController(t) @@ -3945,6 +3867,16 @@ func TestPublishLun(t *testing.T) { } nodeList := []*utils.Node{&node} + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + + dummyLunNoSerial := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "", + } + commonConfig := &drivers.CommonStorageDriverConfig{ DebugTraceFlags: map[string]bool{"method": true}, DriverContext: "csi", @@ -3965,6 +3897,7 @@ func TestPublishLun(t *testing.T) { } // Test1 - Positive flow mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, igroupName, publishInfo.HostIQN[0]) mockAPI.EXPECT().EnsureLunMapped(ctx, igroupName, 
lunPath).Return(1111, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return([]string{"Node1"}, nil) @@ -3987,6 +3920,7 @@ func TestPublishLun(t *testing.T) { mockAPI = mockapi.NewMockOntapAPI(mockCtrl) publishInfo.HostIQN = []string{"host_iqn"} mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("", fmt.Errorf("LunGetFSType returned error")) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, igroupName, publishInfo.HostIQN[0]) mockAPI.EXPECT().EnsureLunMapped(ctx, igroupName, lunPath).Return(1111, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, igroupName, lunPath).Return([]string{"Node1"}, nil) @@ -4001,6 +3935,7 @@ func TestPublishLun(t *testing.T) { publishInfo.HostIQN = []string{"host_iqn"} publishInfo.HostName = "fakeHostName" mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("", fmt.Errorf("LunGetFSType returned error")) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) err = PublishLUN(ctx, mockAPI, config, ips, publishInfo, lunPath, igroupName, iSCSINodeName) @@ -4015,6 +3950,7 @@ func TestPublishLun(t *testing.T) { HostIQN: []string{"host_iqn"}, } mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, igroupName, gomock.Any()).Return(fmt.Errorf("EnsureIgroupAdded returned error")) @@ -4024,6 +3960,7 @@ func TestPublishLun(t *testing.T) { // Test 6 - EnsureLunMapped returns error mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, nil) mockAPI.EXPECT().EnsureLunMapped(ctx, igroupName, lunPath).Return(1111, fmt.Errorf("EnsureLunMapped returned error")) mockAPI.EXPECT().EnsureIgroupAdded(ctx, igroupName, gomock.Any()).Return(nil) @@ -4031,6 +3968,22 @@ func TestPublishLun(t *testing.T) { err = PublishLUN(ctx, mockAPI, config, ips, publishInfo, lunPath, igroupName, 
iSCSINodeName) assert.Error(t, err) + + // Test 7 - LunGetByName returns error + mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLun, fmt.Errorf("LunGetByName returned error")) + + err = PublishLUN(ctx, mockAPI, config, ips, publishInfo, lunPath, igroupName, iSCSINodeName) + + assert.Error(t, err) + + // Test 8 - LunGetByName returns nil but Serial Number is empty + mockAPI.EXPECT().LunGetFSType(ctx, lunPath).Return("fstype", nil) + mockAPI.EXPECT().LunGetByName(ctx, lunPath).Return(dummyLunNoSerial, nil) + + err = PublishLUN(ctx, mockAPI, config, ips, publishInfo, lunPath, igroupName, iSCSINodeName) + + assert.Error(t, err) } func TestValidateSANDriver(t *testing.T) { diff --git a/storage_drivers/ontap/ontap_san_economy_test.go b/storage_drivers/ontap/ontap_san_economy_test.go index 31d591378..13f3e91ca 100644 --- a/storage_drivers/ontap/ontap_san_economy_test.go +++ b/storage_drivers/ontap/ontap_san_economy_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 NetApp, Inc. All Rights Reserved. +// Copyright 2023 NetApp, Inc. All Rights Reserved. 
package ontap @@ -1591,11 +1591,17 @@ func TestOntapSanEconomyVolumePublish(t *testing.T) { Unmanaged: false, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().LunList(ctx, gomock.Any()).Times(1).Return(api.Luns{api.Lun{Size: "1g", Name: "lunName", VolumeName: "volumeName"}}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/volumeName/storagePrefix_lunName") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/volumeName/storagePrefix_lunName").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) @@ -1623,11 +1629,17 @@ func TestOntapSanEconomyVolumePublishSLMError(t *testing.T) { Unmanaged: false, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().LunList(ctx, gomock.Any()).Times(1).Return(api.Luns{api.Lun{Size: "1g", Name: "lunName", VolumeName: "volumeName"}}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/volumeName/storagePrefix_lunName") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/volumeName/storagePrefix_lunName").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) diff --git a/storage_drivers/ontap/ontap_san_test.go 
b/storage_drivers/ontap/ontap_san_test.go index 8203b111d..71939649e 100644 --- a/storage_drivers/ontap/ontap_san_test.go +++ b/storage_drivers/ontap/ontap_san_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 NetApp, Inc. All Rights Reserved. +// Copyright 2023 NetApp, Inc. All Rights Reserved. package ontap @@ -590,10 +590,16 @@ func TestOntapSanVolumePublishManaged(t *testing.T) { Unmanaged: false, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().VolumeInfo(ctx, gomock.Any()).Times(1).Return(&api.Volume{AccessType: VolTypeRW}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/lunName/lun0") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/lunName/lun0").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) @@ -626,10 +632,16 @@ func TestOntapSanVolumePublishUnmanaged(t *testing.T) { Unmanaged: true, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().VolumeInfo(ctx, gomock.Any()).Times(1).Return(&api.Volume{AccessType: VolTypeRW}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/lunName/lun0") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/lunName/lun0").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, 
gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) @@ -662,10 +674,16 @@ func TestOntapSanVolumePublishSLMError(t *testing.T) { Unmanaged: false, } + dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + mockAPI.EXPECT().VolumeInfo(ctx, gomock.Any()).Times(1).Return(&api.Volume{AccessType: VolTypeRW}, nil) mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/lunName/lun0") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/lunName/lun0").Return(dummyLun, nil) mockAPI.EXPECT().EnsureIgroupAdded(ctx, gomock.Any(), gomock.Any()).Times(1) mockAPI.EXPECT().EnsureLunMapped(ctx, gomock.Any(), gomock.Any()).Times(1).Return(1, nil) mockAPI.EXPECT().LunMapGetReportingNodes(ctx, gomock.Any(), gomock.Any()).Times(1).Return([]string{"node1"}, nil) @@ -1165,7 +1183,8 @@ func TestOntapSanVolumeClone(t *testing.T) { mockAPI.EXPECT().VolumeSnapshotCreate(ctx, gomock.Any(), gomock.Any()).Return(nil) mockAPI.EXPECT().VolumeCloneCreate(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, []string{"online"}, []string{"error"}, maxFlexvolCloneWait).AnyTimes().Return("online", nil) + mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, []string{"online"}, []string{"error"}, + maxFlexvolCloneWait).AnyTimes().Return("online", nil) mockAPI.EXPECT().VolumeSetComment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) err := driver.CreateClone(ctx, volConfig, volConfig, pool1) @@ -1262,7 +1281,8 @@ func TestOntapSanVolumeClone_ValidationTest(t *testing.T) { mockAPI.EXPECT().VolumeSnapshotCreate(ctx, gomock.Any(), gomock.Any()).Return(nil) mockAPI.EXPECT().VolumeCloneCreate(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockAPI.EXPECT().VolumeWaitForStates(ctx, 
volConfig.InternalName, []string{"online"}, []string{"error"}, maxFlexvolCloneWait).AnyTimes().Return("online", nil) + mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, []string{"online"}, []string{"error"}, + maxFlexvolCloneWait).AnyTimes().Return("online", nil) mockAPI.EXPECT().VolumeSetComment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) mockAPI.EXPECT().LunSetQosPolicyGroup(ctx, gomock.Any(), gomock.Any()).Return(nil) }, @@ -1882,7 +1902,8 @@ func TestOntapSanVolumeSnapshot(t *testing.T) { ).MaxTimes(1) mockAPI.EXPECT().LunSize(ctx, gomock.Any()).Return(1073741824, nil) - mockAPI.EXPECT().VolumeSnapshotCreate(ctx, snapshotConfig.InternalName, snapshotConfig.VolumeInternalName).Return(nil) + mockAPI.EXPECT().VolumeSnapshotCreate(ctx, snapshotConfig.InternalName, + snapshotConfig.VolumeInternalName).Return(nil) mockAPI.EXPECT().VolumeSnapshotInfo(ctx, snapshotConfig.InternalName, snapshotConfig.VolumeInternalName).Return( api.Snapshot{ @@ -1922,14 +1943,16 @@ func TestOntapSanVolumeSnapshot_SnapshotNotFound(t *testing.T) { ).MaxTimes(1) mockAPI.EXPECT().LunSize(ctx, gomock.Any()).Return(1073741824, nil) - mockAPI.EXPECT().VolumeSnapshotCreate(ctx, snapshotConfig.InternalName, snapshotConfig.VolumeInternalName).Return(nil) + mockAPI.EXPECT().VolumeSnapshotCreate(ctx, snapshotConfig.InternalName, + snapshotConfig.VolumeInternalName).Return(nil) mockAPI.EXPECT().VolumeSnapshotInfo(ctx, snapshotConfig.InternalName, snapshotConfig.VolumeInternalName).Return( api.Snapshot{ CreateTime: "", Name: snapshotConfig.InternalName, }, - errors.NotFoundError("snapshot %v not found for volume %v", snapshotConfig.InternalName, snapshotConfig.VolumeInternalName)) + errors.NotFoundError("snapshot %v not found for volume %v", snapshotConfig.InternalName, + snapshotConfig.VolumeInternalName)) _, err := driver.CreateSnapshot(ctx, snapshotConfig, volConfig) @@ -2640,6 +2663,11 @@ func TestOntapSanVolumePublishisFlexvolRW(t *testing.T) { Unmanaged: false, } 
+ dummyLun := &api.Lun{ + Comment: "dummyLun", + SerialNumber: "testSerialNumber", + } + tests := []struct { name string mocks func(mockAPI *mockapi.MockOntapAPI) @@ -2692,6 +2720,7 @@ func TestOntapSanVolumePublishisFlexvolRW(t *testing.T) { mockAPI.EXPECT().IscsiNodeGetNameRequest(ctx).Times(1).Return("node1", nil) mockAPI.EXPECT().IscsiInterfaceGet(ctx, gomock.Any()).Return([]string{"iscsi_if"}, nil).Times(1) mockAPI.EXPECT().LunGetFSType(ctx, "/vol/lunName/lun0") + mockAPI.EXPECT().LunGetByName(ctx, "/vol/lunName/lun0").Return(dummyLun, nil) err := driver.Publish(ctx, volConfig, publishInfo) assert.Errorf(t, err, "no reporting nodes found") @@ -3124,7 +3153,8 @@ func TestOntapSANStorageDriverEstablishMirror_Failure(t *testing.T) { { name: "ReplicationPolicyValidation_Fail", mocks: func(mockAPI *mockapi.MockOntapAPI) { - mockAPI.EXPECT().SnapmirrorPolicyGet(ctx, gomock.Any()).Times(2).Return(nil, fmt.Errorf("snap mirror fail")) + mockAPI.EXPECT().SnapmirrorPolicyGet(ctx, gomock.Any()).Times(2).Return(nil, + fmt.Errorf("snap mirror fail")) mockAPI.EXPECT().VolumeInfo(ctx, gomock.Any()).Return(&volume, nil) mockAPI.EXPECT().SnapmirrorGet(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( &api.Snapmirror{State: api.SnapmirrorStateSynchronizing}, nil) From 06a56a45328c13383a90f5ae663256613e707933 Mon Sep 17 00:00:00 2001 From: Shubham Phadnis <42840905+sphadnis007@users.noreply.github.com> Date: Mon, 10 Jul 2023 14:48:18 +0530 Subject: [PATCH 06/17] Added support for backend state update for NVMe driver --- storage_attribute/common_attributes.go | 3 +++ storage_drivers/ontap/api/ontap_rest.go | 5 +++- storage_drivers/ontap/ontap_nas.go | 2 +- storage_drivers/ontap/ontap_nas_flexgroup.go | 2 +- storage_drivers/ontap/ontap_nas_qtree.go | 2 +- storage_drivers/ontap/ontap_san.go | 2 +- storage_drivers/ontap/ontap_san_economy.go | 2 +- storage_drivers/ontap/ontap_san_nvme.go | 20 ++++++++++---- storage_drivers/ontap/ontap_san_nvme_test.go | 28 
+++++++++++++------- 9 files changed, 45 insertions(+), 21 deletions(-) diff --git a/storage_attribute/common_attributes.go b/storage_attribute/common_attributes.go index ddf80790c..f3c99ded1 100644 --- a/storage_attribute/common_attributes.go +++ b/storage_attribute/common_attributes.go @@ -48,6 +48,9 @@ const ( ISCSI = "iscsi" NVMe = "nvme" + // NVMeTransport is used to get NVMe TCP dataLIFs. + NVMeTransport = "nvme_tcp" + RequiredStorage = "requiredStorage" // deprecated, use additionalStoragePools StoragePools = "storagePools" AdditionalStoragePools = "additionalStoragePools" diff --git a/storage_drivers/ontap/api/ontap_rest.go b/storage_drivers/ontap/api/ontap_rest.go index a15ee71cc..a92970440 100644 --- a/storage_drivers/ontap/api/ontap_rest.go +++ b/storage_drivers/ontap/api/ontap_rest.go @@ -5839,7 +5839,10 @@ func (c RestClient) NVMeNamespaceCount(ctx context.Context, subsysUUID string) ( } if getSubsys.IsSuccess() { - return *getSubsys.GetPayload().NumRecords, nil + payload := getSubsys.GetPayload() + if payload != nil && payload.NumRecords != nil { + return *payload.NumRecords, nil + } } return 0, fmt.Errorf("failed to get subsystem map collection") diff --git a/storage_drivers/ontap/ontap_nas.go b/storage_drivers/ontap/ontap_nas.go index 426c9e350..5eb54e0b5 100644 --- a/storage_drivers/ontap/ontap_nas.go +++ b/storage_drivers/ontap/ontap_nas.go @@ -1209,7 +1209,7 @@ func (d *NASStorageDriver) ReconcileNodeAccess( // in physical pools list. 
func (d *NASStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "nfs", d.GetStorageBackendPhysicalPoolNames(ctx)) } diff --git a/storage_drivers/ontap/ontap_nas_flexgroup.go b/storage_drivers/ontap/ontap_nas_flexgroup.go index b6fd5876d..36c4f3b16 100644 --- a/storage_drivers/ontap/ontap_nas_flexgroup.go +++ b/storage_drivers/ontap/ontap_nas_flexgroup.go @@ -1626,7 +1626,7 @@ func (d *NASFlexGroupStorageDriver) ReconcileNodeAccess( // in physical pools list. func (d *NASFlexGroupStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "nfs", d.GetStorageBackendPhysicalPoolNames(ctx)) } diff --git a/storage_drivers/ontap/ontap_nas_qtree.go b/storage_drivers/ontap/ontap_nas_qtree.go index 611ab82ef..f9efc1d96 100644 --- a/storage_drivers/ontap/ontap_nas_qtree.go +++ b/storage_drivers/ontap/ontap_nas_qtree.go @@ -2113,7 +2113,7 @@ func (d *NASQtreeStorageDriver) ReconcileNodeAccess( // in physical pools list. func (d *NASQtreeStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "nfs", d.GetStorageBackendPhysicalPoolNames(ctx)) } diff --git a/storage_drivers/ontap/ontap_san.go b/storage_drivers/ontap/ontap_san.go index f94c68a18..626008bb4 100644 --- a/storage_drivers/ontap/ontap_san.go +++ b/storage_drivers/ontap/ontap_san.go @@ -1285,7 +1285,7 @@ func (d *SANStorageDriver) ReconcileNodeAccess(ctx context.Context, nodes []*uti // in physical pools list. 
func (d *SANStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "iscsi", d.GetStorageBackendPhysicalPoolNames(ctx)) } diff --git a/storage_drivers/ontap/ontap_san_economy.go b/storage_drivers/ontap/ontap_san_economy.go index ef1550062..1e4d05ca8 100644 --- a/storage_drivers/ontap/ontap_san_economy.go +++ b/storage_drivers/ontap/ontap_san_economy.go @@ -2154,7 +2154,7 @@ func (d *SANEconomyStorageDriver) ReconcileNodeAccess( // in physical pools list. func (d *SANEconomyStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { Logc(ctx).Debug(">>>> GetBackendState") - defer Logc(ctx).Debugf("<<<< GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") return getSVMState(ctx, d.API, "iscsi", d.GetStorageBackendPhysicalPoolNames(ctx)) } diff --git a/storage_drivers/ontap/ontap_san_nvme.go b/storage_drivers/ontap/ontap_san_nvme.go index 01a041f2f..dc18bc62a 100644 --- a/storage_drivers/ontap/ontap_san_nvme.go +++ b/storage_drivers/ontap/ontap_san_nvme.go @@ -24,7 +24,7 @@ import ( "github.com/netapp/trident/utils/errors" ) -// RegExp to match the namespace path either empty string or +// NVMeNamespaceRegExp RegExp to match the namespace path either empty string or // string of the form /vol// var NVMeNamespaceRegExp = regexp.MustCompile(`[^(\/vol\/.+\/.+)?$]`) @@ -109,7 +109,6 @@ func (d *NVMeStorageDriver) Initialize( if err != nil { return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) } - d.Config = *config // Unit tests mock the API layer, so we only use the real API interface if it doesn't already exist. 
if d.API == nil { @@ -117,19 +116,21 @@ func (d *NVMeStorageDriver) Initialize( return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) } } + // OntapStorageDriverConfig gets updated with the SVM name in InitializeOntapDriver() if the SVM name is not provided + // in the backend config json. Therefore, this is the proper place to assign it to d.Config. + d.Config = *config // Check NVMe feature support if !d.API.SupportsFeature(ctx, api.NVMeProtocol) { return fmt.Errorf("error initializing %s driver: ontap doesn't support NVMe", d.Name()) } - transport := "tcp" - if d.ips, err = d.API.NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)); err != nil { + if d.ips, err = d.API.NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport); err != nil { return err } if len(d.ips) == 0 { - return fmt.Errorf("no data LIFs with TCP protocol found on SVM %s", d.API.SVMName()) + return fmt.Errorf("no NVMe data LIFs found on SVM %s", d.API.SVMName()) } else { Logc(ctx).WithField("dataLIFs", d.ips).Debug("Found LIFs.") } @@ -1266,6 +1267,15 @@ func (d *NVMeStorageDriver) ReconcileNodeAccess(_ context.Context, _ []*utils.No return nil } +// GetBackendState returns the reason if SVM is offline, and a flag to indicate if there is change +// in physical pools list. +func (d *NVMeStorageDriver) GetBackendState(ctx context.Context) (string, *roaring.Bitmap) { + Logc(ctx).Debug(">>>> GetBackendState") + defer Logc(ctx).Debug("<<<< GetBackendState") + + return getSVMState(ctx, d.API, sa.NVMeTransport, d.GetStorageBackendPhysicalPoolNames(ctx)) +} + // String makes NVMeStorageDriver satisfy the Stringer interface. 
func (d *NVMeStorageDriver) String() string { return utils.ToStringRedacted(&d, GetOntapDriverRedactList(), d.GetExternalConfig(context.Background())) diff --git a/storage_drivers/ontap/ontap_san_nvme_test.go b/storage_drivers/ontap/ontap_san_nvme_test.go index 192e40a1d..11df5fb23 100644 --- a/storage_drivers/ontap/ontap_san_nvme_test.go +++ b/storage_drivers/ontap/ontap_san_nvme_test.go @@ -20,10 +20,7 @@ import ( "github.com/netapp/trident/utils/errors" ) -var ( - mockIPs = []string{"0.0.0.0", "1.1.1.1"} - transport = "tcp" -) +var mockIPs = []string{"0.0.0.0", "1.1.1.1"} func newNVMeDriver(apiOverride api.OntapAPI) *NVMeStorageDriver { sPrefix := "test_" @@ -121,7 +118,7 @@ func TestNVMeInitialize_GetDataLifError(t *testing.T) { } configJSON := `{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return(nil, fmt.Errorf("error getting dataLifs")) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return(nil, fmt.Errorf("error getting dataLifs")) err := d.Initialize(ctx, tridentconfig.ContextCSI, configJSON, commonConfig, nil, BackendUUID) @@ -136,12 +133,12 @@ func TestNVMeInitialize_NoDataLifs(t *testing.T) { } configJSON := `{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return([]string{}, nil) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return([]string{}, nil) mAPI.EXPECT().SVMName().Return("svm") err := d.Initialize(ctx, tridentconfig.ContextCSI, configJSON, commonConfig, nil, BackendUUID) - assert.ErrorContains(t, err, "no data LIFs with TCP protocol found on SVM") + assert.ErrorContains(t, err, "no NVMe data LIFs found on SVM svm") } func TestNVMeInitialize_GetAggrNamesError(t *testing.T) { @@ -152,7 +149,7 @@ func TestNVMeInitialize_GetAggrNamesError(t *testing.T) { } configJSON := 
`{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return(mockIPs, nil) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return(mockIPs, nil) mAPI.EXPECT().IsSVMDRCapable(ctx).Return(true, nil) mAPI.EXPECT().GetSVMAggregateNames(ctx).Return(nil, fmt.Errorf("failed to get aggrs")) @@ -171,7 +168,7 @@ func TestNVMeInitialize_ValidateStoragePrefixError(t *testing.T) { } configJSON := `{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return(mockIPs, nil) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return(mockIPs, nil) mAPI.EXPECT().IsSVMDRCapable(ctx).Return(true, nil) mAPI.EXPECT().GetSVMAggregateNames(ctx).Return([]string{"data"}, nil) mAPI.EXPECT().GetSVMAggregateAttributes(ctx).Return(nil, nil) @@ -190,7 +187,7 @@ func TestNVMeInitialize_Success(t *testing.T) { } configJSON := `{"SANType": "nvme"}` mAPI.EXPECT().SupportsFeature(ctx, gomock.Any()).Return(true) - mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, fmt.Sprintf("%s_%s", sa.NVMe, transport)).Return(mockIPs, nil) + mAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, sa.NVMeTransport).Return(mockIPs, nil) mAPI.EXPECT().IsSVMDRCapable(ctx).Return(true, nil) mAPI.EXPECT().GetSVMAggregateNames(ctx).Return([]string{"data"}, nil) mAPI.EXPECT().GetSVMAggregateAttributes(ctx).Return(nil, nil) @@ -1407,3 +1404,14 @@ func TestCreateNamespacePath(t *testing.T) { assert.Equal(t, nsNameExpected, nsNameGot) } + +func TestGetBackendState(t *testing.T) { + d, mAPI := newNVMeDriverAndMockApi(t) + + mAPI.EXPECT().GetSVMState(ctx).Return("", fmt.Errorf("returning test error")) + + reason, changeMap := d.GetBackendState(ctx) + + assert.Equal(t, reason, StateReasonSVMUnreachable, "should be 'SVM is not reachable'") + assert.NotNil(t, changeMap, "should not be nil") +} From 
561e2cbd590bda41cc5c77e1d1382870859f9464 Mon Sep 17 00:00:00 2001 From: Jonathan Rippy Date: Mon, 10 Jul 2023 11:40:57 -0400 Subject: [PATCH 07/17] OsType cannot be specified in a LUN clone POST operation, removing (#1396) --- storage_drivers/ontap/api/abstraction_rest.go | 6 ++++++ storage_drivers/ontap/api/ontap_rest.go | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/storage_drivers/ontap/api/abstraction_rest.go b/storage_drivers/ontap/api/abstraction_rest.go index e89789f66..ba9f14420 100644 --- a/storage_drivers/ontap/api/abstraction_rest.go +++ b/storage_drivers/ontap/api/abstraction_rest.go @@ -395,6 +395,11 @@ func lunInfoFromRestAttrsHelper(lunGetResponse *models.Lun) (*Lun, error) { state = *lunGetResponse.Status.State } + osType := "" + if lunGetResponse.OsType != nil { + osType = *lunGetResponse.OsType + } + lunInfo := &Lun{ Comment: responseComment, CreateTime: responseCreateTime, @@ -408,6 +413,7 @@ func lunInfoFromRestAttrsHelper(lunGetResponse *models.Lun) (*Lun, error) { SerialNumber: serialNumber, State: state, VolumeName: responseVolName, + OsType: osType, } return lunInfo, nil } diff --git a/storage_drivers/ontap/api/ontap_rest.go b/storage_drivers/ontap/api/ontap_rest.go index a92970440..89ca3e281 100644 --- a/storage_drivers/ontap/api/ontap_rest.go +++ b/storage_drivers/ontap/api/ontap_rest.go @@ -2214,8 +2214,8 @@ func (c RestClient) LunCloneCreate( Name: utils.Ptr(sourcePath), }, }, - Name: utils.Ptr(lunPath), // example: /vol/myVolume/myLun1 - OsType: utils.Ptr(osType), + Name: utils.Ptr(lunPath), // example: /vol/myVolume/myLun1 + // OsType is not supported for POST when creating a LUN clone Space: &models.LunInlineSpace{ Size: utils.Ptr(sizeInBytes), }, From 62f1f8045ccd19a4c7bcf3b58c8b6ac020e58bf0 Mon Sep 17 00:00:00 2001 From: Jonathan Rippy Date: Mon, 10 Jul 2023 19:53:14 -0400 Subject: [PATCH 08/17] Fixing this REST call, the initiator name needs to be in the URL and not within the body of the PATCH (#1398) --- 
storage_drivers/ontap/api/ontap_rest.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/storage_drivers/ontap/api/ontap_rest.go b/storage_drivers/ontap/api/ontap_rest.go index 89ca3e281..4da82bedd 100644 --- a/storage_drivers/ontap/api/ontap_rest.go +++ b/storage_drivers/ontap/api/ontap_rest.go @@ -1794,10 +1794,18 @@ func (c RestClient) IscsiInitiatorSetDefaultAuth( if *getDefaultAuthResponse.Payload.NumRecords != 1 { return fmt.Errorf("should only be one default iscsi initiator") } + if getDefaultAuthResponse.Payload.IscsiCredentialsResponseInlineRecords[0] == nil { + return fmt.Errorf("could not get the default iscsi initiator") + } + if getDefaultAuthResponse.Payload.IscsiCredentialsResponseInlineRecords[0].Initiator == nil { + return fmt.Errorf("could not get the default iscsi initiator") + } params := san.NewIscsiCredentialsModifyParamsWithTimeout(c.httpClient.Timeout) params.Context = ctx params.HTTPClient = c.httpClient + params.SvmUUID = c.svmUUID + params.Initiator = *getDefaultAuthResponse.Payload.IscsiCredentialsResponseInlineRecords[0].Initiator outboundInfo := &models.IscsiCredentialsInlineChapInlineOutbound{} if outbountUserName != "" && outboundPassphrase != "" { @@ -1815,7 +1823,6 @@ func (c RestClient) IscsiInitiatorSetDefaultAuth( authInfo := &models.IscsiCredentials{ AuthenticationType: utils.Ptr(authType), Chap: chapInfo, - Initiator: getDefaultAuthResponse.Payload.IscsiCredentialsResponseInlineRecords[0].Initiator, } params.SetInfo(authInfo) From eca626878d2ddaf77ac2f0f43c708b4946f3ef2b Mon Sep 17 00:00:00 2001 From: Joe Webster <31218426+jwebster7@users.noreply.github.com> Date: Wed, 12 Jul 2023 18:55:44 -0500 Subject: [PATCH 09/17] Add backend pool reporting to all ONTAP drivers This change enables each ONTAP driver to discover and report non-overlapping sets of discrete storage pools that exist within their respective backend. 
--- storage/backend.go | 1 + storage_drivers/common.go | 52 ++++++++ storage_drivers/common_test.go | 103 +++++++++++++++ storage_drivers/ontap/ontap_nas.go | 29 +++++ storage_drivers/ontap/ontap_nas_flexgroup.go | 22 ++++ .../ontap/ontap_nas_flexgroup_test.go | 15 +++ storage_drivers/ontap/ontap_nas_qtree.go | 32 +++++ storage_drivers/ontap/ontap_nas_qtree_test.go | 29 +++++ storage_drivers/ontap/ontap_nas_test.go | 26 ++++ storage_drivers/ontap/ontap_san.go | 32 ++++- storage_drivers/ontap/ontap_san_economy.go | 35 +++++- .../ontap/ontap_san_economy_test.go | 31 +++++ storage_drivers/ontap/ontap_san_nvme.go | 29 +++++ storage_drivers/ontap/ontap_san_nvme_test.go | 26 ++++ storage_drivers/ontap/ontap_san_test.go | 26 ++++ storage_drivers/types.go | 27 ++++ utils/utils.go | 36 ++++++ utils/utils_test.go | 119 ++++++++++++++++++ 18 files changed, 666 insertions(+), 4 deletions(-) diff --git a/storage/backend.go b/storage/backend.go index 994e0813e..460b9523e 100644 --- a/storage/backend.go +++ b/storage/backend.go @@ -1034,6 +1034,7 @@ func (b *StorageBackend) ConstructExternal(ctx context.Context) *BackendExternal for volName := range b.volumes { backendExternal.Volumes = append(backendExternal.Volumes, volName) } + return &backendExternal } diff --git a/storage_drivers/common.go b/storage_drivers/common.go index 923fdf47f..94b70803d 100644 --- a/storage_drivers/common.go +++ b/storage_drivers/common.go @@ -274,3 +274,55 @@ func ensureJoinedStringContainsElem(joined, elem, sep string) string { } return joined + sep + elem } + +// EncodeStorageBackendPools serializes and base64 encodes backend storage pools within the driver's backend; +// it is shared by all storage drivers. 
+func EncodeStorageBackendPools[P StorageBackendPool]( + ctx context.Context, config *CommonStorageDriverConfig, backendPools []P, +) ([]string, error) { + fields := LogFields{"Method": "EncodeStorageBackendPools", "Type": config.StorageDriverName} + Logd(ctx, config.StorageDriverName, + config.DebugTraceFlags["method"]).WithFields(fields).Debug(">>>> EncodeStorageBackendPools") + defer Logd(ctx, config.StorageDriverName, + config.DebugTraceFlags["method"]).WithFields(fields).Debug("<<<< EncodeStorageBackendPools") + + if len(backendPools) == 0 { + return nil, fmt.Errorf("failed encode backend pools; no storage backend pools supplied") + } + + encodedPools := make([]string, 0) + for _, pool := range backendPools { + encodedPool, err := utils.EncodeObjectToBase64String(pool) + if err != nil { + return nil, err + } + encodedPools = append(encodedPools, encodedPool) + } + return encodedPools, nil +} + +// DecodeStorageBackendPools deserializes and decodes base64 encoded pools into driver-specific backend storage pools. 
+func DecodeStorageBackendPools[P StorageBackendPool]( + ctx context.Context, config *CommonStorageDriverConfig, encodedPools []string, +) ([]P, error) { + fields := LogFields{"Method": "DecodeStorageBackendPools", "Type": config.StorageDriverName} + Logd(ctx, config.StorageDriverName, + config.DebugTraceFlags["method"]).WithFields(fields).Debug(">>>> DecodeStorageBackendPools") + defer Logd(ctx, config.StorageDriverName, + config.DebugTraceFlags["method"]).WithFields(fields).Debug("<<<< DecodeStorageBackendPools") + + if len(encodedPools) == 0 { + return nil, fmt.Errorf("failed to decode backend pools; no encoded backend pools supplied") + } + + backendPools := make([]P, 0) + for _, pool := range encodedPools { + var backendPool P + err := utils.DecodeBase64StringToObject(pool, &backendPool) + if err != nil { + return nil, err + } + backendPools = append(backendPools, backendPool) + } + return backendPools, nil +} diff --git a/storage_drivers/common_test.go b/storage_drivers/common_test.go index e3c704606..d15f88164 100644 --- a/storage_drivers/common_test.go +++ b/storage_drivers/common_test.go @@ -574,3 +574,106 @@ func TestEnsureJoinedStringContainsElem(t *testing.T) { }) } } + +func TestEncodeAndDecode_OntapFlexGroupStorageBackendPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + backendPools := []OntapFlexGroupStorageBackendPool{{SvmUUID: "svm0"}} + + encoded, err := EncodeStorageBackendPools[OntapFlexGroupStorageBackendPool](ctx, config, backendPools) + assert.NoError(t, err) + assert.True(t, len(backendPools) == len(encoded)) + + // Passing the type of the backend pools is required for DecodeStorageBackendPools. 
+ decoded, err := DecodeStorageBackendPools[OntapFlexGroupStorageBackendPool](ctx, config, encoded) + assert.NoError(t, err) + assert.EqualValues(t, backendPools, decoded) +} + +func TestEncodeAndDecode_OntapStorageBackendPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + + backendPools := []OntapStorageBackendPool{ + {SvmUUID: "svm0", Aggregate: "aggr0"}, + {SvmUUID: "svm0", Aggregate: "aggr1"}, + } + + encoded, err := EncodeStorageBackendPools[OntapStorageBackendPool](ctx, config, backendPools) + assert.NoError(t, err) + assert.True(t, len(backendPools) == len(encoded)) + + // Passing the type of the backend pools is required for DecodeStorageBackendPools. + decoded, err := DecodeStorageBackendPools[OntapStorageBackendPool](ctx, config, encoded) + assert.NoError(t, err) + assert.EqualValues(t, backendPools, decoded) +} + +func TestEncodeAndDecode_OntapEconomyStorageBackendPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + + backendPools := []OntapEconomyStorageBackendPool{ + {SvmUUID: "svm0", Aggregate: "aggr0", FlexVolPrefix: "trident_qtree_pool_test_"}, + {SvmUUID: "svm0", Aggregate: "aggr1", FlexVolPrefix: "trident_qtree_pool_test_"}, + } + + encoded, err := EncodeStorageBackendPools[OntapEconomyStorageBackendPool](ctx, config, backendPools) + assert.NoError(t, err) + assert.True(t, len(backendPools) == len(encoded)) + + // Passing the type of the backend pools is required for DecodeStorageBackendPools. 
+ decoded, err := DecodeStorageBackendPools[OntapEconomyStorageBackendPool](ctx, config, encoded) + assert.NoError(t, err) + assert.EqualValues(t, backendPools, decoded) +} + +func TestEncodeStorageBackendPools_FailsWithInvalidBackendPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + + // Backend pools are nil. + encodedPools, err := EncodeStorageBackendPools[OntapStorageBackendPool](ctx, config, nil) + assert.Error(t, err) + assert.Nil(t, encodedPools) + + // Backend pools are empty. + encodedPools, err = EncodeStorageBackendPools[OntapStorageBackendPool](ctx, config, []OntapStorageBackendPool{}) + assert.Error(t, err) + assert.Nil(t, encodedPools) +} + +func TestDecodeStorageBackendPools_FailsWithInvalidEncodedPools(t *testing.T) { + ctx := context.Background() + config := &CommonStorageDriverConfig{ + StorageDriverName: "test storage driver", + DebugTraceFlags: map[string]bool{"method": true}, + } + + // Backend pools are nil. + backendPools, err := DecodeStorageBackendPools[OntapStorageBackendPool](ctx, config, nil) + assert.Error(t, err) + assert.Nil(t, backendPools) + + // Backend pools are empty. + backendPools, err = DecodeStorageBackendPools[OntapStorageBackendPool](ctx, config, []string{}) + assert.Error(t, err) + assert.Nil(t, backendPools) + + // Backend pools specified are not valid base64 encoded strings. 
+ backendPools, err = DecodeStorageBackendPools[OntapStorageBackendPool](ctx, config, []string{"test", ""}) + assert.Error(t, err) + assert.Nil(t, backendPools) +} diff --git a/storage_drivers/ontap/ontap_nas.go b/storage_drivers/ontap/ontap_nas.go index 5eb54e0b5..d63bc2e7c 100644 --- a/storage_drivers/ontap/ontap_nas.go +++ b/storage_drivers/ontap/ontap_nas.go @@ -123,6 +123,13 @@ func (d *NASStorageDriver) Initialize( return fmt.Errorf("error validating %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. + pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -935,6 +942,28 @@ func (d *NASStorageDriver) GetStorageBackendPhysicalPoolNames(context.Context) [ return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *NASStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "NASStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. 
Aggregate (physical pool) + svmUUID := d.GetAPI().GetSVMUUID() + backendPools := make([]drivers.OntapStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + func (d *NASStorageDriver) getStoragePoolAttributes(ctx context.Context) map[string]sa.Offer { client := d.GetAPI() mirroring, _ := client.IsSVMDRCapable(ctx) diff --git a/storage_drivers/ontap/ontap_nas_flexgroup.go b/storage_drivers/ontap/ontap_nas_flexgroup.go index 36c4f3b16..81831a212 100644 --- a/storage_drivers/ontap/ontap_nas_flexgroup.go +++ b/storage_drivers/ontap/ontap_nas_flexgroup.go @@ -107,6 +107,13 @@ func (d *NASFlexGroupStorageDriver) Initialize( return fmt.Errorf("error validating %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. + pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -1342,6 +1349,21 @@ func (d *NASFlexGroupStorageDriver) GetStorageBackendPhysicalPoolNames(context.C return physicalPoolNames } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. 
+func (d *NASFlexGroupStorageDriver) getStorageBackendPools( + ctx context.Context, +) []drivers.OntapFlexGroupStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "NASFlexGroupStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // FlexGroup volumes span all or a subset of aggregates assigned to the SVM; + // As such, backend comparisons can rely on the SVM name. + return []drivers.OntapFlexGroupStorageBackendPool{{SvmUUID: d.GetAPI().GetSVMUUID()}} +} + func (d *NASFlexGroupStorageDriver) vserverAggregates(ctx context.Context, svmName string) ([]string, error) { var err error // Get the aggregates assigned to the SVM. There must be at least one! diff --git a/storage_drivers/ontap/ontap_nas_flexgroup_test.go b/storage_drivers/ontap/ontap_nas_flexgroup_test.go index 3f28b2222..d179a711e 100644 --- a/storage_drivers/ontap/ontap_nas_flexgroup_test.go +++ b/storage_drivers/ontap/ontap_nas_flexgroup_test.go @@ -310,6 +310,7 @@ func TestOntapNasFlexgroupStorageDriverInitialize(t *testing.T) { mockAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, "nfs").Return([]string{"dataLIF"}, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-nas-flexgroup", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid") result := driver.Initialize(ctx, "CSI", configJSON, commonConfig, secrets, BackendUUID) @@ -365,6 +366,7 @@ func TestOntapNasFlexgroupStorageDriverInitialize_StoragePool(t *testing.T) { mockAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, "nfs").AnyTimes().Return([]string{"dataLIF"}, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-nas-flexgroup", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + 
mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid").AnyTimes() if test.name == "flexgroupAggrListFailed" { configJSON, _ = getOntapStorageDriverConfigJson("true", "volume", "none", "", @@ -2717,6 +2719,19 @@ func TestOntapNasFlexgroupStorageDriverGetStorageBackendPhysicalPoolNames(t *tes assert.Equal(t, "pool1", poolNames[0], "Pool names are not equal") } +func TestOntapNasFlexgroupStorageDriverGetStorageBackendPools(t *testing.T) { + mockAPI, driver := newMockOntapNASFlexgroupDriver(t) + svmUUID := "SVM1-uuid" + pool := storage.NewStoragePool(nil, "pool1") + driver.physicalPool = pool + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + backendPool := pools[0] + assert.NotEmpty(t, pools) + assert.Equal(t, svmUUID, backendPool.SvmUUID) +} + func TestOntapNasFlexgroupStorageDriverGetInternalVolumeName(t *testing.T) { _, driver := newMockOntapNASFlexgroupDriver(t) driver.Config.StoragePrefix = utils.Ptr("storagePrefix_") diff --git a/storage_drivers/ontap/ontap_nas_qtree.go b/storage_drivers/ontap/ontap_nas_qtree.go index f9efc1d96..ed74a630a 100644 --- a/storage_drivers/ontap/ontap_nas_qtree.go +++ b/storage_drivers/ontap/ontap_nas_qtree.go @@ -179,6 +179,13 @@ func (d *NASQtreeStorageDriver) Initialize( return fmt.Errorf("error validating %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. 
+ pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Ensure all quotas are in force after a driver restart d.queueAllFlexvolsForQuotaResize(ctx) @@ -1560,6 +1567,31 @@ func (d *NASQtreeStorageDriver) GetStorageBackendPhysicalPoolNames(context.Conte return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *NASQtreeStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapEconomyStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "NASQtreeStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. Aggregate (physical pool) + // 3. 
FlexVol Name Prefix + svmUUID := d.GetAPI().GetSVMUUID() + flexVolPrefix := d.FlexvolNamePrefix() + backendPools := make([]drivers.OntapEconomyStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapEconomyStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + FlexVolPrefix: flexVolPrefix, + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + func (d *NASQtreeStorageDriver) getStoragePoolAttributes() map[string]sa.Offer { return map[string]sa.Offer{ sa.BackendType: sa.NewStringOffer(d.Name()), diff --git a/storage_drivers/ontap/ontap_nas_qtree_test.go b/storage_drivers/ontap/ontap_nas_qtree_test.go index 530347ab0..2abe1f4b8 100644 --- a/storage_drivers/ontap/ontap_nas_qtree_test.go +++ b/storage_drivers/ontap/ontap_nas_qtree_test.go @@ -3140,6 +3140,35 @@ func TestGetStorageBackendSpecs_Success(t *testing.T) { assert.NoError(t, result, "Expected no error, got error") } +func TestOntapNasQtreeStorageDriverGetStorageBackendPools(t *testing.T) { + mockAPI, driver := newMockOntapNasQtreeDriver(t) + svmUUID := "SVM1-uuid" + flexVolPrefix := fmt.Sprintf("trident_qtree_pool_%s_", *driver.Config.StoragePrefix) + driver.flexvolNamePrefix = flexVolPrefix + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + assert.Equal(t, flexVolPrefix, pool.FlexVolPrefix) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + 
assert.Equal(t, svmUUID, pool.SvmUUID) + assert.Equal(t, flexVolPrefix, pool.FlexVolPrefix) +} + func TestNASQtreeStorageDriver_getQuotaDiskLimitSize_1Gi(t *testing.T) { mockCtrl := gomock.NewController(t) diff --git a/storage_drivers/ontap/ontap_nas_test.go b/storage_drivers/ontap/ontap_nas_test.go index 77a65565c..32efb5183 100644 --- a/storage_drivers/ontap/ontap_nas_test.go +++ b/storage_drivers/ontap/ontap_nas_test.go @@ -393,6 +393,7 @@ func TestOntapNasStorageDriverInitialize(t *testing.T) { mockAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, "nfs").Return([]string{"dataLIF"}, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-nas", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid") result := driver.Initialize(ctx, "CSI", configJSON, commonConfig, secrets, BackendUUID) @@ -1531,6 +1532,31 @@ func TestOntapNasStorageDriverGetStorageBackendPhysicalPoolNames(t *testing.T) { assert.Equal(t, "pool1", poolNames[0], "Pool names are not equal") } +func TestOntapNasStorageDriverGetStorageBackendPools(t *testing.T) { + mockAPI, driver := newMockOntapNASDriver(t) + svmUUID := "SVM1-uuid" + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pools[0].SvmUUID) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pools[1].SvmUUID) +} + func TestOntapNasStorageDriverGetInternalVolumeName(t *testing.T) { _, driver 
:= newMockOntapNASDriver(t) driver.Config.StoragePrefix = utils.Ptr("storagePrefix_") diff --git a/storage_drivers/ontap/ontap_san.go b/storage_drivers/ontap/ontap_san.go index 626008bb4..e5ab0e5b1 100644 --- a/storage_drivers/ontap/ontap_san.go +++ b/storage_drivers/ontap/ontap_san.go @@ -116,10 +116,9 @@ func (d *SANStorageDriver) Initialize( } err = InitializeSANDriver(ctx, driverContext, d.API, &d.Config, d.validate, backendUUID) - - // clean up igroup for failed driver if err != nil { if d.Config.DriverContext == tridentconfig.ContextCSI { + // Clean up igroup for failed driver. err := d.API.IgroupDestroy(ctx, d.Config.IgroupName) if err != nil { Logc(ctx).WithError(err).WithField("igroup", d.Config.IgroupName).Warn("Error deleting igroup.") @@ -128,6 +127,13 @@ func (d *SANStorageDriver) Initialize( return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. + pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -958,6 +964,28 @@ func (d *SANStorageDriver) GetStorageBackendPhysicalPoolNames(context.Context) [ return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. 
+func (d *SANStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "SANStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. Aggregate (physical pool) + svmUUID := d.GetAPI().GetSVMUUID() + backendPools := make([]drivers.OntapStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + func (d *SANStorageDriver) getStoragePoolAttributes(ctx context.Context) map[string]sa.Offer { client := d.GetAPI() mirroring, _ := client.IsSVMDRCapable(ctx) diff --git a/storage_drivers/ontap/ontap_san_economy.go b/storage_drivers/ontap/ontap_san_economy.go index 1e4d05ca8..1bfa2af76 100644 --- a/storage_drivers/ontap/ontap_san_economy.go +++ b/storage_drivers/ontap/ontap_san_economy.go @@ -330,10 +330,9 @@ func (d *SANEconomyStorageDriver) Initialize( } err = InitializeSANDriver(ctx, driverContext, d.API, &d.Config, d.validate, backendUUID) - - // clean up igroup for failed driver if err != nil { if d.Config.DriverContext == tridentconfig.ContextCSI { + // Clean up igroup for failed driver. err := d.API.IgroupDestroy(ctx, d.Config.IgroupName) if err != nil { Logc(ctx).WithError(err).WithField("igroup", d.Config.IgroupName).Warn("Error deleting igroup.") @@ -342,6 +341,13 @@ func (d *SANEconomyStorageDriver) Initialize( return fmt.Errorf("error initializing %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. 
+ pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -1713,6 +1719,31 @@ func (d *SANEconomyStorageDriver) GetStorageBackendPhysicalPoolNames(context.Con return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *SANEconomyStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapEconomyStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "SANEconomyStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. Aggregate (physical pool) + // 3. 
FlexVol Name Prefix + svmUUID := d.GetAPI().GetSVMUUID() + flexVolPrefix := d.FlexvolNamePrefix() + backendPools := make([]drivers.OntapEconomyStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapEconomyStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + FlexVolPrefix: flexVolPrefix, + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + func (d *SANEconomyStorageDriver) getStoragePoolAttributes() map[string]sa.Offer { return map[string]sa.Offer{ sa.BackendType: sa.NewStringOffer(d.Name()), diff --git a/storage_drivers/ontap/ontap_san_economy_test.go b/storage_drivers/ontap/ontap_san_economy_test.go index 13f3e91ca..ac3fa3960 100644 --- a/storage_drivers/ontap/ontap_san_economy_test.go +++ b/storage_drivers/ontap/ontap_san_economy_test.go @@ -3036,6 +3036,35 @@ func TestOntapSanEconomyGetStorageBackendPhysicalPoolNames(t *testing.T) { assert.Equal(t, "pool1", poolNames[0], "Pool names are not equal") } +func TestOntapSanEconomyGetStorageBackendPools(t *testing.T) { + mockAPI, driver := newMockOntapSanEcoDriver(t) + svmUUID := "SVM1-uuid" + flexVolPrefix := fmt.Sprintf("trident_lun_pool_%s_", *driver.Config.StoragePrefix) + driver.flexvolNamePrefix = flexVolPrefix + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + assert.Equal(t, flexVolPrefix, pool.FlexVolPrefix) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), 
pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + assert.Equal(t, flexVolPrefix, pool.FlexVolPrefix) +} + func TestOntapSanEconomyGetInternalVolumeName(t *testing.T) { _, d := newMockOntapSanEcoDriver(t) d.Config.StoragePrefix = utils.Ptr("storagePrefix_") @@ -3783,6 +3812,7 @@ func TestOntapSanEconomyInitialize(t *testing.T) { mockAPI.EXPECT().IscsiInitiatorGetDefaultAuth(ctx).Return(authResponse, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-san-economy", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid") result := d.Initialize(ctx, "csi", commonConfigJSON, commonConfig, secrets, BackendUUID) @@ -3913,6 +3943,7 @@ func TestOntapSanEconomyInitialize_NumOfLUNs(t *testing.T) { "trident", 5).AnyTimes() if !test.expectError { mockAPI.EXPECT().IscsiInitiatorGetDefaultAuth(ctx).Return(authResponse, nil) + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid").AnyTimes() } result := d.Initialize(ctx, "csi", commonConfigJSON, commonConfig, secrets, BackendUUID) diff --git a/storage_drivers/ontap/ontap_san_nvme.go b/storage_drivers/ontap/ontap_san_nvme.go index dc18bc62a..f8cbcc8dd 100644 --- a/storage_drivers/ontap/ontap_san_nvme.go +++ b/storage_drivers/ontap/ontap_san_nvme.go @@ -145,6 +145,13 @@ func (d *NVMeStorageDriver) Initialize( return fmt.Errorf("error validating %s driver: %v", d.Name(), err) } + // Identify non-overlapping storage backend pools on the driver backend. 
+ pools, err := drivers.EncodeStorageBackendPools(ctx, commonConfig, d.getStorageBackendPools(ctx)) + if err != nil { + return fmt.Errorf("failed to encode storage backend pools: %v", err) + } + d.Config.BackendPools = pools + // Set up the autosupport heartbeat d.telemetry = NewOntapTelemetry(ctx, d) d.telemetry.Telemetry = tridentconfig.OrchestratorTelemetry @@ -942,6 +949,28 @@ func (d *NVMeStorageDriver) GetStorageBackendPhysicalPoolNames(context.Context) return getStorageBackendPhysicalPoolNamesCommon(d.physicalPools) } +// getStorageBackendPools determines any non-overlapping, discrete storage pools present on a driver's storage backend. +func (d *NVMeStorageDriver) getStorageBackendPools(ctx context.Context) []drivers.OntapStorageBackendPool { + fields := LogFields{"Method": "getStorageBackendPools", "Type": "NVMeStorageDriver"} + Logc(ctx).WithFields(fields).Debug(">>>> getStorageBackendPools") + defer Logc(ctx).WithFields(fields).Debug("<<<< getStorageBackendPools") + + // For this driver, a discrete storage pool is composed of the following: + // 1. SVM UUID + // 2. Aggregate (physical pool) + svmUUID := d.GetAPI().GetSVMUUID() + backendPools := make([]drivers.OntapStorageBackendPool, 0) + for _, pool := range d.physicalPools { + backendPool := drivers.OntapStorageBackendPool{ + SvmUUID: svmUUID, + Aggregate: pool.Name(), + } + backendPools = append(backendPools, backendPool) + } + + return backendPools +} + // getStoragePoolAttributes returns the map for storage pool attributes. 
func (d *NVMeStorageDriver) getStoragePoolAttributes(ctx context.Context) map[string]sa.Offer { client := d.GetAPI() diff --git a/storage_drivers/ontap/ontap_san_nvme_test.go b/storage_drivers/ontap/ontap_san_nvme_test.go index 11df5fb23..7e382ef0e 100644 --- a/storage_drivers/ontap/ontap_san_nvme_test.go +++ b/storage_drivers/ontap/ontap_san_nvme_test.go @@ -194,6 +194,7 @@ func TestNVMeInitialize_Success(t *testing.T) { mAPI.EXPECT().SVMName().Return("svm") mAPI.EXPECT().EmsAutosupportLog(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + mAPI.EXPECT().GetSVMUUID().Return("svm-uuid") err := d.Initialize(ctx, tridentconfig.ContextCSI, configJSON, commonConfig, nil, BackendUUID) @@ -245,6 +246,31 @@ func TestNVMeGetStorageBackendPhysicalPoolNames(t *testing.T) { assert.Equal(t, d.GetStorageBackendPhysicalPoolNames(ctx), []string{"pool1"}, "Physical pools are different.") } +func TestNVMeGetStorageBackendPools(t *testing.T) { + driver, mockAPI := newNVMeDriverAndMockApi(t) + svmUUID := "SVM1-uuid" + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) +} + func TestNVMeGetVolumeOpts(t *testing.T) { d := newNVMeDriver(nil) volConfig := storage.VolumeConfig{} diff --git a/storage_drivers/ontap/ontap_san_test.go 
b/storage_drivers/ontap/ontap_san_test.go index 71939649e..884a93b3b 100644 --- a/storage_drivers/ontap/ontap_san_test.go +++ b/storage_drivers/ontap/ontap_san_test.go @@ -2111,6 +2111,31 @@ func TestOntapSanVolumeGetStorageBackendSpecs(t *testing.T) { assert.NoError(t, err, "Failed to get the storage backend specification") } +func TestOntapSanStorageDriverGetStorageBackendPools(t *testing.T) { + mockAPI, driver := newMockOntapSANDriver(t) + svmUUID := "SVM1-uuid" + driver.physicalPools = map[string]storage.Pool{ + "pool1": storage.NewStoragePool(nil, "pool1"), + "pool2": storage.NewStoragePool(nil, "pool2"), + } + mockAPI.EXPECT().GetSVMUUID().Return(svmUUID) + + pools := driver.getStorageBackendPools(ctx) + + assert.NotEmpty(t, pools) + assert.Equal(t, len(driver.physicalPools), len(pools)) + + pool := pools[0] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) + + pool = pools[1] + assert.NotNil(t, driver.physicalPools[pool.Aggregate]) + assert.Equal(t, driver.physicalPools[pool.Aggregate].Name(), pool.Aggregate) + assert.Equal(t, svmUUID, pool.SvmUUID) +} + func TestOntapSanVolumeGetInternalVolumeName(t *testing.T) { ctx := context.Background() @@ -2867,6 +2892,7 @@ func TestOntapSanStorageDriverInitialize(t *testing.T) { mockAPI.EXPECT().NetInterfaceGetDataLIFs(ctx, "iscsi").Return([]string{"1.1.1.1"}, nil) mockAPI.EXPECT().EmsAutosupportLog(ctx, "ontap-san", "1", false, "heartbeat", hostname, string(message), 1, "trident", 5).AnyTimes() + mockAPI.EXPECT().GetSVMUUID().Return("SVM1-uuid") result := driver.Initialize(ctx, "CSI", configJSON, commonConfig, secrets, BackendUUID) diff --git a/storage_drivers/types.go b/storage_drivers/types.go index 5f95588f6..8228c24ff 100644 --- a/storage_drivers/types.go +++ b/storage_drivers/types.go @@ -72,6 +72,7 @@ type CommonStorageDriverConfig struct { StoragePrefixRaw json.RawMessage 
`json:"storagePrefix,string"` StoragePrefix *string `json:"-"` SerialNumbers []string `json:"serialNumbers,omitEmpty"` + BackendPools []string `json:"backendPools,omitEmpty"` DriverContext trident.DriverContext `json:"-"` LimitVolumeSize string `json:"limitVolumeSize"` Credentials map[string]string `json:"credentials"` @@ -148,6 +149,32 @@ type OntapStorageDriverPool struct { OntapStorageDriverConfigDefaults `json:"defaults"` } +// StorageBackendPool is a type constraint that enables drivers to generically report non-overlapping storage pools +// within a backend. +type StorageBackendPool interface { + OntapFlexGroupStorageBackendPool | OntapStorageBackendPool | OntapEconomyStorageBackendPool +} + +// OntapFlexGroupStorageBackendPool is a non-overlapping section of an ONTAP flexgroup backend that may be used for +// provisioning storage. +type OntapFlexGroupStorageBackendPool struct { + SvmUUID string `json:"svmUUID"` +} + +// OntapStorageBackendPool is a non-overlapping section of an ONTAP backend that may be used for provisioning storage. +type OntapStorageBackendPool struct { + SvmUUID string `json:"svmUUID"` + Aggregate string `json:"aggregate"` +} + +// OntapEconomyStorageBackendPool is a non-overlapping section of an ONTAP economy backend that may be used for +// provisioning storage. 
+type OntapEconomyStorageBackendPool struct { + SvmUUID string `json:"svmUUID"` + Aggregate string `json:"aggregate"` + FlexVolPrefix string `json:"flexVolPrefix"` +} + type OntapStorageDriverConfigDefaults struct { SpaceAllocation string `json:"spaceAllocation"` SpaceReserve string `json:"spaceReserve"` diff --git a/utils/utils.go b/utils/utils.go index 8784f38a7..2db01c968 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -6,6 +6,8 @@ import ( "bytes" "context" "crypto/rand" + "encoding/base64" + "encoding/json" "fmt" "net" "net/http" @@ -1042,3 +1044,37 @@ func SlicePtrs[T any](slice []T) []*T { } return result } + +func EncodeObjectToBase64String(object any) (string, error) { + if object == nil { + return "", fmt.Errorf("cannot encode nil object") + } + + // Serialize the object data to JSON + bytes, err := json.Marshal(object) + if err != nil { + return "", fmt.Errorf("failed encode object; %v", object) + } + + // Encode JSON bytes to a string + return base64.StdEncoding.EncodeToString(bytes), nil +} + +func DecodeBase64StringToObject(encodedObject string, destination any) error { + if encodedObject == "" { + return fmt.Errorf("cannot decode empty encoded string") + } + + // Decode the data from a string + bytes, err := base64.StdEncoding.DecodeString(encodedObject) + if err != nil { + return fmt.Errorf("failed to decode string; %s", encodedObject) + } + + // Deserialize the bytes into the destination + err = json.Unmarshal(bytes, &destination) + if err != nil { + return fmt.Errorf("failed to unmarshal bytes into destination of type: %t", reflect.TypeOf(destination)) + } + return nil +} diff --git a/utils/utils_test.go b/utils/utils_test.go index 464238930..6e6425c03 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -1523,3 +1523,122 @@ func TestDNS1123Regexes_MatchString(t *testing.T) { }) } } + +func TestEncodeObjectToBase64String_Fails(t *testing.T) { + // Object is nil. 
+ encodedObj, err := EncodeObjectToBase64String(nil) + assert.Empty(t, encodedObj) + assert.Error(t, err) + + // Object is an unmarshal-able type. + encodedObj, err = EncodeObjectToBase64String(func() {}) + assert.Empty(t, encodedObj) + assert.Error(t, err) +} + +func TestEncodeObjectToBase64String_Succeeds(t *testing.T) { + type testObject struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + Baz string `json:"baz,omitempty"` + } + + // Object is non-nil, but empty. + encodedObj, err := EncodeObjectToBase64String(testObject{}) + assert.NotNil(t, encodedObj) + assert.NoError(t, err) + + // Object is an object with fields filled in. + obj := testObject{ + Foo: "foo_test", + Bar: "bar_test", + Baz: "baz_test", + } + encodedObj, err = EncodeObjectToBase64String(obj) + assert.NotNil(t, encodedObj) + assert.NoError(t, err) +} + +func TestDecodeBase64StringToObject_Fails(t *testing.T) { + type testObject struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + Baz string `json:"baz,omitempty"` + } + + // Encoded object is an empty string. + actualObject := testObject{} + err := DecodeBase64StringToObject("", &actualObject) + assert.Empty(t, actualObject.Foo) + assert.Empty(t, actualObject.Bar) + assert.Empty(t, actualObject.Baz) + assert.Error(t, err) + + // Encoded object is an invalid value for a base64 string. + actualObject = testObject{} + err = DecodeBase64StringToObject("%", &actualObject) + assert.Empty(t, actualObject.Foo) + assert.Empty(t, actualObject.Bar) + assert.Empty(t, actualObject.Baz) + assert.Error(t, err) + + // Encoded object contains non-ASCII characters for a base64 string. 
+ actualObject = testObject{} + err = DecodeBase64StringToObject("ß-11234567890987654321234567890", &actualObject) + assert.Empty(t, actualObject.Foo) + assert.Empty(t, actualObject.Bar) + assert.Empty(t, actualObject.Baz) + assert.Error(t, err) +} + +func TestDecodeBase64StringToObject_Succeeds(t *testing.T) { + type testObject struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + Baz string `json:"baz,omitempty"` + } + + // Encoded object is an empty string. + actualObject := testObject{} + expectedObject := testObject{Foo: "foo_test", Bar: "bar_test", Baz: "baz_test"} + err := DecodeBase64StringToObject( + "eyJmb28iOiJmb29fdGVzdCIsImJhciI6ImJhcl90ZXN0IiwiYmF6IjoiYmF6X3Rlc3QifQ==", + &actualObject, + ) + assert.EqualValues(t, expectedObject, actualObject) + assert.NoError(t, err) + + // Encoded object is an empty string. + actualObject = testObject{} + expectedObject = testObject{Foo: "foo_test", Bar: "bar_test", Baz: "baz_test"} + err = DecodeBase64StringToObject( + "eyJmb28iOiJmb29fdGVzdCIsImJhciI6ImJhcl90ZXN0IiwiYmF6IjoiYmF6X3Rlc3QifQ==", + &actualObject, + ) + assert.EqualValues(t, expectedObject, actualObject) + assert.NoError(t, err) +} + +func TestEncodeAndDecodeToAndFromBase64(t *testing.T) { + type testObject struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + Baz string `json:"baz,omitempty"` + } + + // Create a test object and encoded it. + originalObject := testObject{Foo: "foo_test", Bar: "bar_test", Baz: "baz_test"} + encodedObject, err := EncodeObjectToBase64String(originalObject) + assert.NoError(t, err) + assert.NotNil(t, encodedObject) + + // Decode the encoded test object and ensure the values extracted object and its values are equivalent to + // those present in the original object. 
+ var actualObject testObject + err = DecodeBase64StringToObject(encodedObject, &actualObject) + assert.NoError(t, err) + assert.NotNil(t, encodedObject) + assert.Equal(t, originalObject.Foo, actualObject.Foo) + assert.Equal(t, originalObject.Bar, actualObject.Bar) + assert.Equal(t, originalObject.Baz, actualObject.Baz) +} From 2f52e48260f71bc5524e541d98dd9379a5931411 Mon Sep 17 00:00:00 2001 From: ameade <847570+ameade@users.noreply.github.com> Date: Wed, 12 Jul 2023 22:58:32 -0400 Subject: [PATCH 10/17] Imported snapshots shall have the same "managedness" of their associated volume --- core/orchestrator_core.go | 2 +- core/orchestrator_core_test.go | 63 +++++++++++++++ storage/backend.go | 10 +-- storage/backend_test.go | 77 +++++++++++++++++++ storage_drivers/azure/azure_anf_subvolume.go | 2 +- .../azure/azure_anf_subvolume_test.go | 2 +- storage_drivers/solidfire/solidfire_san.go | 2 +- 7 files changed, 149 insertions(+), 9 deletions(-) diff --git a/core/orchestrator_core.go b/core/orchestrator_core.go index f8ce83119..e0fd5d9bb 100644 --- a/core/orchestrator_core.go +++ b/core/orchestrator_core.go @@ -3952,7 +3952,7 @@ func (o *TridentOrchestrator) ImportSnapshot( // Complete the snapshot config. snapshotConfig.VolumeInternalName = volume.Config.InternalName snapshotConfig.LUKSPassphraseNames = volume.Config.LUKSPassphraseNames - snapshotConfig.ImportNotManaged = true // All imported snapshots are not managed. + snapshotConfig.ImportNotManaged = volume.Config.ImportNotManaged // Snapshots inherit the managed state of their volume // Query the storage backend for the snapshot. 
snapshot, err := backend.GetSnapshot(ctx, snapshotConfig, volume.Config) diff --git a/core/orchestrator_core_test.go b/core/orchestrator_core_test.go index d89a11f70..14aa39fc1 100644 --- a/core/orchestrator_core_test.go +++ b/core/orchestrator_core_test.go @@ -6963,6 +6963,69 @@ func TestImportSnapshot(t *testing.T) { } snapName := "snapshot-import" snapInternalName := "snap.2023-05-23_175116" + snapConfig := &storage.SnapshotConfig{ + Version: "1", + Name: snapName, + VolumeName: volumeName, + InternalName: snapInternalName, + VolumeInternalName: volumeInternalName, + ImportNotManaged: false, + } + snapshot := &storage.Snapshot{ + Config: snapConfig, + Created: "2023-05-15T17:04:09Z", + SizeBytes: 1024, + } + + // Initialize mocks. + mockCtrl := gomock.NewController(t) + mockBackend := mockstorage.NewMockBackend(mockCtrl) + mockStore := mockpersistentstore.NewMockStoreClient(mockCtrl) + + // Set up common mock expectations between test cases. + mockBackend.EXPECT().GetDriverName().Return(backendUUID).AnyTimes() + mockBackend.EXPECT().Name().Return(backendUUID).AnyTimes() + mockBackend.EXPECT().State().Return(storage.Online).AnyTimes() + mockBackend.EXPECT().BackendUUID().Return(backendUUID).AnyTimes() + + // Set up test case specific mock expectations and inject mocks into core. + mockBackend.EXPECT().GetSnapshot( + gomock.Any(), snapConfig, volume.Config, + ).Return(snapshot, nil) + mockStore.EXPECT().AddSnapshot(gomock.Any(), snapshot).Return(nil) + + o.storeClient = mockStore + o.backends[volume.BackendUUID] = mockBackend + o.volumes[snapConfig.VolumeName] = volume + + // Call method under test and make assertions. + importedSnap, err := o.ImportSnapshot(ctx(), snapConfig) + assert.NoError(t, err) + assert.NotNil(t, importedSnap) + assert.EqualValues(t, snapshot.ConstructExternal(), importedSnap) +} + +func TestImportSnapshot_VolumeNotManaged(t *testing.T) { + o := getOrchestrator(t, false) + + // Initialize variables used in all subtests. 
+ backendUUID := "test-backend" + volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4" + volumeInternalName := "trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4" + volume := &storage.Volume{ + Config: &storage.VolumeConfig{ + Version: "", + Name: volumeName, + InternalName: volumeInternalName, + ImportOriginalName: "import-" + volumeName, + ImportBackendUUID: "import-" + backendUUID, + ImportNotManaged: true, + LUKSPassphraseNames: nil, + }, + BackendUUID: backendUUID, + } + snapName := "snapshot-import" + snapInternalName := "snap.2023-05-23_175116" snapConfig := &storage.SnapshotConfig{ Version: "1", Name: snapName, diff --git a/storage/backend.go b/storage/backend.go index 460b9523e..9dd691f07 100644 --- a/storage/backend.go +++ b/storage/backend.go @@ -834,16 +834,16 @@ func (b *StorageBackend) DeleteSnapshot( "snapshotName": snapConfig.Name, }).Debug("Attempting snapshot delete.") + // Ensure snapshot is managed + if snapConfig.ImportNotManaged { + return errors.NotManagedError("snapshot %s is not managed by Trident", snapConfig.InternalName) + } + // Ensure volume is managed if volConfig.ImportNotManaged { return errors.NotManagedError("source volume %s is not managed by Trident", volConfig.InternalName) } - // Ensure snapshot is managed - if snapConfig.ImportNotManaged { - return errors.NotManagedError("source volume %s is not managed by Trident", snapConfig.InternalName) - } - // Ensure backend is ready if err := b.ensureOnlineOrDeleting(ctx); err != nil { return err diff --git a/storage/backend_test.go b/storage/backend_test.go index e0279851e..60ba04ba7 100644 --- a/storage/backend_test.go +++ b/storage/backend_test.go @@ -3,6 +3,7 @@ package storage import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -66,3 +67,79 @@ func TestBackendState(t *testing.T) { ) } } + +func TestDeleteSnapshot_BackendOffline(t *testing.T) { + volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4" + volumeInternalName := 
"trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4" + volumeConfig := &VolumeConfig{ + Version: "", + Name: volumeName, + InternalName: volumeInternalName, + } + snapName := "snapshot" + snapInternalName := "snap.2023-05-23_175116" + snapConfig := &SnapshotConfig{ + Version: "1", + Name: snapName, + VolumeName: volumeName, + InternalName: snapInternalName, + VolumeInternalName: volumeInternalName, + } + + backend := &StorageBackend{ + state: Offline, + } + + // Both volume and snapshot not managed + err := backend.DeleteSnapshot(context.Background(), snapConfig, volumeConfig) + + assert.Errorf(t, err, "expected err") +} + +func TestDeleteSnapshot_NotManaged(t *testing.T) { + backendUUID := "test-backend" + volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4" + volumeInternalName := "trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4" + volumeConfig := &VolumeConfig{ + Version: "", + Name: volumeName, + InternalName: volumeInternalName, + ImportOriginalName: "import-" + volumeName, + ImportBackendUUID: "import-" + backendUUID, + ImportNotManaged: true, + LUKSPassphraseNames: nil, + } + snapName := "snapshot-import" + snapInternalName := "snap.2023-05-23_175116" + snapConfig := &SnapshotConfig{ + Version: "1", + Name: snapName, + VolumeName: volumeName, + InternalName: snapInternalName, + VolumeInternalName: volumeInternalName, + ImportNotManaged: true, + } + + backend := &StorageBackend{ + state: Online, + } + + // Both volume and snapshot not managed + err := backend.DeleteSnapshot(context.Background(), snapConfig, volumeConfig) + + assert.Errorf(t, err, "expected err") + + // Volume not managed + volumeConfig.ImportNotManaged = true + snapConfig.ImportNotManaged = false + err = backend.DeleteSnapshot(context.Background(), snapConfig, volumeConfig) + + assert.Errorf(t, err, "expected err") + + // Snapshot not managed + volumeConfig.ImportNotManaged = false + snapConfig.ImportNotManaged = true + err = backend.DeleteSnapshot(context.Background(), snapConfig, 
volumeConfig) + + assert.Errorf(t, err, "expected err") +} diff --git a/storage_drivers/azure/azure_anf_subvolume.go b/storage_drivers/azure/azure_anf_subvolume.go index e952cf85d..5249222be 100644 --- a/storage_drivers/azure/azure_anf_subvolume.go +++ b/storage_drivers/azure/azure_anf_subvolume.go @@ -1593,7 +1593,7 @@ func (d *NASBlockStorageDriver) DeleteSnapshot( Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> DeleteSnapshot") defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< DeleteSnapshot") - creationToken := d.helper.GetSnapshotInternalName(snapConfig.VolumeName, snapName) + creationToken := snapConfig.InternalName subscriptionID, resourceGroup, _, netappAccount, cPoolName, volumeName, _, err := api.ParseSubvolumeID(volConfig.InternalID) diff --git a/storage_drivers/azure/azure_anf_subvolume_test.go b/storage_drivers/azure/azure_anf_subvolume_test.go index 82d835abc..21a3598a3 100644 --- a/storage_drivers/azure/azure_anf_subvolume_test.go +++ b/storage_drivers/azure/azure_anf_subvolume_test.go @@ -2421,7 +2421,7 @@ func getStructsForSubvolumeCreateSnapshot() ( snapConfig := &storage.SnapshotConfig{ Version: "1", Name: "testSnap", - InternalName: "testSnap", + InternalName: "trident-testSnap--ce20c", VolumeName: "pvc-ce20c6cf-0a75-4b27-b9bd-3f53bf520f4f", VolumeInternalName: "trident-pvc-ce20c6cf-0a75-4b27-b9bd-3f53bf520f4f-file-0", } diff --git a/storage_drivers/solidfire/solidfire_san.go b/storage_drivers/solidfire/solidfire_san.go index 466849f7d..f9d574c6c 100644 --- a/storage_drivers/solidfire/solidfire_san.go +++ b/storage_drivers/solidfire/solidfire_san.go @@ -1180,7 +1180,7 @@ func (d *SANStorageDriver) Publish( // Get the fstype attrs, _ := v.Attributes.(map[string]interface{}) fstype := drivers.DefaultFileSystemType - if str, ok := attrs["fstype"].(string); ok { + if str, ok := attrs["fstype"].(string); ok && str != "" { fstype = str } From 
550c77643cc4b407445aa2babdc14f77a27ef8fb Mon Sep 17 00:00:00 2001 From: agagan <110954204+agagan@users.noreply.github.com> Date: Thu, 13 Jul 2023 20:04:06 +0530 Subject: [PATCH 11/17] Increase the client rate limit to reduce the time of node registration with the trident controller in large cluster Increase the client rate limit to reduce the time of node registration with the trident controller in large cluster --- cli/k8s_client/client_factory.go | 6 ++++++ frontend/csi/controller_api/rest.go | 14 ++++++++++++-- frontend/rest/controller_routes.go | 14 +++++++++----- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/cli/k8s_client/client_factory.go b/cli/k8s_client/client_factory.go index 37965337f..03cb8649b 100644 --- a/cli/k8s_client/client_factory.go +++ b/cli/k8s_client/client_factory.go @@ -39,6 +39,8 @@ type Clients struct { const ( k8sTimeout = 30 * time.Second defaultNamespace = "default" + QPS = 50 + burstTime = 100 ) var cachedClients *Clients @@ -198,6 +200,8 @@ func createK8SClientsExCluster( } // Create the CLI-based Kubernetes client + restConfig.QPS = QPS + restConfig.Burst = burstTime k8sClient, err := NewKubeClient(restConfig, namespace, k8sTimeout) if err != nil { return nil, fmt.Errorf("could not initialize Kubernetes client; %v", err) @@ -220,6 +224,8 @@ func createK8SClientsInCluster(ctx context.Context, overrideNamespace string) (* if err != nil { return nil, err } + restConfig.QPS = QPS + restConfig.Burst = burstTime // when running in a pod, we use the Trident pod's namespace namespaceBytes, err := os.ReadFile(config.NamespaceFile) diff --git a/frontend/csi/controller_api/rest.go b/frontend/csi/controller_api/rest.go index 3fc8a5509..b3c781e69 100644 --- a/frontend/csi/controller_api/rest.go +++ b/frontend/csi/controller_api/rest.go @@ -127,10 +127,20 @@ func (c *ControllerRestClient) CreateNode(ctx context.Context, node *utils.Node) if err != nil { return CreateNodeResponse{}, fmt.Errorf("error parsing create node request; 
%v", err) } - resp, respBody, err := c.InvokeAPI(ctx, nodeData, "PUT", config.NodeURL+"/"+node.Name, false, false) + + createRequest := func() (*http.Response, []byte, error) { + resp, respBody, err := c.InvokeAPI(ctx, nodeData, "PUT", config.NodeURL+"/"+node.Name, false, false) + if err != nil { + return resp, respBody, fmt.Errorf("could not log into the Trident CSI Controller: %v", err) + } + return resp, respBody, nil + } + + resp, respBody, err := c.requestAndRetry(ctx, createRequest) if err != nil { - return CreateNodeResponse{}, fmt.Errorf("could not log into the Trident CSI Controller: %v", err) + return CreateNodeResponse{}, fmt.Errorf("failed during retry for CreateNode: %v", err) } + createResponse := CreateNodeResponse{} if err := json.Unmarshal(respBody, &createResponse); err != nil { return createResponse, fmt.Errorf("could not parse node : %s; %v", string(respBody), err) diff --git a/frontend/rest/controller_routes.go b/frontend/rest/controller_routes.go index 051b52d42..bcfe6d97a 100644 --- a/frontend/rest/controller_routes.go +++ b/frontend/rest/controller_routes.go @@ -23,10 +23,12 @@ type Routes []Route const ( // arbitrarily large number to limit maximum routines waiting for global lock - updateNodeRateLimit = 10000.0 // requests per second - updateNodeBurst = 10000 // maximum request burst - getNodeRateLimit = 10000.0 // requests per second - getNodeBurst = 10000 // maximum request burst + updateNodeRateLimit = 10000.0 // requests per second + updateNodeBurst = 10000 // maximum request burst + getNodeRateLimit = 10000.0 // requests per second + getNodeBurst = 10000 // maximum request burst + addOrUpdateNodeRateLimit = 50.0 // requests per second + addOrUpdateNodeBurst = 100 // maximum request burst ) var controllerRoutes = Routes{ @@ -160,7 +162,9 @@ var controllerRoutes = Routes{ "AddOrUpdateNode", "PUT", config.NodeURL + "/{node}", - nil, + []mux.MiddlewareFunc{ + rateLimiterMiddleware(addOrUpdateNodeRateLimit, addOrUpdateNodeBurst), + }, 
AddNode, }, Route{ From 35d6f5f3d6a635efd61b59388be477576e543fe3 Mon Sep 17 00:00:00 2001 From: Joe Webster <31218426+jwebster7@users.noreply.github.com> Date: Thu, 13 Jul 2023 09:53:10 -0500 Subject: [PATCH 12/17] Update Trident version to 23.07 --- config/config.go | 2 +- deploy/bundle_post_1_25.yaml | 2 +- deploy/bundle_pre_1_25.yaml | 2 +- deploy/crds/tridentorchestrator_cr_imagepullsecrets.yaml | 2 +- deploy/operator.yaml | 2 +- hack/VERSION | 2 +- helm/trident-operator/Chart.yaml | 2 +- operator/controllers/orchestrator/installer/installer_test.go | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/config/config.go b/config/config.go index c68dcce67..1dcfaf939 100644 --- a/config/config.go +++ b/config/config.go @@ -39,7 +39,7 @@ const ( OrchestratorName = "trident" OrchestratorClientName = OrchestratorName + "ctl" OrchestratorAPIVersion = "1" - DefaultOrchestratorVersion = "23.04.0" + DefaultOrchestratorVersion = "23.07.0" PersistentStoreBootstrapAttempts = 30 PersistentStoreBootstrapTimeout = PersistentStoreBootstrapAttempts * time.Second PersistentStoreTimeout = 10 * time.Second diff --git a/deploy/bundle_post_1_25.yaml b/deploy/bundle_post_1_25.yaml index fc3a51a74..3da3af0ad 100644 --- a/deploy/bundle_post_1_25.yaml +++ b/deploy/bundle_post_1_25.yaml @@ -454,7 +454,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: trident-operator - image: docker.io/netapp/trident-operator:23.04.0 + image: docker.io/netapp/trident-operator:23.07.0 imagePullPolicy: IfNotPresent name: trident-operator securityContext: diff --git a/deploy/bundle_pre_1_25.yaml b/deploy/bundle_pre_1_25.yaml index 00cb3a0d7..57fff3fb4 100644 --- a/deploy/bundle_pre_1_25.yaml +++ b/deploy/bundle_pre_1_25.yaml @@ -457,7 +457,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: trident-operator - image: docker.io/netapp/trident-operator:23.04.0 + image: docker.io/netapp/trident-operator:23.07.0 imagePullPolicy: IfNotPresent name: trident-operator 
securityContext: diff --git a/deploy/crds/tridentorchestrator_cr_imagepullsecrets.yaml b/deploy/crds/tridentorchestrator_cr_imagepullsecrets.yaml index af314778c..c3dad1b69 100644 --- a/deploy/crds/tridentorchestrator_cr_imagepullsecrets.yaml +++ b/deploy/crds/tridentorchestrator_cr_imagepullsecrets.yaml @@ -5,6 +5,6 @@ metadata: spec: debug: true namespace: trident - tridentImage: netapp/trident:23.04.0 + tridentImage: netapp/trident:23.07.0 imagePullSecrets: - thisisasecret diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 2e74d7e8e..486c1b380 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -22,7 +22,7 @@ spec: serviceAccountName: trident-operator containers: - name: trident-operator - image: docker.io/netapp/trident-operator:23.04.0 + image: docker.io/netapp/trident-operator:23.07.0 command: - "/trident-operator" - "--debug" diff --git a/hack/VERSION b/hack/VERSION index 92f023e6e..942d403ae 100644 --- a/hack/VERSION +++ b/hack/VERSION @@ -1 +1 @@ -23.04.0 +23.07.0 diff --git a/helm/trident-operator/Chart.yaml b/helm/trident-operator/Chart.yaml index 52ac2109f..6c1604ab8 100644 --- a/helm/trident-operator/Chart.yaml +++ b/helm/trident-operator/Chart.yaml @@ -11,4 +11,4 @@ icon: "https://raw.githubusercontent.com/NetApp/trident/master/logo/trident.png" # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
-appVersion: 23.04.0 +appVersion: 23.07.0 diff --git a/operator/controllers/orchestrator/installer/installer_test.go b/operator/controllers/orchestrator/installer/installer_test.go index fcc151d31..86d7d2c14 100644 --- a/operator/controllers/orchestrator/installer/installer_test.go +++ b/operator/controllers/orchestrator/installer/installer_test.go @@ -55,7 +55,7 @@ func createTestLabels() map[string]string { labels := make(map[string]string) labels[appLabelKey] = appLabelValue labels[K8sVersionLabelKey] = "v1.21.8" - labels[TridentVersionLabelKey] = "v23.04.0" + labels[TridentVersionLabelKey] = "v23.07.0" return labels } From 6bd58b9ebffe6233752c1373e842a133bd1b81bc Mon Sep 17 00:00:00 2001 From: Clinton Knight Date: Mon, 17 Jul 2023 09:18:54 -0400 Subject: [PATCH 13/17] Updated 3rd-party dependencies for 23.07.0 release All good things... --- Dockerfile | 2 +- cli/k8s_client/yaml_factory.go | 12 +- config/config.go | 2 +- contrib/docker/plugin/Dockerfile | 2 +- go.mod | 100 +++++------ go.sum | 290 +++++++++++++------------------ operator/Dockerfile | 2 +- 7 files changed, 178 insertions(+), 232 deletions(-) diff --git a/Dockerfile b/Dockerfile index 864f37755..7c75fea56 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ ARG ARCH=amd64 -FROM --platform=linux/${ARCH} gcr.io/distroless/static@sha256:a01d47d4036cae5a67a9619e3d06fa14a6811a2247b4da72b4233ece4efebd57 +FROM --platform=linux/${ARCH} gcr.io/distroless/static@sha256:7198a357ff3a8ef750b041324873960cf2153c11cc50abb9d8d5f8bb089f6b4e LABEL maintainers="The NetApp Trident Team" \ app="trident.netapp.io" \ diff --git a/cli/k8s_client/yaml_factory.go b/cli/k8s_client/yaml_factory.go index 77ef21ada..1ef9c783a 100644 --- a/cli/k8s_client/yaml_factory.go +++ b/cli/k8s_client/yaml_factory.go @@ -575,7 +575,7 @@ spec: - name: asup-dir mountPath: /asup - name: csi-provisioner - image: {CSI_SIDECAR_REGISTRY}/csi-provisioner:v3.4.1 + image: {CSI_SIDECAR_REGISTRY}/csi-provisioner:v3.5.0 imagePullPolicy: 
{IMAGE_PULL_POLICY} securityContext: capabilities: @@ -595,7 +595,7 @@ spec: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-attacher - image: {CSI_SIDECAR_REGISTRY}/csi-attacher:v4.2.0 + image: {CSI_SIDECAR_REGISTRY}/csi-attacher:v4.3.0 imagePullPolicy: {IMAGE_PULL_POLICY} securityContext: capabilities: @@ -613,7 +613,7 @@ spec: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-resizer - image: {CSI_SIDECAR_REGISTRY}/csi-resizer:v1.7.0 + image: {CSI_SIDECAR_REGISTRY}/csi-resizer:v1.8.0 imagePullPolicy: {IMAGE_PULL_POLICY} args: - "--v={SIDECAR_LOG_LEVEL}" @@ -626,7 +626,7 @@ spec: - name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-snapshotter - image: {CSI_SIDECAR_REGISTRY}/csi-snapshotter:v6.2.1 + image: {CSI_SIDECAR_REGISTRY}/csi-snapshotter:v6.2.2 imagePullPolicy: {IMAGE_PULL_POLICY} securityContext: capabilities: @@ -942,7 +942,7 @@ spec: mountPath: /certs readOnly: true - name: driver-registrar - image: {CSI_SIDECAR_REGISTRY}/csi-node-driver-registrar:v2.7.0 + image: {CSI_SIDECAR_REGISTRY}/csi-node-driver-registrar:v2.8.0 imagePullPolicy: {IMAGE_PULL_POLICY} args: - "--v={SIDECAR_LOG_LEVEL}" @@ -1144,7 +1144,7 @@ spec: cpu: 10m memory: 20Mi - name: node-driver-registrar - image: {CSI_SIDECAR_REGISTRY}/csi-node-driver-registrar:v2.7.0 + image: {CSI_SIDECAR_REGISTRY}/csi-node-driver-registrar:v2.8.0 imagePullPolicy: {IMAGE_PULL_POLICY} args: - --v=2 diff --git a/config/config.go b/config/config.go index 1dcfaf939..ccc90a610 100644 --- a/config/config.go +++ b/config/config.go @@ -158,7 +158,7 @@ const ( Darwin = "darwin" // Minimum and maximum supported Kubernetes versions - KubernetesVersionMin = "v1.21" + KubernetesVersionMin = "v1.22" KubernetesVersionMax = "v1.27" // KubernetesCSISidecarRegistry is where the CSI sidecar images are hosted diff --git a/contrib/docker/plugin/Dockerfile b/contrib/docker/plugin/Dockerfile index 731cd6844..00b408d1b 100644 --- 
a/contrib/docker/plugin/Dockerfile +++ b/contrib/docker/plugin/Dockerfile @@ -1,6 +1,6 @@ FROM busybox:uclibc as busybox -FROM gcr.io/distroless/static:b3e0897b507e86f0dab5bb99861e297d53891e84 +FROM gcr.io/distroless/static@sha256:7198a357ff3a8ef750b041324873960cf2153c11cc50abb9d8d5f8bb089f6b4e LABEL maintainers="The NetApp Trident Team" \ app="trident.netapp.io" \ diff --git a/go.mod b/go.mod index fef6e462b..b17afed7e 100755 --- a/go.mod +++ b/go.mod @@ -3,23 +3,23 @@ module github.com/netapp/trident go 1.20 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/netapp/armnetapp/v4 v4.0.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armfeatures v1.1.0 - github.com/RoaringBitmap/roaring v1.2.3 - github.com/cenkalti/backoff/v4 v4.2.0 + github.com/RoaringBitmap/roaring v1.3.0 + github.com/cenkalti/backoff/v4 v4.2.1 github.com/container-storage-interface/spec v1.8.0 github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 github.com/dustin/go-humanize v1.0.2-0.20230319011938-bd1b3e1a20a1 - github.com/elastic/go-sysinfo v1.10.0 + github.com/elastic/go-sysinfo v1.11.0 github.com/evanphx/json-patch/v5 v5.6.0 github.com/ghodss/yaml v1.0.1-0.20220118164431-d8423dcdf344 // 1/18/2022 - github.com/go-openapi/errors v0.20.3 - github.com/go-openapi/runtime v0.25.0 + github.com/go-openapi/errors v0.20.4 + github.com/go-openapi/runtime v0.26.0 github.com/go-openapi/strfmt v0.21.7 - github.com/go-openapi/swag v0.22.3 + github.com/go-openapi/swag v0.22.4 github.com/go-openapi/validate v0.22.1 github.com/golang/mock v1.6.0 
github.com/golang/protobuf v1.5.3 @@ -27,41 +27,41 @@ require ( github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/kr/secureheader v0.2.0 - github.com/kubernetes-csi/csi-lib-utils v0.13.0 + github.com/kubernetes-csi/csi-lib-utils v0.14.0 github.com/kubernetes-csi/csi-proxy/client v1.1.2 github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 - github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20211207080247-460296229913 + github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20230502164821-3079e7b80fca github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/hashstructure/v2 v2.0.2 - github.com/olekukonko/tablewriter v0.0.6-0.20210304033056-74c60be0ef68 - github.com/openshift/api v0.0.0-20230406152840-ce21e3fe5da2 - github.com/prometheus/client_golang v1.14.0 - github.com/sirupsen/logrus v1.9.0 + github.com/olekukonko/tablewriter v0.0.6-0.20230422125635-f6b4e4ae60d8 + github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64 + github.com/prometheus/client_golang v1.16.0 + github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.9.5 github.com/spf13/cobra v1.7.0 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.4 github.com/vishvananda/netlink v1.1.0 - github.com/zcalusic/sysinfo v0.9.6-0.20220805135214-99e836ba64f2 + github.com/zcalusic/sysinfo v1.0.1 go.uber.org/multierr v1.11.0 // github.com/uber-go/multierr - golang.org/x/crypto v0.8.0 // github.com/golang/crypto - golang.org/x/net v0.9.0 // github.com/golang/net - golang.org/x/oauth2 v0.7.0 // github.com/golang/oauth2 - golang.org/x/sys v0.7.0 // github.com/golang/sys - golang.org/x/text v0.9.0 // github.com/golang/text + golang.org/x/crypto v0.11.0 // github.com/golang/crypto + golang.org/x/net v0.12.0 // github.com/golang/net + golang.org/x/oauth2 v0.10.0 // github.com/golang/oauth2 + golang.org/x/sys v0.10.0 // github.com/golang/sys + golang.org/x/text v0.11.0 // github.com/golang/text golang.org/x/time v0.3.0 // github.com/golang/time - 
google.golang.org/grpc v1.54.0 // github.com/grpc/grpc-go - k8s.io/api v0.26.3 // github.com/kubernetes/api - k8s.io/apiextensions-apiserver v0.26.3 // github.com/kubernetes/apiextensions-apiserver - k8s.io/apimachinery v0.26.3 // github.com/kubernetes/apimachinery - k8s.io/client-go v0.26.3 // github.com/kubernetes/client-go - k8s.io/mount-utils v0.26.3 // github.com/kubernetes/mount-utils + google.golang.org/grpc v1.56.2 // github.com/grpc/grpc-go + k8s.io/api v0.27.3 // github.com/kubernetes/api + k8s.io/apiextensions-apiserver v0.27.3 // github.com/kubernetes/apiextensions-apiserver + k8s.io/apimachinery v0.27.3 // github.com/kubernetes/apimachinery + k8s.io/client-go v0.27.3 // github.com/kubernetes/client-go + k8s.io/mount-utils v0.27.3 // github.com/kubernetes/mount-utils ) require ( - cloud.google.com/go/compute v1.15.1 // indirect + cloud.google.com/go/compute v1.20.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/Microsoft/go-winio v0.6.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -75,11 +75,11 @@ require ( github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + 
github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/spec v0.20.8 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -93,7 +93,7 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.10 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/spdystream v0.2.0 // indirect @@ -108,29 +108,29 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/rivo/uniseg v0.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect - go.opentelemetry.io/otel v1.11.1 // indirect - go.opentelemetry.io/otel/trace v1.11.1 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/term v0.7.0 // indirect - golang.org/x/tools v0.6.0 // indirect + go.opentelemetry.io/otel v1.14.0 // indirect + go.opentelemetry.io/otel/trace v1.14.0 // indirect + golang.org/x/mod v0.9.0 // indirect + golang.org/x/term v0.10.0 // indirect + golang.org/x/tools v0.7.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - 
google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + k8s.io/klog/v2 v2.90.1 // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 403ece6dc..62469f034 100755 --- a/go.sum +++ b/go.sum @@ -23,8 +23,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod 
h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -40,22 +40,22 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/netapp/armnetapp/v4 v4.0.0 h1:yNyKx1DKBWXs6EP6WaaVgRuX9ilmOj8emmAyKfqHBYA= 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/netapp/armnetapp/v4 v4.0.0/go.mod h1:CLToNi36LmwVMgHuqOgfG8M0ph7VQaEUoqpO35/1wqU= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.0 h1:ht6xbz1wlfgLAwho2Fv4nKqq2ev/PCDCoX8H0MXf6q4= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.0/go.mod h1:21rlzm+SuYrS9ARS92XEGxcHQeLVDcaY2YV30rHjSd4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.1 h1:eoQrCw9DMThzbJ32fHXZtISnURk6r0TozXiWuTsay5s= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.7.1/go.mod h1:21rlzm+SuYrS9ARS92XEGxcHQeLVDcaY2YV30rHjSd4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armfeatures v1.1.0 h1:NhvID5juwkPxMUD8hdV3no0nugxk9QM8d5OSLskjOLM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armfeatures v1.1.0/go.mod h1:hDdPReNCfyh7kmZm6uKm3uH3OQkGn8gbeb1c/JkmEdE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= @@ -64,31 +64,22 @@ github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2B 
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY= -github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/RoaringBitmap/roaring v1.3.0 h1:aQmu9zQxDU0uhwR8SXOH/OrqEf+X8A0LQmwW3JX8Lcg= +github.com/RoaringBitmap/roaring v1.3.0/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -108,9 +99,9 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/docker/distribution v2.8.1+incompatible 
h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/docker v23.0.3+incompatible h1:9GhVsShNWz1hO//9BNg/dpMnZW25KydO4wtVxWAIbho= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 h1:YcvzLmdrP/b8kLAGJ8GT7bdncgCAiWxJZIlt84D+RJg= @@ -119,11 +110,10 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.2-0.20230319011938-bd1b3e1a20a1 h1:xWuCuGTxFJBS1aR92jQcf67YS3N6DozQ9xhlM421MPI= github.com/dustin/go-humanize v1.0.2-0.20230319011938-bd1b3e1a20a1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elastic/go-sysinfo v1.10.0 h1:8mhFXJrWFLpeskULp0sGq+jt5DA0AaPU+RfGDOJQPUA= -github.com/elastic/go-sysinfo v1.10.0/go.mod h1:RgpZTzVQX1UUNtbCnTYE5xzUaZ9+UU4ydR2ZXyzjkBg= +github.com/elastic/go-sysinfo v1.11.0 h1:QW+6BF1oxBoAprH3w2yephF7xLkrrSXj7gl2xC2BM4w= +github.com/elastic/go-sysinfo v1.11.0/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E= github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod 
h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -145,50 +135,52 @@ github.com/ghodss/yaml v1.0.1-0.20220118164431-d8423dcdf344/go.mod h1:GIjDIg/heH github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= 
+github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= -github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/runtime v0.25.0 
h1:7yQTCdRbWhX8vnIjdzU8S00tBYf7Sg71EBeorlPHvhc= -github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs= -github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= +github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= +github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod 
h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -213,7 +205,6 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= @@ -284,6 +275,7 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 
h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -311,16 +303,10 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -328,20 +314,19 @@ github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/secureheader v0.2.0 h1:Fe/BS3McH8EGMSc+HzaZkkRnrCyx2gq9kSVgLbyBNrA= github.com/kr/secureheader v0.2.0/go.mod h1:PfvbGMMfqBg6z+vxKGKbSJRcmASZc4klL5DiW9V5iLI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/csi-lib-utils v0.13.0 h1:QrTdZVZbHlaSUBN9ReayBPnnF1N0edFIpUKBwVIBW3w= -github.com/kubernetes-csi/csi-lib-utils v0.13.0/go.mod h1:JS9eDIZmSjx4F9o0bLTVK/qfhIIOifdjEfVXzxWapfE= +github.com/kubernetes-csi/csi-lib-utils v0.14.0 h1:pusB32LkSd7GhuT8Z6cyRFqByujc28ygWV97ndaT19s= +github.com/kubernetes-csi/csi-lib-utils v0.14.0/go.mod h1:uX8xidqxGJOLXtsfCCVsxWtZl/9NiLyd2DD3Nb+KoP4= 
github.com/kubernetes-csi/csi-proxy/client v1.1.2 h1:zRZOv9RXAd9d/46RIiVkzyssIw5tAK7IJlYIk3gn9FU= github.com/kubernetes-csi/csi-proxy/client v1.1.2/go.mod h1:SfK4HVKQdMH5KrffivddAWgX5hl3P5KmnuOTBbDNboU= github.com/kubernetes-csi/external-snapshotter/client/v6 v6.2.0 h1:cMM5AB37e9aRGjErygVT6EuBPB6s5a+l95OPERmSlVM= @@ -355,13 +340,12 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20211207080247-460296229913 h1:4QObo/l+9iqntBhKIEt5ZIs9r8MEZR8Q9fq93AVi9Bw= -github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20211207080247-460296229913/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= +github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20230502164821-3079e7b80fca h1:TsdNYsfVbY0KKLQPNWupAj/+8getyMQd/5X3haqHvt4= +github.com/mattermost/xml-roundtrip-validator v0.1.1-0.20230502164821-3079e7b80fca/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To= github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= -github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= @@ -379,8 +363,6 @@ github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGp github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= @@ -388,19 +370,17 @@ github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod 
h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.6-0.20210304033056-74c60be0ef68 h1:sB6FDvBA1aVDINTWnVSrcJ95fV/QkN6fTJgksZOT8vY= -github.com/olekukonko/tablewriter v0.0.6-0.20210304033056-74c60be0ef68/go.mod h1:8Hf+pH6thup1sPZPD+NLg7d6vbpsdilu9CPIeikvgMQ= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/olekukonko/tablewriter v0.0.6-0.20230422125635-f6b4e4ae60d8 h1:eZ1u2pOgYpOBuhRmW9qo8C7tXKtqSRNI1U3PHcpcObQ= +github.com/olekukonko/tablewriter v0.0.6-0.20230422125635-f6b4e4ae60d8/go.mod h1:8Hf+pH6thup1sPZPD+NLg7d6vbpsdilu9CPIeikvgMQ= +github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= -github.com/openshift/api v0.0.0-20230406152840-ce21e3fe5da2 h1:lpKBKpI8or60mSEEKrpS67cevp8XaW8vfmXSwCZXKd0= -github.com/openshift/api v0.0.0-20230406152840-ce21e3fe5da2/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4= +github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64 h1:j7LIIr4Vrdy4Dpd4bw2j53UXUSjA1eXXC0x89g9kyAI= +github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64/go.mod h1:yimSGmjsI+XF1mr+AKBs2//fSXIOhhetHGbMlBEfXbs= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= @@ -413,46 +393,28 @@ github.com/pkg/errors 
v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common 
v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= @@ -476,8 +438,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= @@ -498,10 +461,11 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/zcalusic/sysinfo v0.9.6-0.20220805135214-99e836ba64f2 h1:kbjgNu2XjGjB8nvV/BHZS1J8FK8ONWrFQX3uHMDp2Lc= -github.com/zcalusic/sysinfo v0.9.6-0.20220805135214-99e836ba64f2/go.mod h1:30ZyzePdcgO8cQgyXtuPpg1FPCaHAv4kTap0HE8wBjo= +github.com/zcalusic/sysinfo v1.0.1 h1:cVh8q3codjh43AGRTa54dJ2Zq+qPejv8n2VWpxKViwc= +github.com/zcalusic/sysinfo v1.0.1/go.mod h1:LxwKwtQdbTIQc65drhjQzYzt0o7jfB80LrrZm7SWn8o= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -510,11 +474,11 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= 
-go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4= -go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE= -go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs= -go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ= -go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk= +go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= +go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= +go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= +go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= +go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -529,8 +493,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -565,11 +529,10 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -577,7 +540,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -602,13 +564,10 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -618,10 +577,8 @@ 
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -634,10 +591,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -655,7 +611,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -668,8 +623,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -677,26 +630,21 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -706,8 +654,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.11.0 
h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -767,8 +715,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -838,8 +786,8 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f 
h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -860,8 +808,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -875,22 +823,20 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -910,27 +856,27 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= -k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= -k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE= -k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ= -k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= -k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= -k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= -k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/mount-utils v0.26.3 h1:FxMDiPLCkrYgonfSaKHWltLNkyTg3Q/Xrwn94uwhd8k= -k8s.io/mount-utils v0.26.3/go.mod h1:95yx9K6N37y8YZ0/lUh9U6ITosMODNaW0/v4wvaa0Xw= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= +k8s.io/api v0.27.3/go.mod 
h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg= +k8s.io/apiextensions-apiserver v0.27.3 h1:xAwC1iYabi+TDfpRhxh4Eapl14Hs2OftM2DN5MpgKX4= +k8s.io/apiextensions-apiserver v0.27.3/go.mod h1:BH3wJ5NsB9XE1w+R6SSVpKmYNyIiyIz9xAmBl8Mb+84= +k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= +k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= +k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/mount-utils v0.27.3 h1:oubkDKLTZUneW27wgyOmp8a1AAZj04vGmtq+YW8wdvY= +k8s.io/mount-utils v0.27.3/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs= +k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= +k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 
h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/operator/Dockerfile b/operator/Dockerfile index cc1ac32e1..bb935dd6c 100644 --- a/operator/Dockerfile +++ b/operator/Dockerfile @@ -1,6 +1,6 @@ ARG ARCH=amd64 -FROM --platform=linux/${ARCH} gcr.io/distroless/static@sha256:a01d47d4036cae5a67a9619e3d06fa14a6811a2247b4da72b4233ece4efebd57 +FROM --platform=linux/${ARCH} gcr.io/distroless/static@sha256:7198a357ff3a8ef750b041324873960cf2153c11cc50abb9d8d5f8bb089f6b4e LABEL maintainers="The NetApp Trident Team" \ app="trident-operator.netapp.io" description="Trident Operator" From 724169b15bdc58e983063209979f15a46f48cc4e Mon Sep 17 00:00:00 2001 From: VinayKumarHavanur <54576364+VinayKumarHavanur@users.noreply.github.com> Date: Tue, 18 Jul 2023 07:31:50 +0530 Subject: [PATCH 14/17] Feature gate for 23.07 Trident features Co-authored-by: Alex Meade --- config/config.go | 4 + frontend/crd/snapshot_restore.go | 5 ++ frontend/crd/snapshot_restore_test.go | 75 ++++++++++++++++++ frontend/crd/trident_action_mirror_update.go | 5 ++ .../crd/trident_action_mirror_update_test.go | 65 ++++++++++++++++ storage/backend.go | 6 ++ storage/backend_test.go | 61 +++++++++++++++ storage_drivers/ontap/ontap_nas_qtree.go | 5 +- storage_drivers/ontap/ontap_nas_qtree_test.go | 77 ++++++++++++++++++- 9 files changed, 301 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index ccc90a610..b78d8482a 100644 --- a/config/config.go +++ b/config/config.go @@ -248,6 +248,10 @@ var ( 6: "SINGLE_NODE_SINGLE_WRITER", 7: "SINGLE_NODE_MULTI_WRITER", } + + // DisableExtraFeatures makes a subset of Trident features disabled + // This can be removed when ACP replaces feature-gating + DisableExtraFeatures = true ) func IsValidProtocol(p Protocol) bool { diff --git a/frontend/crd/snapshot_restore.go 
b/frontend/crd/snapshot_restore.go index 714a73d81..c33e8f9f8 100644 --- a/frontend/crd/snapshot_restore.go +++ b/frontend/crd/snapshot_restore.go @@ -11,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" + "github.com/netapp/trident/config" . "github.com/netapp/trident/logging" netappv1 "github.com/netapp/trident/persistent_store/crd/apis/netapp/v1" "github.com/netapp/trident/storage" @@ -61,6 +62,10 @@ func (c *TridentCrdController) handleActionSnapshotRestore(keyItem *KeyItem) (re } }() + if config.DisableExtraFeatures { + return errors.UnsupportedError("snapshot restore is not enabled") + } + // Detect a CR that is in progress but is not a retry from the workqueue. This can only happen // if Trident restarted while processing a CR, in which case we move the CR directly to Failed. if actionCR.InProgress() && !keyItem.isRetry { diff --git a/frontend/crd/snapshot_restore_test.go b/frontend/crd/snapshot_restore_test.go index 4b8c0f580..3a72498fe 100644 --- a/frontend/crd/snapshot_restore_test.go +++ b/frontend/crd/snapshot_restore_test.go @@ -14,6 +14,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/netapp/trident/config" mockcore "github.com/netapp/trident/mocks/mock_core" netappv1 "github.com/netapp/trident/persistent_store/crd/apis/netapp/v1" "github.com/netapp/trident/utils" @@ -141,6 +142,9 @@ func fakeTASR(name, namespace, pvcName, vsName string) *netappv1.TridentActionSn } func TestHandleActionSnapshotRestore(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -237,7 +241,78 @@ func TestHandleActionSnapshotRestore(t *testing.T) { assert.True(t, apierrors.IsNotFound(err), "TASR should not have been found") } +func TestHandleActionSnapshotRestore_Disabled(t *testing.T) { + mockCtrl := 
gomock.NewController(t) + orchestrator := mockcore.NewMockOrchestrator(mockCtrl) + + tridentNamespace := "trident" + kubeClient := GetTestKubernetesClientset() + snapClient := GetTestSnapshotClientset() + crdClient := GetTestCrdClientset() + crdController, err := newTridentCrdControllerImpl(orchestrator, tridentNamespace, kubeClient, snapClient, crdClient) + if err != nil { + t.Fatalf("cannot create Trident CRD controller frontend; %v", err) + } + + // Activate the CRD controller and start monitoring + if err = crdController.Activate(); err != nil { + t.Fatalf("error while activating; %v", err) + } + time.Sleep(250 * time.Millisecond) + + pvc := fakeSnapRestorePVC(snapRestorePVC1, namespace1, snapRestorePV1) + _, _ = kubeClient.CoreV1().PersistentVolumeClaims(namespace1).Create(ctx(), pvc, createOpts) + + pv := fakePV(snapRestorePVC1, namespace1, snapRestorePV1) + _, _ = kubeClient.CoreV1().PersistentVolumes().Create(ctx(), pv, createOpts) + + vs1Time := time.Now() + vs2Time := vs1Time.Add(1 * time.Second) + vs3Time := vs2Time.Add(1 * time.Second) + + vs1 := fakeVS(snapRestoreSnap1, namespace1, snapRestoreVSC1, snapRestorePVC1, vs1Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshots(namespace1).Create(ctx(), vs1, createOpts) + + vsc1 := fakeVSC(snapRestoreSnap1, namespace1, snapRestoreVSC1, snapRestoreSnapHandle1, vs1Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshotContents().Create(ctx(), vsc1, createOpts) + + vs2 := fakeVS(snapRestoreSnap2, namespace1, snapRestoreVSC2, snapRestorePVC1, vs2Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshots(namespace1).Create(ctx(), vs2, createOpts) + + vsc2 := fakeVSC(snapRestoreSnap2, namespace1, snapRestoreVSC2, snapRestoreSnapHandle2, vs2Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshotContents().Create(ctx(), vsc2, createOpts) + + vs3 := fakeVS(snapRestoreSnap3, namespace1, snapRestoreVSC3, snapRestorePVC1, vs3Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshots(namespace1).Create(ctx(), vs3, createOpts) + + 
vsc3 := fakeVSC(snapRestoreSnap3, namespace1, snapRestoreVSC3, snapRestoreSnapHandle3, vs3Time) + _, _ = snapClient.SnapshotV1().VolumeSnapshotContents().Create(ctx(), vsc3, createOpts) + + tasr := fakeTASR(tasr1, namespace1, snapRestorePVC1, snapRestoreSnap3) + _, _ = crdClient.TridentV1().TridentActionSnapshotRestores(namespace1).Create(ctx(), tasr, createOpts) + + // Wait until the operation completes + for i := 0; i < 20; i++ { + time.Sleep(250 * time.Millisecond) + + tasr, err = crdClient.TridentV1().TridentActionSnapshotRestores(namespace1).Get(ctx(), tasr1, getOpts) + if err != nil { + if apierrors.IsNotFound(err) { + continue + } + break + } else if tasr.IsComplete() { + break + } + } + + assert.True(t, tasr.Failed(), "TASR operation did not fail") +} + func TestHandleActionSnapshotRestore_InProgressError(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) diff --git a/frontend/crd/trident_action_mirror_update.go b/frontend/crd/trident_action_mirror_update.go index 69e14665b..63e2de33d 100644 --- a/frontend/crd/trident_action_mirror_update.go +++ b/frontend/crd/trident_action_mirror_update.go @@ -11,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" + "github.com/netapp/trident/config" . "github.com/netapp/trident/logging" netappv1 "github.com/netapp/trident/persistent_store/crd/apis/netapp/v1" "github.com/netapp/trident/storage" @@ -62,6 +63,10 @@ func (c *TridentCrdController) handleActionMirrorUpdate(keyItem *KeyItem) (updat } }() + if config.DisableExtraFeatures { + return errors.UnsupportedError("mirror update is not enabled") + } + // Detect a CR that is in progress but is not a retry from the workqueue. // This can only happen if Trident restarted while processing a CR, in which case we move the CR directly to Failed. 
if actionCR.InProgress() && !keyItem.isRetry { diff --git a/frontend/crd/trident_action_mirror_update_test.go b/frontend/crd/trident_action_mirror_update_test.go index a996ea8c9..32f9fba8e 100644 --- a/frontend/crd/trident_action_mirror_update_test.go +++ b/frontend/crd/trident_action_mirror_update_test.go @@ -13,6 +13,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/netapp/trident/config" mockcore "github.com/netapp/trident/mocks/mock_core" netappv1 "github.com/netapp/trident/persistent_store/crd/apis/netapp/v1" "github.com/netapp/trident/utils" @@ -107,6 +108,9 @@ func fakeTAMU(name, namespace, tmrName, snapshotHandle string) *netappv1.Trident } func TestHandleActionMirrorUpdate(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -258,6 +262,9 @@ func TestHandleActionMirrorUpdate_ValidateFailure(t *testing.T) { } func TestHandleActionMirrorUpdate_InProgress(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -320,7 +327,56 @@ func TestHandleActionMirrorUpdate_InProgress(t *testing.T) { assert.True(t, tamu.Succeeded(), "TAMU operation failed") } +func TestHandleActionMirrorUpdate_InProgress_Disabled(t *testing.T) { + mockCtrl := gomock.NewController(t) + orchestrator := mockcore.NewMockOrchestrator(mockCtrl) + + tridentNamespace := "trident" + kubeClient := GetTestKubernetesClientset() + snapClient := GetTestSnapshotClientset() + crdClient := GetTestCrdClientset() + crdController, err := newTridentCrdControllerImpl(orchestrator, tridentNamespace, kubeClient, snapClient, crdClient) + if err != nil { + t.Fatalf("cannot create Trident CRD controller frontend, error: %v", err.Error()) + } + + // 
Activate the CRD controller and start monitoring + if err = crdController.Activate(); err != nil { + t.Fatalf("error while activating: %v", err.Error()) + } + delaySeconds(1) + + pvc := fakePVC(pvc1, namespace1, pv1) + _, _ = kubeClient.CoreV1().PersistentVolumeClaims(namespace1).Create(ctx(), pvc, createOpts) + + tmr := fakeTMR(tmrName1, namespace1, pvc1) + _, _ = crdClient.TridentV1().TridentMirrorRelationships(namespace1).Create(ctx(), tmr, createOpts) + + tamu := fakeTAMU(tamu1, namespace1, tmrName1, snapHandle1) + _, _ = crdClient.TridentV1().TridentActionMirrorUpdates(namespace1).Create(ctx(), tamu, createOpts) + + // Wait until the operation completes + for i := 0; i < 5; i++ { + time.Sleep(250 * time.Millisecond) + + tamu, err = crdClient.TridentV1().TridentActionMirrorUpdates(namespace1).Get(ctx(), tamu1, getOpts) + if err != nil { + if apierrors.IsNotFound(err) { + continue + } + break + } else if tamu.IsComplete() { + break + } + } + + assert.True(t, tamu.Failed(), "TAMU operation was not disabled") +} + func TestHandleActionMirrorUpdate_InProgressAtStartup(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -362,6 +418,9 @@ func TestHandleActionMirrorUpdate_InProgressAtStartup(t *testing.T) { } func TestUpdateActionMirrorUpdateCRInProgress(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) transferTime, _ := time.Parse(utils.TimestampFormat, previousTransferTime) @@ -397,6 +456,9 @@ func TestUpdateActionMirrorUpdateCRInProgress(t *testing.T) { } func TestUpdateActionMirrorUpdateCRComplete_Succeeded(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) 
orchestrator := mockcore.NewMockOrchestrator(mockCtrl) @@ -430,6 +492,9 @@ func TestUpdateActionMirrorUpdateCRComplete_Succeeded(t *testing.T) { } func TestUpdateActionMirrorUpdateCRComplete_Failed(t *testing.T) { + defer func() { config.DisableExtraFeatures = true }() + config.DisableExtraFeatures = false + mockCtrl := gomock.NewController(t) orchestrator := mockcore.NewMockOrchestrator(mockCtrl) diff --git a/storage/backend.go b/storage/backend.go index 9dd691f07..9c4acce3c 100644 --- a/storage/backend.go +++ b/storage/backend.go @@ -419,6 +419,12 @@ func (b *StorageBackend) CloneVolume( "cloneVolumeInternal": cloneVolConfig.InternalName, }).Debug("Attempting volume clone.") + if cloneVolConfig.ReadOnlyClone { + if tridentconfig.DisableExtraFeatures { + return nil, errors.UnsupportedError("read only clone is not supported") + } + } + // Ensure volume is managed if cloneVolConfig.ImportNotManaged { return nil, errors.NotManagedError("volume %s is not managed by Trident", cloneVolConfig.InternalName) diff --git a/storage/backend_test.go b/storage/backend_test.go index 60ba04ba7..24fab5576 100644 --- a/storage/backend_test.go +++ b/storage/backend_test.go @@ -7,6 +7,9 @@ import ( "testing" "github.com/stretchr/testify/assert" + + tridentconfig "github.com/netapp/trident/config" + "github.com/netapp/trident/utils/errors" ) func TestBackendState(t *testing.T) { @@ -143,3 +146,61 @@ func TestDeleteSnapshot_NotManaged(t *testing.T) { assert.Errorf(t, err, "expected err") } + +func TestCloneVolume_FeatureDisabled(t *testing.T) { + volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4" + volumeInternalName := "trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4" + volumeConfig := &VolumeConfig{ + Version: "", + Name: volumeName, + InternalName: volumeInternalName, + } + volumeConfigDest := &VolumeConfig{ + Version: "", + Name: "pvc-deadbeef-8240-4fd8-97bc-868bf064ecd4", + InternalName: "trident_pvc_deadbeef_8240_4fd8_97bc_868bf064ecd4", + ReadOnlyClone: true, + } + + 
backend := &StorageBackend{ + state: Offline, + } + pool := NewStoragePool(nil, "test-pool1") + + // Both volume and snapshot not managed + _, err := backend.CloneVolume(context.Background(), volumeConfig, volumeConfigDest, pool, false) + + assert.Error(t, err, "expected err") + assert.True(t, errors.IsUnsupportedError(err)) +} + +func TestCloneVolume_BackendOffline(t *testing.T) { + volumeName := "pvc-e9748b6b-8240-4fd8-97bc-868bf064ecd4" + volumeInternalName := "trident_pvc_e9748b6b_8240_4fd8_97bc_868bf064ecd4" + volumeConfig := &VolumeConfig{ + Version: "", + Name: volumeName, + InternalName: volumeInternalName, + ReadOnlyClone: true, + } + volumeConfigDest := &VolumeConfig{ + Version: "", + Name: "pvc-deadbeef-8240-4fd8-97bc-868bf064ecd4", + InternalName: "trident_pvc_deadbeef_8240_4fd8_97bc_868bf064ecd4", + ReadOnlyClone: false, + } + + backend := &StorageBackend{ + state: Offline, + name: "test-backend", + } + pool := NewStoragePool(nil, "test-pool1") + + tridentconfig.DisableExtraFeatures = false + + // Both volume and snapshot not managed + _, err := backend.CloneVolume(context.Background(), volumeConfig, volumeConfigDest, pool, false) + + assert.Errorf(t, err, "expected err") + assert.Equal(t, err.Error(), "backend test-backend is not Online") +} diff --git a/storage_drivers/ontap/ontap_nas_qtree.go b/storage_drivers/ontap/ontap_nas_qtree.go index ed74a630a..8918db7a3 100644 --- a/storage_drivers/ontap/ontap_nas_qtree.go +++ b/storage_drivers/ontap/ontap_nas_qtree.go @@ -482,7 +482,6 @@ func (d *NASQtreeStorageDriver) CreateClone( // If RO clone is requested, validate the snapshot directory access and return if cloneVolConfig.ReadOnlyClone { - _, flexvol, _, err := d.ParseQtreeInternalID(sourceVolConfig.InternalID) if err != nil { return errors.WrapWithNotFoundError(err, "error while getting flexvol") @@ -902,6 +901,10 @@ func (d *NASQtreeStorageDriver) CreateSnapshot( Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace(">>>> 
CreateSnapshot") defer Logd(ctx, d.Name(), d.Config.DebugTraceFlags["method"]).WithFields(fields).Trace("<<<< CreateSnapshot") + if tridentconfig.DisableExtraFeatures { + return nil, errors.UnsupportedError(fmt.Sprintf("snapshots are not supported by backend type %s", d.Name())) + } + if volConfig.ReadOnlyClone { // This is a read-only volume and hence do not create snapshot of it return nil, fmt.Errorf("snapshot is not supported for a read-only volume") diff --git a/storage_drivers/ontap/ontap_nas_qtree_test.go b/storage_drivers/ontap/ontap_nas_qtree_test.go index 2abe1f4b8..0116afc65 100644 --- a/storage_drivers/ontap/ontap_nas_qtree_test.go +++ b/storage_drivers/ontap/ontap_nas_qtree_test.go @@ -3761,7 +3761,30 @@ func TestCanSnapshot_InvalidSnapshotDir(t *testing.T) { assert.NotNil(t, result, "result is nil") } +func TestCreateSnapshot_Disabled(t *testing.T) { + _, driver := newMockOntapNasQtreeDriver(t) + volConfig := &storage.VolumeConfig{ + Size: "1g", + Encryption: "false", + FileSystem: "nfs", + InternalName: flexvol, + InternalID: volInternalID, + } + + snapConfig := &storage.SnapshotConfig{ + InternalName: "snap1", + VolumeInternalName: "vol1", + } + + _, err := driver.CreateSnapshot(ctx, snapConfig, volConfig) + + assert.Error(t, err, "no error occurred") +} + func TestCreateSnapshot_Success(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3794,6 +3817,9 @@ func TestCreateSnapshot_Success(t *testing.T) { } func TestCreateSnapshot_FailureErrorCheckingVolume(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3818,6 +3844,9 @@ func TestCreateSnapshot_FailureErrorCheckingVolume(t *testing.T) { } func 
TestCreateSnapshot_FailureNoVolumeExists(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3842,6 +3871,9 @@ func TestCreateSnapshot_FailureNoVolumeExists(t *testing.T) { } func TestCreateSnapshot_FailureSnapshotCreateFailed(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3867,6 +3899,9 @@ func TestCreateSnapshot_FailureSnapshotCreateFailed(t *testing.T) { } func TestCreateSnapshot_FailureSnapshotInfoFailed(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3899,6 +3934,9 @@ func TestCreateSnapshot_FailureSnapshotInfoFailed(t *testing.T) { } func TestCreateSnapshot_FailureNoSnapshots(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3927,6 +3965,9 @@ func TestCreateSnapshot_FailureNoSnapshots(t *testing.T) { } func TestCreateSnapshot_FailureWrongVolumeID(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3950,6 +3991,9 @@ func TestCreateSnapshot_FailureWrongVolumeID(t *testing.T) { } func TestGetSnapshot_Success(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := 
&storage.VolumeConfig{ Size: "1g", @@ -3980,6 +4024,9 @@ func TestGetSnapshot_Success(t *testing.T) { } func TestGetSnapshot_FailureNoSnapshotReturned(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -3996,7 +4043,8 @@ func TestGetSnapshot_FailureNoSnapshotReturned(t *testing.T) { mockAPI.EXPECT().SVMName().AnyTimes().Return("SVM1") mockAPI.EXPECT().VolumeSnapshotInfo(ctx, snapConfig.InternalName, flexvol).Return( api.Snapshot{}, - errors.NotFoundError(fmt.Sprintf("snapshot %v not found for volume %v", snapConfig.InternalName, snapConfig.VolumeInternalName))) + errors.NotFoundError(fmt.Sprintf("snapshot %v not found for volume %v", snapConfig.InternalName, + snapConfig.VolumeInternalName))) snap, err := driver.GetSnapshot(ctx, snapConfig, volConfig) @@ -4005,6 +4053,9 @@ func TestGetSnapshot_FailureNoSnapshotReturned(t *testing.T) { } func TestGetSnapshot_FailureErrorFetchingSnapshots(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4030,6 +4081,9 @@ func TestGetSnapshot_FailureErrorFetchingSnapshots(t *testing.T) { } func TestGetSnapshot_FailureWrongVolumeID(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4052,6 +4106,9 @@ func TestGetSnapshot_FailureWrongVolumeID(t *testing.T) { } func TestGetSnapshots_Success(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4077,6 
+4134,9 @@ func TestGetSnapshots_Success(t *testing.T) { } func TestGetSnapshots_SuccessDockerContext(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4102,6 +4162,9 @@ func TestGetSnapshots_SuccessDockerContext(t *testing.T) { } func TestGetSnapshots_FailureWrongVolumeID(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4120,6 +4183,9 @@ func TestGetSnapshots_FailureWrongVolumeID(t *testing.T) { } func TestGetSnapshots_FailureSnapshotListErr(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ Size: "1g", @@ -4145,6 +4211,9 @@ func TestGetSnapshots_FailureSnapshotListErr(t *testing.T) { } func TestDeleteSnapshot_Success(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) volConfig := &storage.VolumeConfig{ @@ -4168,6 +4237,9 @@ func TestDeleteSnapshot_Success(t *testing.T) { } func TestDeleteSnapshot_FailureSnapshotBusy(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + mockAPI, driver := newMockOntapNasQtreeDriver(t) childVols := make([]string, 0) childVols = append(childVols, flexvol) @@ -4196,6 +4268,9 @@ func TestDeleteSnapshot_FailureSnapshotBusy(t *testing.T) { } func TestDeleteSnapshot_FailureWrongVolumeID(t *testing.T) { + defer func() { tridentconfig.DisableExtraFeatures = true }() + tridentconfig.DisableExtraFeatures = false + _, driver := 
newMockOntapNasQtreeDriver(t) childVols := make([]string, 0) childVols = append(childVols, flexvol) From 5680f764dcbb58e1065a44048e3d4e33cd8d8da6 Mon Sep 17 00:00:00 2001 From: Jonathan Rippy Date: Wed, 19 Jul 2023 11:56:57 -0400 Subject: [PATCH 15/17] Add check for volume state in REST clone create path This adds an extra check for volume state during for clone create operations in REST. --- storage_drivers/ontap/ontap_common.go | 16 ++++------------ storage_drivers/ontap/ontap_nas_test.go | 4 ++++ 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/storage_drivers/ontap/ontap_common.go b/storage_drivers/ontap/ontap_common.go index bb37323c3..a1dcc4696 100644 --- a/storage_drivers/ontap/ontap_common.go +++ b/storage_drivers/ontap/ontap_common.go @@ -2692,18 +2692,10 @@ func cloneFlexvol( return err } - // NVMe clone is not ready by the time we return from VolumeCloneCreate. - // This check here makes sure that we don't fail the clone operation during the time clone is not ready - // Currently this change is done only for NVMe volumes but it should work with other volumes too if needed - if config.SANType == sa.NVMe { - - desiredNVMeVolStates := []string{"online"} - abortNVMeVolStates := []string{"error"} - volState, err := client.VolumeWaitForStates(ctx, name, desiredNVMeVolStates, abortNVMeVolStates, - maxFlexvolCloneWait) - if err != nil { - return fmt.Errorf("unable to create flexClone for NVMe volume %v, volState:%v", name, volState) - } + desiredStates, abortStates := []string{"online"}, []string{"error"} + volState, err := client.VolumeWaitForStates(ctx, name, desiredStates, abortStates, maxFlexvolCloneWait) + if err != nil { + return fmt.Errorf("unable to create flexClone for volume %v, volState:%v", name, volState) } if err = client.VolumeSetComment(ctx, name, name, labels); err != nil { diff --git a/storage_drivers/ontap/ontap_nas_test.go b/storage_drivers/ontap/ontap_nas_test.go index 32efb5183..c5daba610 100644 --- 
a/storage_drivers/ontap/ontap_nas_test.go +++ b/storage_drivers/ontap/ontap_nas_test.go @@ -675,6 +675,8 @@ func TestOntapNasStorageDriverVolumeClone(t *testing.T) { mockAPI.EXPECT().VolumeExists(ctx, "").Return(false, nil) mockAPI.EXPECT().VolumeCloneCreate(ctx, volConfig.InternalName, volConfig.CloneSourceVolumeInternal, volConfig.CloneSourceSnapshotInternal, false).Return(nil) + mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, gomock.Any(), gomock.Any(), + maxFlexvolCloneWait).Return("online", nil) mockAPI.EXPECT().VolumeSetComment(ctx, volConfig.InternalName, volConfig.InternalName, "flexvol"). Return(nil) mockAPI.EXPECT().VolumeMount(ctx, volConfig.InternalName, "/"+volConfig.InternalName).Return(nil) @@ -946,6 +948,8 @@ func TestOntapNasStorageDriverVolumeClone_SMBShareCreateFail(t *testing.T) { mockAPI.EXPECT().VolumeExists(ctx, "").Return(false, nil) mockAPI.EXPECT().VolumeCloneCreate(ctx, volConfig.InternalName, volConfig.CloneSourceVolumeInternal, volConfig.CloneSourceSnapshotInternal, false).Return(nil) + mockAPI.EXPECT().VolumeWaitForStates(ctx, volConfig.InternalName, gomock.Any(), gomock.Any(), + maxFlexvolCloneWait).Return("online", nil) mockAPI.EXPECT().VolumeSetComment(ctx, volConfig.InternalName, volConfig.InternalName, "flexvol").Return(nil) mockAPI.EXPECT().VolumeMount(ctx, volConfig.InternalName, "/"+volConfig.InternalName).Return(nil) mockAPI.EXPECT().SMBShareExists(ctx, volConfig.InternalName).Return(false, nil) From 616b3f8ae390bf9a1e2785c909ee00ea3f74eb60 Mon Sep 17 00:00:00 2001 From: Rohit Arora <49132604+ntap-arorar@users.noreply.github.com> Date: Fri, 21 Jul 2023 14:43:39 -0400 Subject: [PATCH 16/17] Add multipath serial and size checks; enhance unstaging to ensure a correct mpath device is removed This change adds multiple verifications in the staging and unstaging paths of iSCSI volumes. 
This ensures, multipath devices are not influenced by any existing Ghost devices, at the same time these checks ensure ghost devices can be safely removed without influencing good devices. --- core/orchestrator_core.go | 3 +- frontend/csi/node_server.go | 117 +++++++-- mocks/mock_utils/mock_iscsi_utils.go | 45 ++++ storage_drivers/ontap/ontap_san.go | 12 +- storage_drivers/ontap/ontap_san_economy.go | 12 +- storage_drivers/solidfire/solidfire_san.go | 11 +- utils/devices.go | 254 +++++++++++++++++-- utils/errors/errors.go | 22 ++ utils/iscsi.go | 280 ++++++++++++++++++--- utils/iscsi_types.go | 3 + 10 files changed, 683 insertions(+), 76 deletions(-) diff --git a/core/orchestrator_core.go b/core/orchestrator_core.go index e0fd5d9bb..416c3bccb 100644 --- a/core/orchestrator_core.go +++ b/core/orchestrator_core.go @@ -3441,8 +3441,9 @@ func (o *TridentOrchestrator) AttachVolume( return utils.MountDevice(ctx, loopDeviceName, mountpoint, publishInfo.SubvolumeMountOptions, isRawBlock) } } else { - return utils.AttachISCSIVolumeRetry(ctx, volumeName, mountpoint, publishInfo, map[string]string{}, + _, err := utils.AttachISCSIVolumeRetry(ctx, volumeName, mountpoint, publishInfo, map[string]string{}, AttachISCSIVolumeTimeoutLong) + return err } } diff --git a/frontend/csi/node_server.go b/frontend/csi/node_server.go index 69ea67f5d..4d6134dd6 100644 --- a/frontend/csi/node_server.go +++ b/frontend/csi/node_server.go @@ -422,7 +422,8 @@ func (p *Plugin) NodeExpandVolume( }).Warn("Received something other than the expected stagingTargetPath.") } - err = p.nodeExpandVolume(ctx, &trackingInfo.VolumePublishInfo, requiredBytes, stagingTargetPath, volumeId, req.GetSecrets()) + err = p.nodeExpandVolume(ctx, &trackingInfo.VolumePublishInfo, requiredBytes, stagingTargetPath, volumeId, + req.GetSecrets()) if err != nil { return nil, err } @@ -1017,11 +1018,12 @@ func (p *Plugin) populatePublishedSessions(ctx context.Context) { volumeIDs := utils.GetAllVolumeIDs(ctx, 
tridentDeviceInfoPath) for _, volumeID := range volumeIDs { trackingInfo, err := p.nodeHelper.ReadTrackingInfo(ctx, volumeID) - if err != nil { + if err != nil || trackingInfo == nil { Logc(ctx).WithFields(LogFields{ - "VolumeID": volumeID, - "Error": err.Error(), - }).Error("Volume tracking file info not found.") + "volumeID": volumeID, + "error": err.Error(), + "isEmpty": trackingInfo == nil, + }).Error("Volume tracking file info not found or is empty.") continue } @@ -1037,6 +1039,26 @@ func (p *Plugin) populatePublishedSessions(ctx context.Context) { } } +func (p *Plugin) readAllTrackingFiles(ctx context.Context) []utils.VolumePublishInfo { + publishInfos := make([]utils.VolumePublishInfo, 0) + volumeIDs := utils.GetAllVolumeIDs(ctx, tridentDeviceInfoPath) + for _, volumeID := range volumeIDs { + trackingInfo, err := p.nodeHelper.ReadTrackingInfo(ctx, volumeID) + if err != nil || trackingInfo == nil { + Logc(ctx).WithError(err).WithFields(LogFields{ + "volumeID": volumeID, + "isEmpty": trackingInfo == nil, + }).Error("Volume tracking file info not found or is empty.") + + continue + } + + publishInfos = append(publishInfos, trackingInfo.VolumePublishInfo) + } + + return publishInfos +} + func (p *Plugin) nodeStageISCSIVolume( ctx context.Context, req *csi.NodeStageVolumeRequest, publishInfo *utils.VolumePublishInfo, ) error { @@ -1098,7 +1120,8 @@ func (p *Plugin) nodeStageISCSIVolume( } } - if err = p.ensureAttachISCSIVolume(ctx, req, "", publishInfo, AttachISCSIVolumeTimeoutShort); err != nil { + mpathSize, err := p.ensureAttachISCSIVolume(ctx, req, "", publishInfo, AttachISCSIVolumeTimeoutShort) + if err != nil { return err } @@ -1107,7 +1130,8 @@ func (p *Plugin) nodeStageISCSIVolume( return err } if isLUKS { - luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, req.VolumeContext["internalName"]) + luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, + req.VolumeContext["internalName"]) if err != 
nil { return err } @@ -1118,6 +1142,20 @@ func (p *Plugin) nodeStageISCSIVolume( } } + if mpathSize > 0 { + Logc(ctx).Warn("Multipath device size may not be correct, performing gratuitous resize.") + + err = p.nodeExpandVolume(ctx, publishInfo, mpathSize, stagingTargetPath, volumeId, req.GetSecrets()) + if err != nil { + Logc(ctx).WithFields(LogFields{ + "multipathDevice": publishInfo.DevicePath, + "volumeID": volumeId, + "size": mpathSize, + "err": err, + }).Warn("Attempt to perform gratuitous resize failed.") + } + } + volTrackingInfo := &utils.VolumeTrackingInfo{ VolumePublishInfo: *publishInfo, StagingTargetPath: stagingTargetPath, @@ -1139,28 +1177,32 @@ func (p *Plugin) nodeStageISCSIVolume( func (p *Plugin) ensureAttachISCSIVolume( ctx context.Context, req *csi.NodeStageVolumeRequest, mountpoint string, publishInfo *utils.VolumePublishInfo, attachTimeout time.Duration, -) error { +) (int64, error) { + var err error + var mpathSize int64 + // Perform the login/rescan/discovery/(optionally)format, mount & get the device back in the publish info - if err := utils.AttachISCSIVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint, publishInfo, + if mpathSize, err = utils.AttachISCSIVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint, publishInfo, req.GetSecrets(), attachTimeout); err != nil { // Did we fail to log in? if errors.IsAuthError(err) { // Update CHAP info from the controller and try one more time. 
Logc(ctx).Warn("iSCSI login failed; will retrieve CHAP credentials from Trident controller and try again.") if err = p.updateChapInfoFromController(ctx, req, publishInfo); err != nil { - return status.Error(codes.Internal, err.Error()) + return mpathSize, status.Error(codes.Internal, err.Error()) } - if err = utils.AttachISCSIVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint, publishInfo, + if mpathSize, err = utils.AttachISCSIVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint, + publishInfo, req.GetSecrets(), attachTimeout); err != nil { // Bail out no matter what as we've now tried with updated credentials - return status.Error(codes.Internal, err.Error()) + return mpathSize, status.Error(codes.Internal, err.Error()) } } else { - return status.Error(codes.Internal, fmt.Sprintf("failed to stage volume: %v", err)) + return mpathSize, status.Error(codes.Internal, fmt.Sprintf("failed to stage volume: %v", err)) } } - return nil + return mpathSize, nil } func (p *Plugin) updateChapInfoFromController( @@ -1192,18 +1234,41 @@ func (p *Plugin) nodeUnstageISCSIVolume( return fmt.Errorf("could not parse LUKSEncryption into a bool, got %v", publishInfo.LUKSEncryption) } if isLUKS { - err := utils.EnsureLUKSDeviceClosed(ctx, publishInfo.DevicePath) + // Before closing the device, get the corresponding DM device. + publishedLUKsDevice, err := utils.GetUnderlyingDevicePathForLUKSDevice(ctx, publishInfo.DevicePath) + if err != nil { + // No need to return an error + Logc(ctx).WithFields(LogFields{ + "devicePath": publishInfo.DevicePath, + "LUN": publishInfo.IscsiLunNumber, + "err": err, + }).Error("Failed to verify the multipath device, could not determine" + + " underlying device for LUKS mapping.") + } + + err = utils.EnsureLUKSDeviceClosed(ctx, publishInfo.DevicePath) if err != nil { return err } + + // For the future steps LUKs device path is not really useful, either it should be + // DM device or empty. 
+ publishInfo.DevicePath = publishedLUKsDevice } } // Delete the device from the host. - unmappedMpathDevice, err := utils.PrepareDeviceForRemoval(ctx, int(publishInfo.IscsiLunNumber), - publishInfo.IscsiTargetIQN, p.unsafeDetach, force) - if nil != err && !p.unsafeDetach { - return status.Error(codes.Internal, err.Error()) + unmappedMpathDevice, err := utils.PrepareDeviceForRemoval(ctx, publishInfo, nil, p.unsafeDetach, force) + if err != nil { + if errors.IsISCSISameLunNumberError(err) { + // There is a need to pass all the publish infos this time + unmappedMpathDevice, err = utils.PrepareDeviceForRemoval(ctx, publishInfo, p.readAllTrackingFiles(ctx), + p.unsafeDetach, force) + } + + if err != nil && !p.unsafeDetach { + return status.Error(codes.Internal, err.Error()) + } } // Get map of hosts and sessions for given Target IQN. @@ -1325,7 +1390,8 @@ func (p *Plugin) nodePublishISCSIVolume( } if isLUKS { // Rotate the LUKS passphrase if needed, on failure, log and continue to publish - luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, req.VolumeContext["internalName"]) + luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, + req.VolumeContext["internalName"]) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -1889,7 +1955,7 @@ func (p *Plugin) selfHealingRectifySession(ctx context.Context, portal string, a publishedCHAPCredentials := publishInfo.IscsiChapInfo - if err = p.ensureAttachISCSIVolume(ctx, req, "", &publishInfo, iSCSILoginTimeout); err != nil { + if _, err = p.ensureAttachISCSIVolume(ctx, req, "", &publishInfo, iSCSILoginTimeout); err != nil { return fmt.Errorf("failed to login to the target") } @@ -2123,7 +2189,8 @@ func (p *Plugin) nodeStageNVMeVolume( publishInfo.NVMeTargetIPs = strings.Split(req.PublishContext["nvmeTargetIPs"], ",") publishInfo.SANType = req.PublishContext["SANType"] - if err := utils.AttachNVMeVolumeRetry(ctx, req.VolumeContext["internalName"], 
"", publishInfo, nil, nvmeAttachTimeout); err != nil { + if err := utils.AttachNVMeVolumeRetry(ctx, req.VolumeContext["internalName"], "", publishInfo, nil, + nvmeAttachTimeout); err != nil { return err } @@ -2133,7 +2200,8 @@ func (p *Plugin) nodeStageNVMeVolume( } if isLUKS { - luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, req.VolumeContext["internalName"]) + luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, + req.VolumeContext["internalName"]) if err != nil { return err } @@ -2245,7 +2313,8 @@ func (p *Plugin) nodePublishNVMeVolume( } if isLUKS { // Rotate the LUKS passphrase if needed, on failure, log and continue to publish - luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, req.VolumeContext["internalName"]) + luksDevice, err := utils.NewLUKSDeviceFromMappingPath(ctx, publishInfo.DevicePath, + req.VolumeContext["internalName"]) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/mocks/mock_utils/mock_iscsi_utils.go b/mocks/mock_utils/mock_iscsi_utils.go index 0c8e96b20..c38b43504 100644 --- a/mocks/mock_utils/mock_iscsi_utils.go +++ b/mocks/mock_utils/mock_iscsi_utils.go @@ -64,6 +64,51 @@ func (mr *MockIscsiReconcileUtilsMockRecorder) GetISCSIHostSessionMapForTarget(a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetISCSIHostSessionMapForTarget", reflect.TypeOf((*MockIscsiReconcileUtils)(nil).GetISCSIHostSessionMapForTarget), arg0, arg1) } +// GetMultipathDeviceBySerial mocks base method. +func (m *MockIscsiReconcileUtils) GetMultipathDeviceBySerial(arg0 context.Context, arg1 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMultipathDeviceBySerial", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMultipathDeviceBySerial indicates an expected call of GetMultipathDeviceBySerial. 
+func (mr *MockIscsiReconcileUtilsMockRecorder) GetMultipathDeviceBySerial(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultipathDeviceBySerial", reflect.TypeOf((*MockIscsiReconcileUtils)(nil).GetMultipathDeviceBySerial), arg0, arg1) +} + +// GetMultipathDeviceDisks mocks base method. +func (m *MockIscsiReconcileUtils) GetMultipathDeviceDisks(arg0 context.Context, arg1 string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMultipathDeviceDisks", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMultipathDeviceDisks indicates an expected call of GetMultipathDeviceDisks. +func (mr *MockIscsiReconcileUtilsMockRecorder) GetMultipathDeviceDisks(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultipathDeviceDisks", reflect.TypeOf((*MockIscsiReconcileUtils)(nil).GetMultipathDeviceDisks), arg0, arg1) +} + +// GetMultipathDeviceUUID mocks base method. +func (m *MockIscsiReconcileUtils) GetMultipathDeviceUUID(arg0 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMultipathDeviceUUID", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMultipathDeviceUUID indicates an expected call of GetMultipathDeviceUUID. +func (mr *MockIscsiReconcileUtilsMockRecorder) GetMultipathDeviceUUID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMultipathDeviceUUID", reflect.TypeOf((*MockIscsiReconcileUtils)(nil).GetMultipathDeviceUUID), arg0) +} + // GetSysfsBlockDirsForLUN mocks base method. 
func (m *MockIscsiReconcileUtils) GetSysfsBlockDirsForLUN(arg0 int, arg1 map[int]int) []string { m.ctrl.T.Helper() diff --git a/storage_drivers/ontap/ontap_san.go b/storage_drivers/ontap/ontap_san.go index e5ab0e5b1..c2ce64c89 100644 --- a/storage_drivers/ontap/ontap_san.go +++ b/storage_drivers/ontap/ontap_san.go @@ -719,8 +719,18 @@ func (d *SANStorageDriver) Destroy(ctx context.Context, volConfig *storage.Volum return fmt.Errorf("error reading LUN maps for volume %s: %v", name, err) } if lunID >= 0 { + publishInfo := utils.VolumePublishInfo{ + DevicePath: "", + VolumeAccessInfo: utils.VolumeAccessInfo{ + IscsiAccessInfo: utils.IscsiAccessInfo{ + IscsiTargetIQN: iSCSINodeName, + IscsiLunNumber: int32(lunID), + }, + }, + } + // Inform the host about the device removal - if _, err := utils.PrepareDeviceForRemoval(ctx, lunID, iSCSINodeName, true, false); err != nil { + if _, err := utils.PrepareDeviceForRemoval(ctx, &publishInfo, nil, true, false); err != nil { Logc(ctx).Error(err) } } diff --git a/storage_drivers/ontap/ontap_san_economy.go b/storage_drivers/ontap/ontap_san_economy.go index 1bfa2af76..56f38c024 100644 --- a/storage_drivers/ontap/ontap_san_economy.go +++ b/storage_drivers/ontap/ontap_san_economy.go @@ -884,8 +884,18 @@ func (d *SANEconomyStorageDriver) Destroy(ctx context.Context, volConfig *storag return fmt.Errorf("error reading LUN maps for volume %s: %v", name, err) } if lunID >= 0 { + publishInfo := utils.VolumePublishInfo{ + DevicePath: "", + VolumeAccessInfo: utils.VolumeAccessInfo{ + IscsiAccessInfo: utils.IscsiAccessInfo{ + IscsiTargetIQN: iSCSINodeName, + IscsiLunNumber: int32(lunID), + }, + }, + } + // Inform the host about the device removal - if _, err := utils.PrepareDeviceForRemoval(ctx, lunID, iSCSINodeName, true, false); err != nil { + if _, err := utils.PrepareDeviceForRemoval(ctx, &publishInfo, nil, true, false); err != nil { Logc(ctx).Error(err) } } diff --git a/storage_drivers/solidfire/solidfire_san.go 
b/storage_drivers/solidfire/solidfire_san.go index f9d574c6c..f2bf91a9e 100644 --- a/storage_drivers/solidfire/solidfire_san.go +++ b/storage_drivers/solidfire/solidfire_san.go @@ -1134,9 +1134,18 @@ func (d *SANStorageDriver) Destroy(ctx context.Context, volConfig *storage.Volum } if d.Config.DriverContext == tridentconfig.ContextDocker { + publishInfo := utils.VolumePublishInfo{ + DevicePath: "", + VolumeAccessInfo: utils.VolumeAccessInfo{ + IscsiAccessInfo: utils.IscsiAccessInfo{ + IscsiTargetIQN: v.Iqn, + IscsiLunNumber: 0, + }, + }, + } // Inform the host about the device removal - if _, err = utils.PrepareDeviceForRemoval(ctx, 0, v.Iqn, true, false); err != nil { + if _, err = utils.PrepareDeviceForRemoval(ctx, &publishInfo, nil, true, false); err != nil { Logc(ctx).Warningf("Unable to prepare device for removal, attempting to detach anyway: %v", err) } diff --git a/utils/devices.go b/utils/devices.go index 06263a747..542ff86bf 100644 --- a/utils/devices.go +++ b/utils/devices.go @@ -4,6 +4,7 @@ package utils import ( "bytes" + "encoding/hex" "fmt" "os" "path/filepath" @@ -19,7 +20,10 @@ import ( "github.com/netapp/trident/utils/errors" ) -const luksDevicePrefix = "luks-" +const ( + luksDevicePrefix = "luks-" + devPrefix = "/dev/" +) // waitForDevice accepts a device name and checks if it is present func waitForDevice(ctx context.Context, device string) error { @@ -42,7 +46,7 @@ func flushDevice(ctx context.Context, deviceInfo *ScsiDeviceInfo, force bool) er defer Logc(ctx).Debug("<<<< devices.flushDevice") for _, device := range deviceInfo.Devices { - err := flushOneDevice(ctx, "/dev/"+device) + err := flushOneDevice(ctx, devPrefix+device) if err != nil && !force { // Return error only if this is a standalone device, i.e. no multipath device is present for this device. 
// If a multipath device exists, then it should be flushed before flushing the device, @@ -153,7 +157,7 @@ func ISCSIRescanDevices(ctx context.Context, targetIQN string, lunID int32, minS allLargeEnough := true for _, diskDevice := range deviceInfo.Devices { - size, err := getISCSIDiskSize(ctx, "/dev/"+diskDevice) + size, err := getISCSIDiskSize(ctx, devPrefix+diskDevice) if err != nil { return err } @@ -173,7 +177,7 @@ func ISCSIRescanDevices(ctx context.Context, targetIQN string, lunID int32, minS if !allLargeEnough { time.Sleep(time.Second) for _, diskDevice := range deviceInfo.Devices { - size, err := getISCSIDiskSize(ctx, "/dev/"+diskDevice) + size, err := getISCSIDiskSize(ctx, devPrefix+diskDevice) if err != nil { return err } @@ -186,7 +190,7 @@ func ISCSIRescanDevices(ctx context.Context, targetIQN string, lunID int32, minS if deviceInfo.MultipathDevice != "" { multipathDevice := deviceInfo.MultipathDevice - size, err := getISCSIDiskSize(ctx, "/dev/"+multipathDevice) + size, err := getISCSIDiskSize(ctx, devPrefix+multipathDevice) if err != nil { return err } @@ -199,7 +203,7 @@ func ISCSIRescanDevices(ctx context.Context, targetIQN string, lunID int32, minS return err } time.Sleep(time.Second) - size, err = getISCSIDiskSize(ctx, "/dev/"+multipathDevice) + size, err = getISCSIDiskSize(ctx, devPrefix+multipathDevice) if err != nil { return err } @@ -224,7 +228,7 @@ func reloadMultipathDevice(ctx context.Context, multipathDevice string) error { return fmt.Errorf("cannot reload an empty multipathDevice") } - _, err := command.ExecuteWithTimeout(ctx, "multipath", 30*time.Second, true, "-r", "/dev/"+multipathDevice) + _, err := command.ExecuteWithTimeout(ctx, "multipath", 10*time.Second, true, "-r", devPrefix+multipathDevice) if err != nil { Logc(ctx).WithFields(LogFields{ "device": multipathDevice, @@ -328,7 +332,7 @@ func listAllISCSIDevices(ctx context.Context) { dmLog := make([]string, 0) sdLog := make([]string, 0) sysLog := make([]string, 0) - entries, _ := 
os.ReadDir("/dev/") + entries, _ := os.ReadDir(devPrefix) for _, entry := range entries { if strings.HasPrefix(entry.Name(), "dm-") { dmLog = append(dmLog, entry.Name()) @@ -457,7 +461,7 @@ func multipathFlushDevice(ctx context.Context, deviceInfo *ScsiDeviceInfo) error return nil } - devicePath := "/dev/" + deviceInfo.MultipathDevice + devicePath := devPrefix + deviceInfo.MultipathDevice deviceErr := canFlushMultipathDevice(ctx, devicePath) if deviceErr != nil { @@ -507,7 +511,7 @@ func GetMountedISCSIDevices(ctx context.Context) ([]*ScsiDeviceInfo, error) { mountedDevices := make([]string, 0) for _, procMount := range procSelfMountinfo { - hasDevMountSourcePrefix := strings.HasPrefix(procMount.MountSource, "/dev/") + hasDevMountSourcePrefix := strings.HasPrefix(procMount.MountSource, devPrefix) hasPvcMountPoint := strings.Contains(procMount.MountPoint, "/pvc-") if !hasPvcMountPoint { @@ -522,7 +526,7 @@ func GetMountedISCSIDevices(ctx context.Context) ([]*ScsiDeviceInfo, error) { Logc(ctx).Error(err) continue } - mountedDevice = strings.TrimPrefix(device, "/dev/") + mountedDevice = strings.TrimPrefix(device, devPrefix) } else { mountedDevice = strings.TrimPrefix(procMount.Root, "/") } @@ -862,10 +866,215 @@ func findDevicesForMultipathDevice(ctx context.Context, device string) []string return devices } -// PrepareDeviceForRemoval informs Linux that a device will be removed. 
-func PrepareDeviceForRemoval(ctx context.Context, lunID int, iSCSINodeName string, ignoreErrors, force bool) (string, error) { +// compareWithPublishedDevicePath verifies that published path matches the discovered device path +func compareWithPublishedDevicePath(ctx context.Context, publishInfo *VolumePublishInfo, + deviceInfo *ScsiDeviceInfo, +) (bool, error) { + isProbablyGhostDevice := false + discoverMpath := strings.TrimPrefix(deviceInfo.MultipathDevice, devPrefix) + publishedMpath := strings.TrimPrefix(publishInfo.DevicePath, devPrefix) + + if discoverMpath != publishedMpath { + // If this is the case, a wrong multipath device has been identified. + // Reset the Multipath device and disks + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "discoveredMultipathDevice": discoverMpath, + "publishedMultipathDevice": publishedMpath, + }).Debug("Discovered multipath device may not be correct,") + + deviceInfo.MultipathDevice = strings.TrimPrefix(publishedMpath, devPrefix) + deviceInfo.Devices = []string{} + + // Get Device based on the multipath value at the same time identify if it is a ghost device. 
+ devices, err := IscsiUtils.GetMultipathDeviceDisks(ctx, deviceInfo.MultipathDevice) + if err != nil { + return false, fmt.Errorf("failed to verify multipath disks for '%v'; %v ", + deviceInfo.MultipathDevice, err) + } + + isProbablyGhostDevice = devices == nil || len(devices) == 0 + if isProbablyGhostDevice { + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "multipathDevice": deviceInfo.MultipathDevice, + }).Debug("Multipath device may be a ghost device.") + } else { + deviceInfo.Devices = devices + } + + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "multipathDevice": deviceInfo.MultipathDevice, + "devices": deviceInfo.Devices, + }).Debug("Updated Multipath device and devices.") + } else { + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "publishedMultipathDevice": publishedMpath, + "discoveredMultipathDevice": discoverMpath, + "devices": deviceInfo.Devices, + }).Debug("Discovered multipath device is valid.") + } + + return isProbablyGhostDevice, nil +} + +// compareWithPublishedSerialNumber verifies that device serial number matches the discovered LUNs +func compareWithPublishedSerialNumber(ctx context.Context, publishInfo *VolumePublishInfo, + deviceInfo *ScsiDeviceInfo, +) (bool, error) { + isProbablyGhostDevice := false + lunSerialCheckPassed := false + + for _, path := range deviceInfo.DevicePaths { + serial, err := getLunSerial(ctx, path) + if err != nil { + // LUN either isn't scanned yet, or this kernel + // doesn't support VPD page 80 in sysfs. 
Assume + // correctness and move on + Logc(ctx).WithError(err).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "path": path, + }).Error("LUN serial check skipped.") + continue + } + + lunSerialCheckPassed = serial != publishInfo.IscsiLunSerial + if lunSerialCheckPassed { + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "path": path, + }).Error("LUN serial check failed.") + break + } + } + + // It means the multipath device found was wrong + if !lunSerialCheckPassed { + + // Get Device based on the serial number and at the same time identify if it is a ghost device. + // Multipath UUID contains LUN serial in hex format + lunSerialHex := hex.EncodeToString([]byte(publishInfo.IscsiLunSerial)) + multipathDevice, err := IscsiUtils.GetMultipathDeviceBySerial(ctx, lunSerialHex) + if err != nil { + return false, fmt.Errorf("failed to verify multipath device for serial '%v'; %v ", + publishInfo.IscsiLunSerial, err) + } + + deviceInfo.MultipathDevice = strings.TrimPrefix(multipathDevice, devPrefix) + + // Get Device based on the multipath value at the same time identify if it is a ghost device. 
+ devices, err := IscsiUtils.GetMultipathDeviceDisks(ctx, multipathDevice) + if err != nil { + return false, fmt.Errorf("failed to verify multipath disks for '%v', "+ + "serial '%v'; %v", multipathDevice, publishInfo.IscsiLunSerial, err) + } + + isProbablyGhostDevice = devices == nil || len(devices) == 0 + if isProbablyGhostDevice { + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "multipathDevice": multipathDevice, + }).Debug("Multipath device may be a ghost device.") + } else { + deviceInfo.Devices = devices + } + } + + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "multipathDevice": deviceInfo.MultipathDevice, + "devices": deviceInfo.Devices, + }).Debug("Discovered multipath device and devices have valid serial number.") + + return isProbablyGhostDevice, nil +} + +// compareWithAllPublishInfos comparing all publications (allPublishInfos) for +// LUN number uniqueness, if more than one publication exists with the same LUN number +// then it indicates a larger problem that user needs to manually fix +func compareWithAllPublishInfos(ctx context.Context, publishInfo *VolumePublishInfo, + allPublishInfos []VolumePublishInfo, deviceInfo *ScsiDeviceInfo, +) error { + // During unstaging at least 1 publish info should exist else + // there is some issue on the node. 
+ if len(allPublishInfos) < 1 { + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + }).Debug("Missing all the publish infos; re-requesting.") + + return errors.ISCSISameLunNumberError(fmt.Sprintf( + "failed to verify multipath device '%v' with LUN number '%v' due to missing publish infos", + deviceInfo.MultipathDevice, publishInfo.IscsiLunNumber)) + } + + // Identify if multiple publishInfos for a given targetIQN have the same LUN Number + var count int + for _, info := range allPublishInfos { + if publishInfo.IscsiLunNumber == info.IscsiLunNumber && publishInfo.IscsiTargetIQN == info.IscsiTargetIQN { + count++ + } + } + + if count > 1 { + listAllISCSIDevices(ctx) + + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "count": count, + }).Error("Found multiple publish infos with same LUN ID.") + + return fmt.Errorf("found multiple publish infos with same LUN ID '%d'; user need to correct the publish"+ + " information by including the missing 'devicePath' based on `multipath -ll` output", + publishInfo.IscsiLunNumber) + } + + Logc(ctx).WithFields(LogFields{ + "lun": publishInfo.IscsiLunNumber, + "count": count, + }).Debug("Found publish info with the same LUN ID.") + + return nil +} + +// verifyMultipathDevice verifies that device being removed is correct based on published device path, +// device serial number (if present) or comparing all publications (allPublishInfos) for +// LUN number uniqueness. +func verifyMultipathDevice(ctx context.Context, publishInfo *VolumePublishInfo, allPublishInfos []VolumePublishInfo, + deviceInfo *ScsiDeviceInfo, +) (bool, error) { + // Ensure a correct multipath device is being discovered. + // Following steps can be performed: + // 1. If DM device is known, compare it with deviceInfo.MultipathDevice + // If no match check if the DM device is a ghost device by checking /sys/block.../slaves and remove it. + // 2. 
Else if LUN SerialNumber is available, compare it with deviceInfo.Devices Serial Number + // If no match, find a DM device with the matching serial number, + // if a ghost device by checking /sys/block.../uuid then remove it. + // 3. Else if Check all tracking infos to ensure no more than 1 tracking files have the same LUN number. + // If multiple are found, then it requires user intervention. + + if publishInfo.DevicePath != "" { + return compareWithPublishedDevicePath(ctx, publishInfo, deviceInfo) + } else if publishInfo.IscsiLunSerial != "" { + return compareWithPublishedSerialNumber(ctx, publishInfo, deviceInfo) + } + + return false, compareWithAllPublishInfos(ctx, publishInfo, allPublishInfos, deviceInfo) +} + +// PrepareDeviceForRemoval informs Linux that a device will be removed, the function +// also verifies that device being removed is correct based on published device path, +// device serial number (if present) or comparing all publications (allPublishInfos) for +// LUN number uniqueness. 
+func PrepareDeviceForRemoval(ctx context.Context, publishInfo *VolumePublishInfo, allPublishInfos []VolumePublishInfo, ignoreErrors, + force bool, +) (string, error) { GenerateRequestContextForLayer(ctx, LogLayerUtils) + lunID := int(publishInfo.IscsiLunNumber) + iSCSINodeName := publishInfo.IscsiTargetIQN + fields := LogFields{ "lunID": lunID, "iSCSINodeName": iSCSINodeName, @@ -893,9 +1102,16 @@ func PrepareDeviceForRemoval(ctx context.Context, lunID int, iSCSINodeName strin return multipathDevice, nil } + if publishInfo.IscsiTargetPortal != "" /* CSI Case */ { + _, err = verifyMultipathDevice(ctx, publishInfo, allPublishInfos, deviceInfo) + if err != nil { + return multipathDevice, err + } + } + performDeferredDeviceRemoval, err = removeSCSIDevice(ctx, deviceInfo, ignoreErrors, force) if performDeferredDeviceRemoval && deviceInfo.MultipathDevice != "" { - multipathDevice = "/dev/" + deviceInfo.MultipathDevice + multipathDevice = devPrefix + deviceInfo.MultipathDevice } return multipathDevice, err @@ -1013,6 +1229,7 @@ type ScsiDeviceInfo struct { Target string LUN string Devices []string + DevicePaths []string MultipathDevice string Filesystem string IQN string @@ -1065,9 +1282,9 @@ func getDeviceInfoForLUN( var devicePath string if multipathDevice != "" { - devicePath = "/dev/" + multipathDevice + devicePath = devPrefix + multipathDevice } else { - devicePath = "/dev/" + devices[0] + devicePath = devPrefix + devices[0] } fsType := "" @@ -1084,7 +1301,7 @@ func getDeviceInfoForLUN( } Logc(ctx).WithFields(LogFields{ - "LUN": strconv.Itoa(lunID), + "lun": strconv.Itoa(lunID), "multipathDevice": multipathDevice, "fsType": fsType, "deviceNames": devices, @@ -1095,6 +1312,7 @@ func getDeviceInfoForLUN( LUN: strconv.Itoa(lunID), MultipathDevice: multipathDevice, Devices: devices, + DevicePaths: paths, Filesystem: fsType, IQN: iSCSINodeName, HostSessionMap: hostSessionMap, @@ -1121,7 +1339,7 @@ func getDeviceInfoForMountPath(ctx context.Context, mountpath string) 
(*ScsiDevi return nil, err } - device = strings.TrimPrefix(device, "/dev/") + device = strings.TrimPrefix(device, devPrefix) var deviceInfo *ScsiDeviceInfo diff --git a/utils/errors/errors.go b/utils/errors/errors.go index 3afad7ef7..8edeb4ecf 100644 --- a/utils/errors/errors.go +++ b/utils/errors/errors.go @@ -593,6 +593,28 @@ func IsISCSIDeviceFlushError(err error) bool { return ok } +// /////////////////////////////////////////////////////////////////////////// +// iSCSISameLunNumberError +// /////////////////////////////////////////////////////////////////////////// + +type iSCSISameLunNumberError struct { + message string +} + +func (e *iSCSISameLunNumberError) Error() string { return e.message } + +func ISCSISameLunNumberError(message string) error { + return &iSCSISameLunNumberError{message} +} + +func IsISCSISameLunNumberError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*iSCSISameLunNumberError) + return ok +} + // /////////////////////////////////////////////////////////////////////////// // tooManyRequestsError (HTTP 429) // /////////////////////////////////////////////////////////////////////////// diff --git a/utils/iscsi.go b/utils/iscsi.go index f53437ec4..4d6638ba2 100644 --- a/utils/iscsi.go +++ b/utils/iscsi.go @@ -5,6 +5,7 @@ package utils import ( "context" "encoding/binary" + "encoding/hex" "fmt" "os" "os/exec" @@ -58,17 +59,19 @@ var ( // AttachISCSIVolumeRetry attaches a volume with retry by invoking AttachISCSIVolume with backoff. 
func AttachISCSIVolumeRetry( ctx context.Context, name, mountpoint string, publishInfo *VolumePublishInfo, secrets map[string]string, timeout time.Duration, -) error { +) (int64, error) { Logc(ctx).Debug(">>>> iscsi.AttachISCSIVolumeRetry") defer Logc(ctx).Debug("<<<< iscsi.AttachISCSIVolumeRetry") var err error + var mpathSize int64 if err = ISCSIPreChecks(ctx); err != nil { - return err + return mpathSize, err } checkAttachISCSIVolume := func() error { - return AttachISCSIVolume(ctx, name, mountpoint, publishInfo, secrets) + mpathSize, err = AttachISCSIVolume(ctx, name, mountpoint, publishInfo, secrets) + return err } attachNotify := func(err error, duration time.Duration) { @@ -85,18 +88,21 @@ func AttachISCSIVolumeRetry( attachBackoff.MaxElapsedTime = timeout err = backoff.RetryNotify(checkAttachISCSIVolume, attachBackoff, attachNotify) - return err + return mpathSize, err } // AttachISCSIVolume attaches the volume to the local host. This method must be able to accomplish its task using only the data passed in. // It may be assumed that this method always runs on the host to which the volume will be attached. If the mountpoint // parameter is specified, the volume will be mounted. The device path is set on the in-out publishInfo parameter // so that it may be mounted later instead. 
-func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo *VolumePublishInfo, secrets map[string]string) error { +func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo *VolumePublishInfo, + secrets map[string]string, +) (int64, error) { Logc(ctx).Debug(">>>> iscsi.AttachISCSIVolume") defer Logc(ctx).Debug("<<<< iscsi.AttachISCSIVolume") var err error + var mpathSize int64 lunID := int(publishInfo.IscsiLunNumber) var portals []string @@ -125,38 +131,38 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo }).Debug("Attaching iSCSI volume.") if err = ISCSIPreChecks(ctx); err != nil { - return err + return mpathSize, err } // Ensure we are logged into correct portals pendingPortalsToLogin, loggedIn, err := portalsToLogin(ctx, publishInfo.IscsiTargetIQN, portals) if err != nil { - return err + return mpathSize, err } newLogin, err := EnsureISCSISessions(ctx, publishInfo, pendingPortalsToLogin) if !loggedIn && !newLogin { - return err + return mpathSize, err } // First attempt to fix invalid serials by rescanning them err = handleInvalidSerials(ctx, lunID, publishInfo.IscsiTargetIQN, publishInfo.IscsiLunSerial, rescanOneLun) if err != nil { - return err + return mpathSize, err } // Then attempt to fix invalid serials by purging them (to be scanned // again later) err = handleInvalidSerials(ctx, lunID, publishInfo.IscsiTargetIQN, publishInfo.IscsiLunSerial, purgeOneLun) if err != nil { - return err + return mpathSize, err } // Scan the target and wait for the device(s) to appear err = waitForDeviceScan(ctx, lunID, publishInfo.IscsiTargetIQN) if err != nil { Logc(ctx).Errorf("Could not find iSCSI device: %+v", err) - return err + return mpathSize, err } // At this point if the serials are still invalid, give up so the @@ -168,21 +174,21 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo } err = handleInvalidSerials(ctx, lunID, publishInfo.IscsiTargetIQN, 
publishInfo.IscsiLunSerial, failHandler) if err != nil { - return err + return mpathSize, err } // Wait for multipath device i.e. /dev/dm-* for the given LUN err = waitForMultipathDeviceForLUN(ctx, lunID, publishInfo.IscsiTargetIQN) if err != nil { - return err + return mpathSize, err } // Lookup all the SCSI device information deviceInfo, err := getDeviceInfoForLUN(ctx, lunID, publishInfo.IscsiTargetIQN, false, false) if err != nil { - return fmt.Errorf("error getting iSCSI device information: %v", err) + return mpathSize, fmt.Errorf("error getting iSCSI device information: %v", err) } else if deviceInfo == nil { - return fmt.Errorf("could not get iSCSI device information for LUN %d", lunID) + return mpathSize, fmt.Errorf("could not get iSCSI device information for LUN %d", lunID) } Logc(ctx).WithFields(LogFields{ @@ -192,24 +198,73 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo "iqn": deviceInfo.IQN, }).Debug("Found device.") - // Make sure we use the proper device (multipath if in use) + // Make sure we use the proper device deviceToUse := deviceInfo.Devices[0] if deviceInfo.MultipathDevice != "" { deviceToUse = deviceInfo.MultipathDevice + + // To avoid LUN ID conflict with a ghost device below checks + // are necessary: + // Conflict 1: Due to race conditons, it is possible a ghost + // DM device is discovered instead of the actual + // DM device. + // Conflict 2: Some OS like RHEL displays the ghost device size + // instead of the actual LUN size. + // + // Below check ensures that the correct device with the correct + // size is being discovered. 
+ + // If LUN Serial Number exists, then compare it with DM + // device's UUID in sysfs + if err = verifyMultipathDeviceSerial(ctx, deviceToUse, publishInfo.IscsiLunSerial); err != nil { + return mpathSize, err + } + + // Once the multipath device has been found, compare its size with + // the size of one of the devices, if it differs then mark it for + // resize after the staging. + correctMpathSize, mpathSizeCorrect, err := verifyMultipathDeviceSize(ctx, deviceToUse, deviceInfo.Devices[0]) + if err != nil { + Logc(ctx).WithFields(LogFields{ + "scsiLun": deviceInfo.LUN, + "multipathDevice": deviceInfo.MultipathDevice, + "device": deviceInfo.Devices[0], + "iqn": deviceInfo.IQN, + "err": err, + }).Error("Failed to verify multipath device size.") + + return mpathSize, fmt.Errorf("failed to verify multipath device %s size", deviceInfo.MultipathDevice) + } + + if !mpathSizeCorrect { + mpathSize = correctMpathSize + + Logc(ctx).WithFields(LogFields{ + "scsiLun": deviceInfo.LUN, + "multipathDevice": deviceInfo.MultipathDevice, + "device": deviceInfo.Devices[0], + "iqn": deviceInfo.IQN, + "mpathSize": mpathSize, + }).Error("Multipath device size does not match device size.") + } + } else { + return mpathSize, fmt.Errorf("could not find multipath device for LUN %d", lunID) } + if deviceToUse == "" { - return fmt.Errorf("could not determine device to use for %v", name) + return mpathSize, fmt.Errorf("could not determine device to use for %v", name) } devicePath := "/dev/" + deviceToUse if err := waitForDevice(ctx, devicePath); err != nil { - return fmt.Errorf("could not find device %v; %s", devicePath, err) + return mpathSize, fmt.Errorf("could not find device %v; %s", devicePath, err) } var isLUKSDevice, luksFormatted bool if publishInfo.LUKSEncryption != "" { isLUKSDevice, err = strconv.ParseBool(publishInfo.LUKSEncryption) if err != nil { - return fmt.Errorf("could not parse LUKSEncryption into a bool, got %v", publishInfo.LUKSEncryption) + return mpathSize, 
fmt.Errorf("could not parse LUKSEncryption into a bool, got %v", + publishInfo.LUKSEncryption) } } @@ -217,7 +272,7 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo luksDevice, _ := NewLUKSDevice(devicePath, name) luksFormatted, err = EnsureLUKSDeviceMappedOnHost(ctx, luksDevice, name, secrets) if err != nil { - return err + return mpathSize, err } devicePath = luksDevice.MappedDevicePath() } @@ -226,36 +281,36 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo publishInfo.DevicePath = devicePath if publishInfo.FilesystemType == fsRaw { - return nil + return mpathSize, nil } existingFstype, err := getDeviceFSType(ctx, devicePath) if err != nil { - return err + return mpathSize, err } if existingFstype == "" { if !isLUKSDevice { if unformatted, err := isDeviceUnformatted(ctx, devicePath); err != nil { Logc(ctx).WithField("device", devicePath).Errorf("Unable to identify if the device is unformatted; err: %v", err) - return err + return mpathSize, err } else if !unformatted { Logc(ctx).WithField("device", devicePath).Errorf("Device is not unformatted; err: %v", err) - return fmt.Errorf("device %v is not unformatted", devicePath) + return mpathSize, fmt.Errorf("device %v is not unformatted", devicePath) } } else { // We can safely assume if we just luksFormatted the device, we can also add a filesystem without dataloss if !luksFormatted { Logc(ctx).WithField("device", devicePath).Errorf("Unable to identify if the luks device is empty; err: %v", err) - return err + return mpathSize, err } } Logc(ctx).WithFields(LogFields{"volume": name, "fstype": publishInfo.FilesystemType}).Debug("Formatting LUN.") err := formatVolume(ctx, devicePath, publishInfo.FilesystemType) if err != nil { - return fmt.Errorf("error formatting LUN %s, device %s: %v", name, deviceToUse, err) + return mpathSize, fmt.Errorf("error formatting LUN %s, device %s: %v", name, deviceToUse, err) } } else if existingFstype != unknownFstype 
&& existingFstype != publishInfo.FilesystemType { Logc(ctx).WithFields(LogFields{ @@ -263,7 +318,7 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo "existingFstype": existingFstype, "requestedFstype": publishInfo.FilesystemType, }).Error("LUN already formatted with a different file system type.") - return fmt.Errorf("LUN %s, device %s already formatted with other filesystem: %s", + return mpathSize, fmt.Errorf("LUN %s, device %s already formatted with other filesystem: %s", name, deviceToUse, existingFstype) } else { Logc(ctx).WithFields(LogFields{ @@ -278,7 +333,7 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo // even if they are completely and automatically fixed, so we don't return any error here. mounted, err := IsMounted(ctx, devicePath, "", "") if err != nil { - return err + return mpathSize, err } if !mounted { _ = repairVolume(ctx, devicePath, publishInfo.FilesystemType) @@ -287,12 +342,12 @@ func AttachISCSIVolume(ctx context.Context, name, mountpoint string, publishInfo // Optionally mount the device if mountpoint != "" { if err := MountDevice(ctx, devicePath, mountpoint, publishInfo.MountOptions, false); err != nil { - return fmt.Errorf("error mounting LUN %v, device %v, mountpoint %v; %s", + return mpathSize, fmt.Errorf("error mounting LUN %v, device %v, mountpoint %v; %s", name, deviceToUse, mountpoint, err) } } - return nil + return mpathSize, nil } // GetInitiatorIqns returns parsed contents of /etc/iscsi/initiatorname.iscsi @@ -376,6 +431,89 @@ func (h *IscsiReconcileHelper) GetDevicesForLUN(paths []string) ([]string, error return devices, nil } +// GetMultipathDeviceUUID find the /sys/block/dmX/dm/uuid UUID that contains DM device serial in hex format. 
+func (h *IscsiReconcileHelper) GetMultipathDeviceUUID(multipathDevicePath string) (string, error) { + multipathDevice := strings.TrimPrefix(multipathDevicePath, "/dev/") + + deviceUUIDPath := chrootPathPrefix + fmt.Sprintf("/sys/block/%s/dm/uuid", multipathDevice) + + exists, err := PathExists(deviceUUIDPath) + if !exists || err != nil { + return "", errors.NotFoundError("multipath device '%s' UUID not found", multipathDevice) + } + + UUID, err := os.ReadFile(deviceUUIDPath) + if err != nil { + return "", err + } + + return string(UUID), nil +} + +// GetMultipathDeviceDisks find the /sys/block/dmX/slaves/sdX disks. +func (h *IscsiReconcileHelper) GetMultipathDeviceDisks(ctx context.Context, multipathDevicePath string) ([]string, + error, +) { + devices := make([]string, 0) + multipathDevice := strings.TrimPrefix(multipathDevicePath, "/dev/") + + diskPath := chrootPathPrefix + fmt.Sprintf("/sys/block/%s/slaves/", multipathDevice) + diskDirs, err := os.ReadDir(diskPath) + if err != nil { + Logc(ctx).WithError(err).Errorf("Could not read %s", diskDirs) + return nil, fmt.Errorf("failed to identify multipath device disks; unable to read '%s'", diskDirs) + } + + for _, diskDir := range diskDirs { + contentName := diskDir.Name() + if !strings.HasPrefix(contentName, "sd") { + continue + } + + devices = append(devices, contentName) + } + + return devices, nil +} + +// GetMultipathDeviceBySerial find DM device whose UUID /sys/block/dmX/dm/uuid contains serial in hex format. 
+func (h *IscsiReconcileHelper) GetMultipathDeviceBySerial(ctx context.Context, hexSerial string) (string, error) { + sysPath := chrootPathPrefix + "/sys/block/" + + blockDirs, err := os.ReadDir(sysPath) + if err != nil { + Logc(ctx).WithError(err).Errorf("Could not read %s", sysPath) + return "", fmt.Errorf("failed to find multipath device by serial; unable to read '%s'", sysPath) + } + + for _, blockDir := range blockDirs { + dmDeviceName := blockDir.Name() + if !strings.HasPrefix(dmDeviceName, "dm-") { + continue + } + + uuid, err := h.GetMultipathDeviceUUID(dmDeviceName) + if err != nil { + Logc(ctx).WithFields(LogFields{ + "UUID": hexSerial, + "multipathDevice": dmDeviceName, + "err": err, + }).Error("Failed to get UUID of multipath device.") + continue + } + + if strings.Contains(uuid, hexSerial) { + Logc(ctx).WithFields(LogFields{ + "UUID": hexSerial, + "multipathDevice": dmDeviceName, + }).Debug("Found multipath device by UUID.") + return dmDeviceName, nil + } + } + + return "", errors.NotFoundError("no multipath device found") +} + // waitForDeviceScan scans all paths to a specific LUN and waits until all // SCSI disk-by-path devices for that LUN are present on the host. 
func waitForDeviceScan(ctx context.Context, lunID int, iSCSINodeName string) error { @@ -935,6 +1073,88 @@ func handleInvalidSerials( return nil } +// verifyMultipathDeviceSerial compares the serial number of the DM device with the serial +// of the LUN to ensure the correct DM device has been discovered. +func verifyMultipathDeviceSerial( + ctx context.Context, multipathDevice, lunSerial string, +) error { + if lunSerial == "" { + // Empty string means don't care + return nil + } + + // Multipath UUID contains LUN serial in hex format + lunSerialHex := hex.EncodeToString([]byte(lunSerial)) + + multipathDeviceUUID, err := IscsiUtils.GetMultipathDeviceUUID(multipathDevice) + if err != nil { + if errors.IsNotFoundError(err) { + // If UUID does not exist, then it is hard to verify the DM serial + Logc(ctx).WithFields(LogFields{ + "multipathDevice": multipathDevice, + "lunSerialNumber": lunSerial, + }).Warn("Unable to verify multipath device serial.") + + return nil + } + + Logc(ctx).WithFields(LogFields{ + "multipathDevice": multipathDevice, + "lunSerialNumber": lunSerial, + "error": err, + }).Error("Failed to verify multipath device serial.") + + return err + } + + if !strings.Contains(multipathDeviceUUID, lunSerialHex) { + Logc(ctx).WithFields(LogFields{ + "multipathDevice": multipathDevice, + "lunSerialNumber": lunSerial, + "lunSerialNumberHex": lunSerialHex, + "multipathDeviceUUID": multipathDeviceUUID, + }).Error("Failed to verify multipath device serial.") + + return fmt.Errorf("multipath device '%s' serial check failed", multipathDevice) + } + + Logc(ctx).WithFields(LogFields{ + "multipathDevice": multipathDevice, + "lunSerialNumber": lunSerial, + "lunSerialNumberHex": lunSerialHex, + "multipathDeviceUUID": multipathDeviceUUID, + }).Debug("Multipath device serial check passed.") + + return nil +} + +// verifyMultipathDeviceSize compares the size of the DM device with the size +// of a device to ensure the correct DM device has the correct size. 
+func verifyMultipathDeviceSize( + ctx context.Context, multipathDevice, device string, +) (int64, bool, error) { + deviceSize, err := getISCSIDiskSize(ctx, "/dev/"+device) + if err != nil { + return 0, false, err + } + + mpathSize, err := getISCSIDiskSize(ctx, "/dev/"+multipathDevice) + if err != nil { + return 0, false, err + } + + if deviceSize != mpathSize { + return deviceSize, false, nil + } + + Logc(ctx).WithFields(LogFields{ + "multipathDevice": multipathDevice, + "device": device, + }).Debug("Multipath device size check passed.") + + return 0, true, nil +} + // GetISCSIHostSessionMapForTarget returns a map of iSCSI host numbers to iSCSI session numbers // for a given iSCSI target. func (h *IscsiReconcileHelper) GetISCSIHostSessionMapForTarget(ctx context.Context, iSCSINodeName string) map[int]int { diff --git a/utils/iscsi_types.go b/utils/iscsi_types.go index 550a57492..d9c379f31 100644 --- a/utils/iscsi_types.go +++ b/utils/iscsi_types.go @@ -9,6 +9,9 @@ import ( type IscsiReconcileUtils interface { GetISCSIHostSessionMapForTarget(context.Context, string) map[int]int GetSysfsBlockDirsForLUN(int, map[int]int) []string + GetMultipathDeviceUUID(string) (string, error) + GetMultipathDeviceBySerial(context.Context, string) (string, error) + GetMultipathDeviceDisks(context.Context, string) ([]string, error) GetDevicesForLUN(paths []string) ([]string, error) ReconcileISCSIVolumeInfo(ctx context.Context, trackingInfo *VolumeTrackingInfo) (bool, error) } From d9550d88e721404bc985205ebc0e89bd0e4d931d Mon Sep 17 00:00:00 2001 From: Joe Webster <31218426+jwebster7@users.noreply.github.com> Date: Fri, 21 Jul 2023 21:20:51 -0500 Subject: [PATCH 17/17] Bump ASUP version to 23.04 --- config/config.go | 2 +- deploy/crds/tridentorchestrator_cr_autosupport.yaml | 2 +- deploy/crds/tridentorchestrator_cr_customimage.yaml | 2 +- helm/trident-operator/values.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/config/config.go b/config/config.go index 
b78d8482a..c28429721 100644 --- a/config/config.go +++ b/config/config.go @@ -176,7 +176,7 @@ const ( /* Kubernetes operator constants */ OperatorContainerName = "trident-operator" - DefaultAutosupportImage = "docker.io/netapp/trident-autosupport:23.01" + DefaultAutosupportImage = "docker.io/netapp/trident-autosupport:23.04" // IscsiSelfHealingInterval is an interval with which the iSCSI self-healing thread is called periodically IscsiSelfHealingInterval = 300 * time.Second diff --git a/deploy/crds/tridentorchestrator_cr_autosupport.yaml b/deploy/crds/tridentorchestrator_cr_autosupport.yaml index 164de9e6b..d963cab57 100644 --- a/deploy/crds/tridentorchestrator_cr_autosupport.yaml +++ b/deploy/crds/tridentorchestrator_cr_autosupport.yaml @@ -6,5 +6,5 @@ spec: debug: true namespace: trident silenceAutosupport: false - autosupportImage: "netapp/trident-autosupport:23.01" + autosupportImage: "netapp/trident-autosupport:23.04" autosupportProxy: "http://proxy.example.com:8888" diff --git a/deploy/crds/tridentorchestrator_cr_customimage.yaml b/deploy/crds/tridentorchestrator_cr_customimage.yaml index d19516db0..c7465dab6 100644 --- a/deploy/crds/tridentorchestrator_cr_customimage.yaml +++ b/deploy/crds/tridentorchestrator_cr_customimage.yaml @@ -5,4 +5,4 @@ metadata: spec: debug: true namespace: trident - tridentImage: localhost:5000/netapp/trident:23.01 + tridentImage: localhost:5000/netapp/trident:23.04 diff --git a/helm/trident-operator/values.yaml b/helm/trident-operator/values.yaml index 92aec2788..a6c109101 100644 --- a/helm/trident-operator/values.yaml +++ b/helm/trident-operator/values.yaml @@ -77,7 +77,7 @@ tridentSilenceAutosupport: false tridentAutosupportImage: "" # tridentAutosupportImageTag allows overriding the tag of the image for Trident's Autosupport container. -tridentAutosupportImageTag: "23.01" +tridentAutosupportImageTag: "23.04" # tridentAutosupportProxy allows Trident's autosupport container to phone home via an HTTP proxy. 
tridentAutosupportProxy: ""