diff --git a/README.md b/README.md index 8328e735..b3d254cf 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ ## Description CSI Driver for Unity XT is part of the [CSM (Container Storage Modules)](https://github.com/dell/csm) open-source suite of Kubernetes storage enablers for Dell products. CSI Driver for Unity XT is a Container Storage Interface (CSI) driver that provides support for provisioning persistent storage using Dell Unity XT storage array. -It supports CSI specification version 1.5. +It supports CSI specification version 1.6. This project may be compiled as a stand-alone binary using Golang that, when run, provides a valid CSI endpoint. It also can be used as a precompiled container image. diff --git a/go.mod b/go.mod index 9bd2748c..70956662 100644 --- a/go.mod +++ b/go.mod @@ -3,14 +3,14 @@ module github.com/dell/csi-unity go 1.21 require ( - github.com/container-storage-interface/spec v1.5.0 + github.com/container-storage-interface/spec v1.6.0 github.com/cucumber/godog v0.10.0 github.com/dell/dell-csi-extensions/podmon v1.2.0 github.com/dell/gobrick v1.9.0 github.com/dell/gocsi v1.8.0 github.com/dell/gofsutil v1.13.1 github.com/dell/goiscsi v1.8.0 - github.com/dell/gounity v1.15.0 + github.com/dell/gounity v1.15.1-0.20231027105111-fc1c259b8753 github.com/fsnotify/fsnotify v1.4.9 github.com/golang/protobuf v1.5.3 github.com/kubernetes-csi/csi-lib-utils v0.7.0 @@ -20,6 +20,7 @@ require ( go.uber.org/atomic v1.7.0 golang.org/x/net v0.14.0 google.golang.org/grpc v1.57.0 + google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/apimachinery v0.22.2 @@ -74,7 +75,6 @@ require ( google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230815205213-6bfd019c3878 // indirect - google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.51.0 // indirect k8s.io/api v0.22.2 // indirect diff --git a/go.sum b/go.sum index 28be6630..668b2ac0 100644 --- a/go.sum +++ b/go.sum @@ -68,8 +68,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo= -github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= +github.com/container-storage-interface/spec v1.6.0 h1:vwN9uCciKygX/a0toYryoYD5+qI9ZFeAMuhEEKO+JBA= +github.com/container-storage-interface/spec v1.6.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= @@ -101,8 +101,8 @@ github.com/dell/goiscsi v1.8.0 h1:kocGVOdgnufc6eGpfmwP66hyhY7OVgIafaS/+uM6ogU= github.com/dell/goiscsi v1.8.0/go.mod h1:PTlQGJaGKYgia95mGwwHSBgvfOr3BfLIjGNh1HT6p+s= github.com/dell/gonvme v1.4.0 h1:SK94ETt0pYZbaKkRJOcq81TbrzC38ufBX+w4uKwJnks= github.com/dell/gonvme 
v1.4.0/go.mod h1:fIu54BDTyIu8JOTXo6Q0BqMF1tOjpO+wKXVXjLReR2o= -github.com/dell/gounity v1.15.0 h1:dGi0E3uZ6EQisBDD6idbKDE/RsMDCiuSLNyVLckfz9w= -github.com/dell/gounity v1.15.0/go.mod h1:w634+uuEp6QkkP/j2TFymCPb8gQQg9d4ZZiVrRreuLQ= +github.com/dell/gounity v1.15.1-0.20231027105111-fc1c259b8753 h1:Tm5AfD5a6L/jEeNg97/SSBpxQtpf49AE4drlHrvRdhI= +github.com/dell/gounity v1.15.1-0.20231027105111-fc1c259b8753/go.mod h1:w634+uuEp6QkkP/j2TFymCPb8gQQg9d4ZZiVrRreuLQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= diff --git a/service/controller.go b/service/controller.go index 760d14a9..7f7e99c0 100644 --- a/service/controller.go +++ b/service/controller.go @@ -31,6 +31,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/wrapperspb" ) const ( @@ -88,8 +89,10 @@ type CRParams struct { type resourceType string -const volumeType resourceType = "volume" -const snapshotType resourceType = "snapshot" +const ( + volumeType resourceType = "volume" + snapshotType resourceType = "snapshot" +) func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { ctx, log, rid := GetRunidLog(ctx) @@ -161,7 +164,7 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest } } - //Create Fresh Volume + // Create Fresh Volume if protocol == NFS { nasServer, ok := params[keyNasServer] @@ -169,7 +172,7 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "`%s` is a required parameter", keyNasServer)) } - //Add AdditionalFilesystemSize in size as Unity XT use this much size for metadata in filesystem + // Add AdditionalFilesystemSize in size as Unity XT use this much size for metadata in filesystem size += AdditionalFilesystemSize // log all parameters used in Create File System call @@ -186,7 +189,7 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest } log.WithFields(fields).Infof("Executing Create File System with following fields") - //Idempotency check + // Idempotency check fileAPI := gounity.NewFilesystem(unity) filesystem, _ := fileAPI.FindFilesystemByName(ctx, volName) if filesystem != nil { @@ -202,9 +205,9 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest } log.Debug("Filesystem does not exist, proceeding to create new filesystem") - //Hardcoded ProtocolNFS to 0 in order to support only NFS + // Hardcoded ProtocolNFS to 0 in order to support only NFS resp, err := fileAPI.CreateFilesystem(ctx, volName, storagePool, desc, nasServer, uint64(size), int(tieringPolicy), int(hostIoSize), ProtocolNFS, thin, dataReduction) - //Add method to create filesystem + // Add method to create filesystem if err != nil { log.Debugf("Filesystem create response:%v Error:%v", resp, err) return nil, status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Create Filesystem %s failed with error: %v", volName, err)) @@ -246,7 +249,7 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest hostIOLimitID = hostIOLimit.IoLimitPolicyContent.ID } - //Idempotency check + // Idempotency 
check vol, _ := volumeAPI.FindVolumeByName(ctx, volName) if vol != nil { content := vol.VolumeContent @@ -278,7 +281,8 @@ func (s *service) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest func (s *service) DeleteVolume( ctx context.Context, req *csi.DeleteVolumeRequest) ( - *csi.DeleteVolumeResponse, error) { + *csi.DeleteVolumeResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing DeleteVolume with args: %+v", *req) var snapErr error @@ -291,10 +295,10 @@ func (s *service) DeleteVolume( return nil, err } deleteVolumeResp := &csi.DeleteVolumeResponse{} - //Not validating protocol here to support deletion of pvcs from v1.0 + // Not validating protocol here to support deletion of pvcs from v1.0 if protocol != NFS { - //Delete logic for FC and iSCSI volumes + // Delete logic for FC and iSCSI volumes var throwErr error err, throwErr = s.deleteBlockVolume(ctx, volID, unity) if throwErr != nil { @@ -303,7 +307,7 @@ func (s *service) DeleteVolume( } else { - //Delete logic for Filesystem + // Delete logic for Filesystem var throwErr error err, snapErr, throwErr = s.deleteFilesystem(ctx, volID, unity) if throwErr != nil { @@ -311,7 +315,7 @@ func (s *service) DeleteVolume( } } - //Idempotency check + // Idempotency check if err == nil { log.Debugf("DeleteVolume successful for volid: [%s]", req.VolumeId) return deleteVolumeResp, nil @@ -326,7 +330,8 @@ func (s *service) DeleteVolume( func (s *service) ControllerPublishVolume( ctx context.Context, req *csi.ControllerPublishVolumeRequest) ( - *csi.ControllerPublishVolumeResponse, error) { + *csi.ControllerPublishVolumeResponse, error, +) { ctx, log, _ := GetRunidLog(ctx) log.Debugf("Executing ControllerPublishVolume with args: %+v", *req) @@ -365,7 +370,7 @@ func (s *service) ControllerPublishVolume( return resp, err } - //Export for NFS + // Export for NFS resp, err := s.exportFilesystem(ctx, volID, hostID, nodeID, arrayID, unity, pinfo, am) return resp, err } @@ -373,7 +378,8 @@ func (s *service) ControllerPublishVolume( func (s *service) ControllerUnpublishVolume( ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) ( - *csi.ControllerUnpublishVolumeResponse, error) { + *csi.ControllerUnpublishVolumeResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing ControllerUnpublishVolume with args: %+v", *req) @@ -413,13 +419,13 @@ func (s *service) ControllerUnpublishVolume( return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "%v", err)) } - //Idempotency check + // Idempotency check content := vol.VolumeContent if len(content.HostAccessResponse) > 0 { hostIDList := make([]string, 0) - //Check if the volume is published to any other node and retain it - RWX raw block + // Check if the volume is published to any other node and retain it - RWX raw block for _, hostaccess := range content.HostAccessResponse { hostcontent := hostaccess.HostContent hostAccessID := hostcontent.ID @@ -441,7 +447,7 @@ func (s *service) ControllerUnpublishVolume( return &csi.ControllerUnpublishVolumeResponse{}, nil } - //Unexport for NFS + // Unexport for NFS err = s.unexportFilesystem(ctx, volID, hostID, nodeID, req.GetVolumeId(), arrayID, unity) if err != nil { return nil, err @@ -496,8 +502,8 @@ func (s *service) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) func (s *service) GetCapacity( ctx context.Context, req *csi.GetCapacityRequest) ( - *csi.GetCapacityResponse, error) { - + *csi.GetCapacityResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing 
GetCapacity with args: %+v", *req) @@ -516,25 +522,42 @@ func (s *service) GetCapacity( } unity, err := s.getUnityClient(ctx, arrayID) - if err != nil { return nil, err } metricsAPI := gounity.NewMetrics(unity) - resp, err := metricsAPI.GetCapacity(ctx) - + capacity, err := metricsAPI.GetCapacity(ctx) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } - log.Infof("Available capacity from the Array: %d", resp.Entries[0].Content.SizeFree) + log.Infof("Available capacity from the Array: %d", capacity.Entries[0].Content.SizeFree) + + maxVolSize, err := s.getMaximumVolumeSize(ctx, arrayID) + if err != nil { + return &csi.GetCapacityResponse{ + AvailableCapacity: int64(capacity.Entries[0].Content.SizeFree), + }, nil + } return &csi.GetCapacityResponse{ - AvailableCapacity: int64(resp.Entries[0].Content.SizeFree), + AvailableCapacity: int64(capacity.Entries[0].Content.SizeFree), + MaximumVolumeSize: wrapperspb.Int64(maxVolSize), }, nil +} +func (s *service) getMaximumVolumeSize(ctx context.Context, arrayID string) (int64, error) { + ctx, log, _ := GetRunidLog(ctx) + unity, err := s.getUnityClient(ctx, arrayID) + volumeAPI := gounity.NewVolume(unity) + maxVolumeSize, err := volumeAPI.GetMaxVolumeSize(ctx, "Limit_MaxLUNSize") + if err != nil { + log.Debugf("GetMaxVolumeSize returning: %v for Array having arrayId %s", err, arrayID) + return 0, err + } + return int64(maxVolumeSize.MaxVolumSizeContent.Limit), nil } func (s *service) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { @@ -550,7 +573,7 @@ func (s *service) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotReq return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "invalid snapshot name [%v]", err)) } - //Source volume is for volume clone or snapshot clone + // Source volume is for volume clone or snapshot clone volID, protocol, arrayID, _, err := s.validateAndGetResourceDetails(ctx, req.SourceVolumeId, volumeType) if err != nil { return nil, err @@ -561,7 +584,7 @@ func (s *service) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotReq return nil, err } - //Idempotency check + // Idempotency check snap, err := s.createIdempotentSnapshot(ctx, req.Name, volID, req.Parameters["description"], req.Parameters["retentionDuration"], protocol, arrayID, false) if err != nil { return nil, err @@ -583,9 +606,9 @@ func (s *service) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotReq } snapAPI := gounity.NewSnapshot(unity) - //Idempotency check + // Idempotency check snap, err := snapAPI.FindSnapshotByID(ctx, snapID) - //snapshot exists, continue deleting the snapshot + // snapshot exists, continue deleting the snapshot if err != nil { log.Info("Snapshot doesn't exists") } @@ -623,7 +646,7 @@ func (s *service) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsReque snapAPI := gounity.NewSnapshot(unity) - //Limiting the number of snapshots to 100 to avoid timeout issues + // Limiting the number of snapshots to 100 to avoid timeout issues if maxEntries > MaxEntriesSnapshot || maxEntries == 0 { maxEntries = MaxEntriesSnapshot } @@ -794,7 +817,7 @@ func (s *service) ControllerExpandVolume(ctx context.Context, req *csi.Controlle } if protocol == NFS { - //Adding Additional size used for metadata + // Adding Additional size used for metadata capacity += AdditionalFilesystemSize filesystemAPI := gounity.NewFilesystem(unity) @@ -808,7 +831,7 @@ func (s *service) ControllerExpandVolume(ctx context.Context, req *csi.Controlle 
return nil, status.Error(codes.Unimplemented, utils.GetMessageWithRunID(rid, "Expand Volume not supported for cloned filesystems(snapshot on array)")) } - //Idempotency check + // Idempotency check if filesystem.FileContent.SizeTotal >= uint64(capacity) { log.Infof("New Filesystem size (%d) is same as existing Filesystem size. Ignoring expand volume operation.", filesystem.FileContent.SizeTotal) expandVolumeResp := &csi.ControllerExpandVolumeResponse{ @@ -832,7 +855,7 @@ func (s *service) ControllerExpandVolume(ctx context.Context, req *csi.Controlle return expandVolumeResp, err } volumeAPI := gounity.NewVolume(unity) - //Idempotency check + // Idempotency check volume, err := volumeAPI.FindVolumeByID(ctx, volID) if err != nil { return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find volume failed with error: %v", err)) @@ -840,7 +863,7 @@ func (s *service) ControllerExpandVolume(ctx context.Context, req *csi.Controlle nodeExpansionRequired := false content := volume.VolumeContent - if len(content.HostAccessResponse) >= 1 { //If the volume has 1 or more host access then set nodeExpansionRequired as true + if len(content.HostAccessResponse) >= 1 { // If the volume has 1 or more host access then set nodeExpansionRequired as true nodeExpansionRequired = true } @@ -877,7 +900,7 @@ func (s *service) getCSIVolumes(volumes []types.Volume) ([]*csi.ListVolumesRespo "Wwn": vol.VolumeContent.Wwn, "StoragePoolID": vol.VolumeContent.Pool.ID, } - //Create CSI volume + // Create CSI volume vi := &csi.Volume{ VolumeId: vol.VolumeContent.ResourceID, CapacityBytes: int64(vol.VolumeContent.SizeTotal), @@ -910,7 +933,7 @@ func (s *service) getCSISnapshots(snaps []types.Snapshot, volID, protocol, array if protocol == NFS { size -= AdditionalFilesystemSize } - //Create CSI Snapshot + // Create CSI Snapshot vi := &csi.Snapshot{ SizeBytes: size, SnapshotId: snapID, @@ -1003,7 +1026,7 @@ func (s *service) createIdempotentSnapshot(ctx context.Context, snapshotName, so snap, _ := snapshotAPI.FindSnapshotByName(ctx, snapshotName) if snap != nil { if snap.SnapshotContent.StorageResource.ID == sourceVolID || (isSnapshot && snap.SnapshotContent.StorageResource.ID == filesystemResp.FileContent.StorageResource.ID) { - //Subtract AdditionalFilesystemSize for Filesystem snapshots + // Subtract AdditionalFilesystemSize for Filesystem snapshots if protocol == NFS { snap.SnapshotContent.Size -= AdditionalFilesystemSize } @@ -1036,7 +1059,7 @@ func (s *service) createIdempotentSnapshot(ctx context.Context, snapshotName, so newSnapshot, _ = snapshotAPI.FindSnapshotByName(ctx, snapshotName) if newSnapshot != nil { - //Subtract AdditionalFilesystemSize for Filesystem snapshots{ + // Subtract AdditionalFilesystemSize for Filesystem snapshots{ if protocol == NFS { newSnapshot.SnapshotContent.Size -= AdditionalFilesystemSize } @@ -1081,7 +1104,6 @@ func (s *service) getHostID(ctx context.Context, arrayID, shortHostname, longHos // createVolumeClone - Method to create a volume clone with idempotency for all protocols func (s *service) createVolumeClone(ctx context.Context, crParams *CRParams, sourceVolID, arrayID string, contentSource *csi.VolumeContentSource, unity *gounity.Client, preferredAccessibility []*csi.Topology) (*csi.CreateVolumeResponse, error) { - ctx, log, rid := GetRunidLog(ctx) if sourceVolID == "" { return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume ID cannot be empty")) @@ -1115,7 +1137,7 @@ func (s *service) createVolumeClone(ctx context.Context, 
crParams *CRParams, sou var snapResp *types.Snapshot var snapErr error if err != nil { - //Filesystem not found - Check if PVC exists as a snapshot [Cloned volume in case of NFS] + // Filesystem not found - Check if PVC exists as a snapshot [Cloned volume in case of NFS] snapResp, snapErr = snapAPI.FindSnapshotByID(ctx, sourceVolID) if snapErr != nil { log.Debugf("Tried to check if PVC exists as a snapshot: %v", snapErr) @@ -1139,10 +1161,10 @@ func (s *service) createVolumeClone(ctx context.Context, crParams *CRParams, sou if snapSize != size { return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Requested size %d should be same as source filesystem size %d", size, snapSize)) } - //Idempotency check + // Idempotency check snapResp, err := snapAPI.FindSnapshotByName(ctx, volName) if snapResp == nil { - //Create Volume from Snapshot(Copy snapshot on array) + // Create Volume from Snapshot(Copy snapshot on array) snapResp, err = s.createFilesystemFromSnapshot(ctx, sourceVolID, volName, arrayID) if err != nil { return nil, err @@ -1170,7 +1192,7 @@ func (s *service) createVolumeClone(ctx context.Context, crParams *CRParams, sou return csiVolResp, nil } - //If protocol is FC or iSCSI + // If protocol is FC or iSCSI volumeAPI := gounity.NewVolume(unity) sourceVolResp, err := volumeAPI.FindVolumeByID(ctx, sourceVolID) if err != nil { @@ -1184,7 +1206,7 @@ func (s *service) createVolumeClone(ctx context.Context, crParams *CRParams, sou volResp, _ := volumeAPI.FindVolumeByName(ctx, volName) if volResp != nil { - //Idempotency Check + // Idempotency Check if volResp.VolumeContent.IsThinClone && len(volResp.VolumeContent.ParentVolume.ID) > 0 && volResp.VolumeContent.ParentVolume.ID == sourceVolID && volResp.VolumeContent.SizeTotal == sourceVolResp.VolumeContent.SizeTotal { log.Infof("Volume %s exists in the requested state as a clone of volume %s", volName, sourceVolResp.VolumeContent.Name) @@ -1195,7 +1217,7 @@ func (s *service) createVolumeClone(ctx context.Context, crParams *CRParams, sou return nil, status.Error(codes.AlreadyExists, utils.GetMessageWithRunID(rid, "Volume with same name %s already exists", volName)) } - //Perform volume cloning + // Perform volume cloning volResp, err = volumeAPI.CreateCloneFromVolume(ctx, volName, sourceVolID) if err != nil { if err == gounity.ErrorCreateSnapshotFailed { @@ -1216,7 +1238,6 @@ func (s *service) createVolumeClone(ctx context.Context, crParams *CRParams, sou // createVolumeFromSnap - Method to create a volume from snapshot with idempotency for all protocols func (s *service) createVolumeFromSnap(ctx context.Context, crParams *CRParams, snapshotID, arrayID string, contentSource *csi.VolumeContentSource, unity *gounity.Client, preferredAccessibility []*csi.Topology) (*csi.CreateVolumeResponse, error) { - ctx, log, rid := GetRunidLog(ctx) if snapshotID == "" { return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Source snapshot ID cannot be empty")) @@ -1265,7 +1286,7 @@ func (s *service) createVolumeFromSnap(ctx context.Context, crParams *CRParams, snapResp, err := snapAPI.FindSnapshotByName(ctx, volName) if snapResp != nil { - //Idempotency check + // Idempotency check if snapResp.SnapshotContent.ParentSnap.ID == snapshotID && snapResp.SnapshotContent.AccessType == int(gounity.ProtocolAccessType) { log.Infof("Filesystem %s exists in the requested state as a volume from snapshot(snapshot on array) %s", volName, snapshotID) snapResp.SnapshotContent.Size -= AdditionalFilesystemSize @@ -1276,7 +1297,7 @@ 
func (s *service) createVolumeFromSnap(ctx context.Context, crParams *CRParams, return nil, status.Error(codes.AlreadyExists, utils.GetMessageWithRunID(rid, "Filesystem with same name %s already exists", volName)) } - //Create Volume from Snapshot(Copy snapshot on array) + // Create Volume from Snapshot(Copy snapshot on array) snapResp, err = s.createFilesystemFromSnapshot(ctx, snapshotID, volName, arrayID) if err != nil { return nil, err @@ -1291,7 +1312,7 @@ func (s *service) createVolumeFromSnap(ctx context.Context, crParams *CRParams, return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Filesystem: %s not found after create. Error: %v", volName, err)) } - //If protocol is FC or iSCSI + // If protocol is FC or iSCSI volID := snapResp.SnapshotContent.StorageResource.ID volumeAPI := gounity.NewVolume(unity) sourceVolResp, err := volumeAPI.FindVolumeByID(ctx, volID) @@ -1311,7 +1332,7 @@ func (s *service) createVolumeFromSnap(ctx context.Context, crParams *CRParams, volResp, _ := volumeAPI.FindVolumeByName(ctx, volName) if volResp != nil { - //Idempotency Check + // Idempotency Check if volResp.VolumeContent.IsThinClone == true && len(volResp.VolumeContent.ParentSnap.ID) > 0 && volResp.VolumeContent.ParentSnap.ID == snapshotID { log.Info("Volume exists in the requested state") csiVolResp := utils.GetVolumeResponseFromVolume(volResp, arrayID, protocol, preferredAccessibility) @@ -1353,7 +1374,7 @@ func (s *service) deleteFilesystem(ctx context.Context, volID string, unity *gou var snapErr error filesystemResp, err := fileAPI.FindFilesystemByID(ctx, volID) if err == nil { - //Validate if filesystem has any NFS or SMB shares or snapshots attached + // Validate if filesystem has any NFS or SMB shares or snapshots attached if len(filesystemResp.FileContent.NFSShare) > 0 || len(filesystemResp.FileContent.CIFSShare) > 0 { return nil, nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Filesystem %s can not be deleted as it has associated NFS or SMB shares.", volID)) } @@ -1370,12 +1391,12 @@ func (s *service) deleteFilesystem(ctx context.Context, volID string, unity *gou } err = fileAPI.DeleteFilesystem(ctx, volID) } else { - //Do not reuse err as it is used for idempotency check + // Do not reuse err as it is used for idempotency check snapshotAPI := gounity.NewSnapshot(unity) snapResp, fsSnapErr := snapshotAPI.FindSnapshotByID(ctx, volID) snapErr = fsSnapErr if fsSnapErr == nil { - //Validate if snapshot has any NFS or SMB shares + // Validate if snapshot has any NFS or SMB shares sourceVolID, err := fileAPI.GetFilesystemIDFromResID(ctx, snapResp.SnapshotContent.StorageResource.ID) if err != nil { return nil, nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Source storage resource: %s filesystem Id not found. 
Error: %v", snapResp.SnapshotContent.StorageResource.ID, err)) @@ -1397,9 +1418,8 @@ func (s *service) deleteFilesystem(ctx context.Context, volID string, unity *gou // deleteBlockVolume - Method to handle delete FC and iSCSI volumes func (s *service) deleteBlockVolume(ctx context.Context, volID string, unity *gounity.Client) (error, error) { - ctx, _, rid := GetRunidLog(ctx) - //Check stale snapshots used for volume cloning and delete if exist + // Check stale snapshots used for volume cloning and delete if exist snapshotAPI := gounity.NewSnapshot(unity) snapsResp, _, snapshotErr := snapshotAPI.ListSnapshots(ctx, 0, 0, volID, "") if snapshotErr != nil { @@ -1421,7 +1441,7 @@ func (s *service) deleteBlockVolume(ctx context.Context, volID string, unity *go if totalSnaps > 0 { return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Volume %s can not be deleted as it has associated snapshots.", volID)) } - //Delete the block volume + // Delete the block volume volumeAPI := gounity.NewVolume(unity) err := volumeAPI.DeleteVolume(ctx, volID) return err, nil @@ -1429,7 +1449,6 @@ func (s *service) deleteBlockVolume(ctx context.Context, volID string, unity *go // exportFilesystem - Method to export filesystem with idempotency func (s *service) exportFilesystem(ctx context.Context, volID, hostID, nodeID, arrayID string, unity *gounity.Client, pinfo map[string]string, am *csi.VolumeCapability_AccessMode) (*csi.ControllerPublishVolumeResponse, error) { - ctx, log, rid := GetRunidLog(ctx) pinfo["filesystem"] = volID fileAPI := gounity.NewFilesystem(unity) @@ -1450,7 +1469,7 @@ func (s *service) exportFilesystem(ctx context.Context, volID, hostID, nodeID, a return nil, err } } - //Create NFS Share if not already present on array + // Create NFS Share if not already present on array nfsShareName := NFSShareNamePrefix + filesystemResp.FileContent.Name if isSnapshot { nfsShareName = NFSShareNamePrefix + snapResp.SnapshotContent.Name @@ -1492,7 +1511,7 @@ func (s *service) exportFilesystem(ctx context.Context, volID, hostID, nodeID, a } } - //Allocate host access to NFS Share with appropriate access mode + // Allocate host access to NFS Share with appropriate access mode nfsShareResp, err := fileAPI.FindNFSShareByID(ctx, nfsShareID) if err != nil { return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Find NFS Share: %s failed. 
Error: %v", nfsShareID, err)) @@ -1554,7 +1573,7 @@ func (s *service) exportFilesystem(ctx context.Context, volID, hostID, nodeID, a if (am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER || am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER || am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER) && otherHostsWithAccess > 0 { return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Other hosts have access on NFS Share: %s", nfsShareID)) } - //Idempotent case + // Idempotent case if foundIdempotent { log.Info("Host has access to the given host and exists in the required state.") return &csi.ControllerPublishVolumeResponse{PublishContext: pinfo}, nil @@ -1580,12 +1599,10 @@ func (s *service) exportFilesystem(ctx context.Context, volID, hostID, nodeID, a log.Debugf("NFS Share: %s is accessible to host: %s with access mode: %s", nfsShareID, nodeID, am.Mode) log.Debugf("ControllerPublishVolume successful for volid: [%s]", pinfo["volumeContextId"]) return &csi.ControllerPublishVolumeResponse{PublishContext: pinfo}, nil - } // exportVolume - Method to export volume with idempotency func (s *service) exportVolume(ctx context.Context, protocol, volID, hostID, nodeID, arrayID string, unity *gounity.Client, pinfo map[string]string, host *types.Host, vc *csi.VolumeCapability) (*csi.ControllerPublishVolumeResponse, error) { - ctx, log, rid := GetRunidLog(ctx) pinfo["lun"] = volID am := vc.GetAccessMode() @@ -1605,7 +1622,7 @@ func (s *service) exportVolume(ctx context.Context, protocol, volID, hostID, nod content := vol.VolumeContent hostIDList := make([]string, 0) - //Idempotency check + // Idempotency check for _, hostaccess := range content.HostAccessResponse { hostcontent := hostaccess.HostContent hostAccessID := hostcontent.ID @@ -1615,11 +1632,11 @@ func (s *service) exportVolume(ctx context.Context, protocol, volID, hostID, nod } else if vc.GetMount() != nil && (am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER || am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER || am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER || am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY) { return nil, status.Error(codes.Aborted, utils.GetMessageWithRunID(rid, "Volume has been published to a different host already.")) } - //Gather list of hosts to which the volume is already published to + // Gather list of hosts to which the volume is already published to hostIDList = append(hostIDList, hostAccessID) } - //Append the curent hostID as well + // Append the curent hostID as well hostIDList = append(hostIDList, hostID) log.Debug("Adding host access to ", hostID, " on volume ", volID) @@ -1634,7 +1651,6 @@ func (s *service) exportVolume(ctx context.Context, protocol, volID, hostID, nod // unexportFilesystem - Method to handle unexport filesystem logic with idempotency func (s *service) unexportFilesystem(ctx context.Context, volID, hostID, nodeID, volumeContextID, arrayID string, unity *gounity.Client) error { - ctx, log, rid := GetRunidLog(ctx) fileAPI := gounity.NewFilesystem(unity) isSnapshot := false @@ -1658,7 +1674,7 @@ func (s *service) unexportFilesystem(ctx context.Context, volID, hostID, nodeID, return err } } - //Remove host access from NFS Share + // Remove host access from NFS Share nfsShareName := NFSShareNamePrefix + filesystem.FileContent.Name if isSnapshot { nfsShareName = NFSShareNamePrefix + snapResp.SnapshotContent.Name @@ -1671,7 +1687,7 @@ func (s *service) 
unexportFilesystem(ctx context.Context, volID, hostID, nodeID, if nfsShare.Path == NFSShareLocalPath && nfsShare.ParentSnap.ID == volID { shareExists = true if nfsShare.Name != nfsShareName { - //This means that share was created manually on array, hence don't delete via driver + // This means that share was created manually on array, hence don't delete via driver deleteShare = false nfsShareName = nfsShare.Name } @@ -1681,7 +1697,7 @@ func (s *service) unexportFilesystem(ctx context.Context, volID, hostID, nodeID, if nfsShare.Path == NFSShareLocalPath && nfsShare.ParentSnap.ID == "" { shareExists = true if nfsShare.Name != nfsShareName { - //This means that share was created manually on array, hence don't delete via driver + // This means that share was created manually on array, hence don't delete via driver deleteShare = false nfsShareName = nfsShare.Name } @@ -1761,7 +1777,7 @@ func (s *service) unexportFilesystem(ctx context.Context, volID, hostID, nodeID, err = fileAPI.ModifyNFSShareHostAccess(ctx, volID, nfsShareID, readWriteHostIDList, gounity.ReadWriteRootAccessType) } } else { - //Idempotent case + // Idempotent case log.Infof("Host: %s has no access on NFS Share: %s", nodeID, nfsShareID) } if err != nil { @@ -1769,7 +1785,7 @@ func (s *service) unexportFilesystem(ctx context.Context, volID, hostID, nodeID, } log.Debugf("Host: %s access is removed from NFS Share: %s", nodeID, nfsShareID) - //Delete NFS Share + // Delete NFS Share if deleteShare { if otherHostsWithAccess > 0 { log.Infof("NFS Share: %s can not be deleted as other hosts have access on it.", nfsShareID) @@ -1825,8 +1841,8 @@ func (s *service) getMetricsCollection(ctx context.Context, arrayID string, id i } func (s *service) ControllerGetVolume(ctx context.Context, - req *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) { - + req *csi.ControllerGetVolumeRequest, +) (*csi.ControllerGetVolumeResponse, error) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing ControllerGetVolume with args: %+v", *req) @@ -1875,7 +1891,6 @@ func (s *service) ControllerGetVolume(ctx context.Context, fileAPI := gounity.NewFilesystem(unity) isSnapshot := false filesystem, err := fileAPI.FindFilesystemByID(ctx, volID) - if err != nil { var snapResp *types.Snapshot snapshotAPI := gounity.NewSnapshot(unity) diff --git a/service/controller_test.go b/service/controller_test.go index 4a6345e0..2dd931cf 100644 --- a/service/controller_test.go +++ b/service/controller_test.go @@ -26,7 +26,7 @@ import ( func TestControllerProbe(t *testing.T) { DriverConfig = testConf.unityConfig - //Dynamic update of config + // Dynamic update of config err := testConf.service.BeforeServe(context.Background(), nil, nil) if err != nil { t.Fatalf("TestBeforeServe failed with error %v", err) diff --git a/service/csi_extension_server.go b/service/csi_extension_server.go index 96b1107d..a061a5ee 100644 --- a/service/csi_extension_server.go +++ b/service/csi_extension_server.go @@ -49,23 +49,29 @@ var GetMetricsCollection = getMetricsCollection var CreateMetricsCollection = createMetricsCollection // MetricsCollectionInterval is used for interval to use in the creation of a Unity MetricsCollection -var MetricsCollectionInterval = 5 // seconds -// CollectionWait - Collection wait time -var CollectionWait = (MetricsCollectionInterval + 1) * 1000 +var ( + MetricsCollectionInterval = 5 // seconds + // CollectionWait - Collection wait time + CollectionWait = (MetricsCollectionInterval + 1) * 1000 +) -var metricsCollectionCache sync.Map -var 
currentIOCount = []string{ - "sp.*.storage.lun.*.currentIOCount", -} +var ( + metricsCollectionCache sync.Map + currentIOCount = []string{ + "sp.*.storage.lun.*.currentIOCount", + } +) var fileSystemRWs = []string{ "sp.*.storage.filesystem.*.clientReads", "sp.*.storage.filesystem.*.clientWrites", } -var cacheRWLock sync.RWMutex -var kickoffOnce sync.Once -var refreshCount atomic.Int32 -var refreshEnabled bool +var ( + cacheRWLock sync.RWMutex + kickoffOnce sync.Once + refreshCount atomic.Int32 + refreshEnabled bool +) // Constants that can be used across module const ( diff --git a/service/csi_extension_test.go b/service/csi_extension_test.go index 6bf098d9..8618de4a 100644 --- a/service/csi_extension_test.go +++ b/service/csi_extension_test.go @@ -221,7 +221,8 @@ func TestValidateVolumeHostConnectivityVolumeIds(t *testing.T) { Connected: true, Messages: []string{ "iSCSI Health is good for array:array1, Health:ALRT_COMPONENT_OK", - "sv_1 on array array1 has no IOs"}, + "sv_1 on array array1 has no IOs", + }, IosInProgress: false, }, }, @@ -247,7 +248,8 @@ func TestValidateVolumeHostConnectivityVolumeIds(t *testing.T) { Connected: true, Messages: []string{ "iSCSI Health is good for array:array1, Health:ALRT_COMPONENT_OK", - "sv_1 on array array1 has no IOs"}, + "sv_1 on array array1 has no IOs", + }, IosInProgress: false, }, }, @@ -270,7 +272,8 @@ func TestValidateVolumeHostConnectivityVolumeIds(t *testing.T) { VolumeIds: []string{ "vol1-iscsi-array1-sv_1", "vol1-iscsi-array2-sv_2", - "vol1-iscsi-array3-sv_3"}, + "vol1-iscsi-array3-sv_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -453,7 +456,6 @@ func TestValidateVolumeHostConnectivityFC(t *testing.T) { var testCases map[string]testCaseSpec testCases = map[string]testCaseSpec{ - "Default Array, Good FC HostConnections": { setup: func() { testConf.service.opts.AutoProbe = true @@ -577,7 +579,8 @@ func TestVolumeIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-iscsi-array1-sv_1", "vol1-iscsi-array2-sv_2", - "vol1-iscsi-array3-sv_3"}, + "vol1-iscsi-array3-sv_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -609,7 +612,8 @@ func TestVolumeIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-iscsi-array1-sv_1", "vol1-iscsi-array2-sv_2", - "vol1-iscsi-array3-sv_3"}, + "vol1-iscsi-array3-sv_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -648,7 +652,8 @@ func TestVolumeIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-iscsi-array1-sv_1", "vol1-iscsi-array2-sv_2", - "vol1-iscsi-array3-sv_3"}, + "vol1-iscsi-array3-sv_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -688,7 +693,8 @@ func TestVolumeIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-iscsi-array1-sv_1", "vol1-iscsi-array2-sv_2", - "vol1-iscsi-array3-sv_3"}, + "vol1-iscsi-array3-sv_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -756,7 +762,8 @@ func TestVolumeIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-iscsi-array1-sv_1", "vol1-iscsi-array2-sv_2", - "vol1-iscsi-array3-sv_3"}, + "vol1-iscsi-array3-sv_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -803,7 +810,8 @@ func TestFileSystemIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-nfs-array1-fs_1", "vol1-nfs-array2-fs_2", - "vol1-nfs-array3-fs_3"}, + "vol1-nfs-array3-fs_3", + }, }, expectedResponse: 
&podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -835,7 +843,8 @@ func TestFileSystemIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-nfs-array1-fs_1", "vol1-nfs-array2-fs_2", - "vol1-nfs-array3-fs_3"}, + "vol1-nfs-array3-fs_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -884,7 +893,8 @@ func TestFileSystemIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-nfs-array1-fs_1", "vol1-nfs-array2-fs_2", - "vol1-nfs-array3-fs_3"}, + "vol1-nfs-array3-fs_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -962,7 +972,8 @@ func TestFileSystemIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-nfs-array1-fs_1", "vol1-nfs-array2-fs_2", - "vol1-nfs-array3-fs_3"}, + "vol1-nfs-array3-fs_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -1044,7 +1055,8 @@ func TestFileSystemIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-nfs-array1-fs_1", "vol1-nfs-array2-fs_2", - "vol1-nfs-array3-fs_3"}, + "vol1-nfs-array3-fs_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -1102,7 +1114,8 @@ func TestFileSystemIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-nfs-array1-fs_1", "vol1-nfs-array2-fs_2", - "vol1-nfs-array3-fs_3"}, + "vol1-nfs-array3-fs_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -1164,7 +1177,8 @@ func TestFileSystemIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-nfs-array1-fs_1", "vol1-nfs-array2-fs_2", - "vol1-nfs-array3-fs_3"}, + "vol1-nfs-array3-fs_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -1227,7 +1241,8 @@ func TestFileSystemIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-nfs-array1-fs_1", "vol1-nfs-array2-fs_2", - "vol1-nfs-array3-fs_3"}, + "vol1-nfs-array3-fs_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -1303,7 +1318,8 @@ func TestFileSystemIOCheck(t *testing.T) { VolumeIds: []string{ "vol1-nfs-array1-fs_1", "vol1-nfs-array2-fs_2", - "vol1-nfs-array3-fs_3"}, + "vol1-nfs-array3-fs_3", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -1385,7 +1401,8 @@ func TestFileSystemIOCheck(t *testing.T) { // --- VolumeIds: []string{ "vol1-nfs-array1-fs_1", - "vol1-nfs-array2-fs_2"}, + "vol1-nfs-array2-fs_2", + }, }, expectedResponse: &podmon.ValidateVolumeHostConnectivityResponse{ Connected: true, @@ -1644,20 +1661,22 @@ func runTestCases(ctx context.Context, t *testing.T, testCases map[string]testCa } } -var mockGetArrayFromVolumeFail string -var mockGetArrayIDErr error -var mockRequireProbeErr error -var mockGetHost *types.Host -var mockGetHostErr error -var mockGetUnity *gounity.Client -var mockGetUnityErr error -var mockFindHostInitiatorErr error -var mockBadInitiator string -var mockFindInitatorFail string -var mockGetMetricsCollectionError error -var mockCreateMetricsCollectionError error -var mockMetricsCollectionID int -var mockMetricValueMap map[string]interface{} +var ( + mockGetArrayFromVolumeFail string + mockGetArrayIDErr error + mockRequireProbeErr error + mockGetHost *types.Host + mockGetHostErr error + mockGetUnity *gounity.Client + mockGetUnityErr error + mockFindHostInitiatorErr error + mockBadInitiator string + mockFindInitatorFail string + mockGetMetricsCollectionError error + mockCreateMetricsCollectionError error + mockMetricsCollectionID int + mockMetricValueMap 
map[string]interface{} +) func defaultMocks() { GetHostID = mockGetHostID diff --git a/service/envvars.go b/service/envvars.go index 168cc3d2..1f7e2908 100644 --- a/service/envvars.go +++ b/service/envvars.go @@ -29,11 +29,11 @@ const ( // violation of the CSI spec EnvAutoProbe = "X_CSI_UNITY_AUTOPROBE" - //EnvPvtMountDir is required to Node Unstage volume where the volume has been mounted - //as a global mount via CSI-Unity v1.0 or v1.1 + // EnvPvtMountDir is required to Node Unstage volume where the volume has been mounted + // as a global mount via CSI-Unity v1.0 or v1.1 EnvPvtMountDir = "X_CSI_PRIVATE_MOUNT_DIR" - //EnvEphemeralStagingPath - Ephemeral staging path + // EnvEphemeralStagingPath - Ephemeral staging path EnvEphemeralStagingPath = "X_CSI_EPHEMERAL_STAGING_PATH" // EnvISCSIChroot is the path to which the driver will chroot before @@ -44,14 +44,14 @@ const ( // EnvKubeConfigPath indicates kubernetes configuration that has to be used by CSI Driver EnvKubeConfigPath = "KUBECONFIG" - //SyncNodeInfoTimeInterval - Time interval to add node info to array. Default 60 minutes. - //X_CSI_UNITY_SYNC_NODEINFO_INTERVAL has been deprecated and will be removes in a future release + // SyncNodeInfoTimeInterval - Time interval to add node info to array. Default 60 minutes. + // X_CSI_UNITY_SYNC_NODEINFO_INTERVAL has been deprecated and will be removes in a future release SyncNodeInfoTimeInterval = "X_CSI_UNITY_SYNC_NODEINFO_INTERVAL" // EnvAllowRWOMultiPodAccess - Environment variable to configure sharing of a single volume across multiple pods within the same node // Multi-node access is still not allowed for ReadWriteOnce Mount volumes. // Enabling this option techincally violates the CSI 1.3 spec in the NodePublishVolume stating the required error returns. 
- //X_CSI_UNITY_ALLOW_MULTI_POD_ACCESS has been deprecated and will be removes in a future release + // X_CSI_UNITY_ALLOW_MULTI_POD_ACCESS has been deprecated and will be removes in a future release EnvAllowRWOMultiPodAccess = "X_CSI_UNITY_ALLOW_MULTI_POD_ACCESS" // EnvIsVolumeHealthMonitorEnabled - Environment variable to enable/disable health monitor of CSI volumes diff --git a/service/identity.go b/service/identity.go index c3c59a63..589f0097 100644 --- a/service/identity.go +++ b/service/identity.go @@ -25,7 +25,8 @@ import ( func (s *service) Probe( ctx context.Context, req *csi.ProbeRequest) ( - *csi.ProbeResponse, error) { + *csi.ProbeResponse, error, +) { ctx, log, _ := GetRunidLog(ctx) log.Infof("Executing Probe with args: %+v", *req) if strings.EqualFold(s.mode, "controller") { @@ -47,8 +48,8 @@ func (s *service) Probe( func (s *service) GetPluginInfo( ctx context.Context, req *csi.GetPluginInfoRequest) ( - *csi.GetPluginInfoResponse, error) { - + *csi.GetPluginInfoResponse, error, +) { return &csi.GetPluginInfoResponse{ Name: Name, VendorVersion: core.SemVer, @@ -59,7 +60,8 @@ func (s *service) GetPluginInfo( func (s *service) GetPluginCapabilities( ctx context.Context, req *csi.GetPluginCapabilitiesRequest) ( - *csi.GetPluginCapabilitiesResponse, error) { + *csi.GetPluginCapabilitiesResponse, error, +) { ctx, log, _ := GetRunidLog(ctx) log.Infof("Executing GetPluginCapabilities with args: %+v", *req) return &csi.GetPluginCapabilitiesResponse{ diff --git a/service/mount.go b/service/mount.go index f55a143f..7eb41692 100644 --- a/service/mount.go +++ b/service/mount.go @@ -66,7 +66,7 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor if len(mnts) != 0 { for _, m := range mnts { - //Idempotency check + // Idempotency check for _, exportPathURL := range exportPaths { if m.Device == exportPathURL { if m.Path == stagingTargetPath { @@ -76,7 +76,7 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor } return status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Staging target path: %s is already mounted to export path: %s with conflicting access modes", stagingTargetPath, exportPathURL)) } - //It is possible that a different export path URL is used to mount stage target path + // It is possible that a different export path URL is used to mount stage target path continue } } @@ -85,7 +85,7 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor log.Debugf("Stage - Mount flags for NFS: %s", mntFlags) - var mountOverride = false + mountOverride := false for _, flag := range mntFlags { if strings.Contains(flag, "vers") { mountOverride = true @@ -93,7 +93,7 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor } } - //if nfsv4 specified or mount options is provided, mount as is + // if nfsv4 specified or mount options is provided, mount as is if nfsv4 || mountOverride { nfsv4 = false for _, exportPathURL := range exportPaths { @@ -105,7 +105,7 @@ func stagePublishNFS(ctx context.Context, req *csi.NodeStageVolumeRequest, expor } } - //Try remount as nfsv3 only if NAS server supports nfs v3 + // Try remount as nfsv3 only if NAS server supports nfs v3 if nfsv3 && !nfsv4 && !mountOverride { mntFlags = append(mntFlags, "vers=3") for _, exportPathURL := range exportPaths { @@ -152,7 +152,7 @@ func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, exportPa mntVol := volCap.GetMount() mntFlags := mntVol.GetMountFlags() rwoArray = append(rwoArray, 
mntFlags...) - //Check if stage target mount exists + // Check if stage target mount exists var stageExportPathURL string stageMountExists := false for _, exportPathURL := range exportPaths { @@ -178,7 +178,7 @@ func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, exportPa if len(mnts) != 0 { for _, m := range mnts { - //Idempotency check + // Idempotency check if m.Device == stageExportPathURL { if m.Path == targetPath { if utils.ArrayContains(m.Opts, rwo) { @@ -192,7 +192,7 @@ func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, exportPa if accMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER || (accMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER && !allowRWOmultiPodAccess) { return status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Export path: %s is already mounted to different target path: %s", stageExportPathURL, m.Path)) } - //For multi-node access modes and when allowRWOmultiPodAccess is true for single-node access, target mount will be executed + // For multi-node access modes and when allowRWOmultiPodAccess is true for single-node access, target mount will be executed continue } } @@ -201,7 +201,7 @@ func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, exportPa log.Debugf("Publish - Mount flags for NFS: %s", mntFlags) - var mountOverride = false + mountOverride := false for _, flag := range mntFlags { if strings.Contains(flag, "vers") { mountOverride = true @@ -209,8 +209,8 @@ func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, exportPa } } - //if nfsv4 specified or mount options is provided, mount as is - //Proceeding to perform bind mount to target path + // if nfsv4 specified or mount options is provided, mount as is + // Proceeding to perform bind mount to target path if nfsv4 || mountOverride { nfsv4 = false err = gofsutil.BindMount(ctx, stagingTargetPath, targetPath, rwoArray...) @@ -237,7 +237,7 @@ func stageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest, stagingPa volCap := req.GetVolumeCapability() id := req.GetVolumeId() mntVol := volCap.GetMount() - ro := false //since only SINGLE_NODE_WRITER is supported + ro := false // since only SINGLE_NODE_WRITER is supported // make sure device is valid sysDevice, err := GetDevice(ctx, symlinkPath) @@ -327,7 +327,7 @@ func stageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest, stagingPa return status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "access mode conflicts with existing mounts")) } } - //It is ok if the device is mounted elsewhere - could be targetPath. If not this will be caught during NodePublish + // It is ok if the device is mounted elsewhere - could be targetPath. If not this will be caught during NodePublish } if !mounted { return status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "device already in use and mounted elsewhere. 
Cannot do private mount")) @@ -338,7 +338,6 @@ func stageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest, stagingPa } func publishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest, targetPath, symlinkPath, chroot string, allowRWOmultiPodAccess bool) error { - rid, log := utils.GetRunidAndLogger(ctx) stagingPath := req.GetStagingTargetPath() id := req.GetVolumeId() @@ -379,7 +378,7 @@ func publishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest, targe } else if m.Path == stagingPath || m.Path == chroot+stagingPath { continue } else if accMode.GetMode() == csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER || (accMode.GetMode() == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER && !allowRWOmultiPodAccess) { - //Device has been mounted aleady to another target + // Device has been mounted aleady to another target return status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "device already in use and mounted elsewhere")) } } @@ -393,7 +392,7 @@ func publishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest, targe if len(pathMnts) > 0 { for _, m := range pathMnts { if !(m.Source == sysDevice.FullPath || m.Device == sysDevice.FullPath) { - //target is mounted by some other device + // target is mounted by some other device return status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Target is mounted using a different device %s", m.Device)) } } @@ -458,7 +457,7 @@ func unpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) e return nil } - //It is alright if the device is mounted elsewhere - could be staging mount + // It is alright if the device is mounted elsewhere - could be staging mount } devicePath := targetMount.Device @@ -475,7 +474,7 @@ func unpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) e return nil } - //Get existing mounts + // Get existing mounts mnts, err := gofsutil.GetMounts(ctx) if err != nil { return status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "could not reliably determine existing mount status: %s", err.Error())) @@ -489,7 +488,7 @@ func unpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) e break } } else if (m.Source == ubuntuNodeRoot+sysDevice.RealDev || m.Source == sysDevice.RealDev) && m.Device == "udev" { - //For Ubuntu mounts + // For Ubuntu mounts if m.Path == target { tgtMnt = true break @@ -532,7 +531,7 @@ func unstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest, devic stageMount, err := getTargetMount(ctx, stagingTarget) - //Make sure volume is not mounted elsewhere + // Make sure volume is not mounted elsewhere devMnts := make([]gofsutil.Info, 0) symlinkPath, devicePath, err := gofsutil.WWNToDevicePathX(ctx, deviceWWN) @@ -567,7 +566,7 @@ func unstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest, devic return true, devicePath, nil } - //Get existing mounts + // Get existing mounts mnts, err := gofsutil.GetMounts(ctx) if err != nil { return lastUnmounted, "", status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "could not reliably determine existing mount status: %s", err.Error())) @@ -611,7 +610,7 @@ func unpublishNFS(ctx context.Context, targetPath, arrayID string, exportPaths [ ctx, log, rid := GetRunidLog(ctx) ctx, log = setArrayIDContext(ctx, arrayID) - //Get existing mounts + // Get existing mounts mnts, err := gofsutil.GetMounts(ctx) if err != nil { return status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "could not reliably determine existing mount status: 
%v", err)) @@ -628,7 +627,7 @@ func unpublishNFS(ctx context.Context, targetPath, arrayID string, exportPaths [ } } if !mountExists { - //Path is mounted but with some other NFS Share + // Path is mounted but with some other NFS Share return status.Error(codes.Unknown, utils.GetMessageWithRunID(rid, "Path: %s mounted by different NFS Share with export path: %s", targetPath, m.Device)) } } @@ -637,7 +636,7 @@ func unpublishNFS(ctx context.Context, targetPath, arrayID string, exportPaths [ } } if !mountExists { - //Idempotent case + // Idempotent case log.Debugf("Path: %s not mounted", targetPath) return nil } @@ -662,7 +661,7 @@ func getDevMounts(ctx context.Context, sysDevice *Device) ([]gofsutil.Info, erro } else if (m.Source == ubuntuNodeRoot+sysDevice.RealDev || m.Source == sysDevice.RealDev) && m.Device == "udev" { devMnts = append(devMnts, m) } else { - //Find the multipath device mapper from the device obtained + // Find the multipath device mapper from the device obtained mpDevName := strings.TrimPrefix(sysDevice.RealDev, "/dev/") filename := fmt.Sprintf("/sys/devices/virtual/block/%s/dm/name", mpDevName) if name, err := os.ReadFile(filepath.Clean(filename)); err != nil { @@ -708,7 +707,7 @@ func createDirIfNotExist(ctx context.Context, path, arrayID string) error { tgtStat, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { - //Create target directory if it doesnt exist + // Create target directory if it doesnt exist _, err := mkdir(ctx, path) if err != nil { return status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Could not create path: %s. Error: %v", path, err)) @@ -731,7 +730,7 @@ func mkdir(ctx context.Context, path string) (bool, error) { log := utils.GetRunidLogger(ctx) st, err := os.Stat(path) if os.IsNotExist(err) { - if err := os.MkdirAll(path, 0750); err != nil { + if err := os.MkdirAll(path, 0o750); err != nil { log.WithField("dir", path).WithError(err).Error("Unable to create dir") return false, err } @@ -750,7 +749,7 @@ func mkfile(ctx context.Context, path string) (bool, error) { log := utils.GetRunidLogger(ctx) st, err := os.Stat(path) if os.IsNotExist(err) { - file, err := os.OpenFile(filepath.Clean(path), os.O_CREATE, 0600) + file, err := os.OpenFile(filepath.Clean(path), os.O_CREATE, 0o600) if err != nil { log.WithField("path", path).WithError( err).Error("Unable to create file") @@ -811,7 +810,8 @@ func GetDevice(ctx context.Context, path string) (*Device, error) { func unmountStagingMount( ctx context.Context, dev *Device, - target, chroot string) (bool, error) { + target, chroot string, +) (bool, error) { log := utils.GetRunidLogger(ctx) lastUnmounted := false diff --git a/service/node.go b/service/node.go index 5c98b533..cdb7dde8 100644 --- a/service/node.go +++ b/service/node.go @@ -63,7 +63,8 @@ const ( func (s *service) NodeStageVolume( ctx context.Context, req *csi.NodeStageVolumeRequest) ( - *csi.NodeStageVolumeResponse, error) { + *csi.NodeStageVolumeResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing NodeStageVolume with args: %+v", *req) volID, protocol, arrayID, unity, err := s.validateAndGetResourceDetails(ctx, req.GetVolumeId(), volumeType) @@ -100,7 +101,7 @@ func (s *service) NodeStageVolume( log.Debugf("Protocol is: %s", protocol) if protocol == NFS { - //Perform stage mount for NFS + // Perform stage mount for NFS nfsShare, nfsv3, nfsv4, err := s.getNFSShare(ctx, volID, arrayID) if err != nil { return nil, err @@ -123,7 +124,7 @@ func (s *service) NodeStageVolume( log.Debugf("Node Stage 
completed successfully: filesystem: %s is mounted on staging target path: %s", volID, stagingPath) return &csi.NodeStageVolumeResponse{}, nil } - //Protocol if FC or iSCSI + // Protocol if FC or iSCSI volumeAPI := gounity.NewVolume(unity) volume, err := volumeAPI.FindVolumeByID(ctx, volID) @@ -132,7 +133,7 @@ func (s *service) NodeStageVolume( return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(rid, "Volume not found. [%v]", err)) } - //Check if the volume is given access to the node + // Check if the volume is given access to the node hlu, err := s.checkVolumeMapping(ctx, volume, arrayID) if err != nil { return nil, err @@ -206,7 +207,7 @@ func (s *service) NodeStageVolume( return nil, err } - //Skip staging for Block devices + // Skip staging for Block devices if !isBlock { err = stageVolume(ctx, req, stagingPath, devicePath) if err != nil { @@ -221,7 +222,8 @@ func (s *service) NodeStageVolume( func (s *service) NodeUnstageVolume( ctx context.Context, req *csi.NodeUnstageVolumeRequest) ( - *csi.NodeUnstageVolumeResponse, error) { + *csi.NodeUnstageVolumeResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing NodeUnstageVolume with args: %+v", *req) @@ -265,7 +267,7 @@ func (s *service) NodeUnstageVolume( log.Debugf("Node Unstage completed successfully. No mounts on staging target path: %s", req.GetStagingTargetPath()) return &csi.NodeUnstageVolumeResponse{}, nil } else if protocol == ProtocolUnknown { - //Volume is mounted via CSI-Unity v1.0 or v1.1 and hence different staging target path was used + // Volume is mounted via CSI-Unity v1.0 or v1.1 and hence different staging target path was used stageTgt = path.Join(s.opts.PvtMountDir, volID) host, err := s.getHostID(ctx, arrayID, s.opts.NodeName, s.opts.LongNodeName) @@ -274,7 +276,7 @@ func (s *service) NodeUnstageVolume( } if len(host.HostContent.FcInitiators) == 0 { - //FC gets precedence if host has both initiators - which is not supported by the driver + // FC gets precedence if host has both initiators - which is not supported by the driver protocol = FC } else if len(host.HostContent.IscsiInitiators) == 0 { protocol = ISCSI @@ -349,7 +351,8 @@ func (s *service) NodeUnstageVolume( func (s *service) NodePublishVolume( ctx context.Context, req *csi.NodePublishVolumeRequest) ( - *csi.NodePublishVolumeResponse, error) { + *csi.NodePublishVolumeResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing NodePublishVolume with args: %+v", *req) @@ -403,7 +406,7 @@ func (s *service) NodePublishVolume( } if protocol == NFS { - //Perform target mount for NFS + // Perform target mount for NFS nfsShare, nfsv3, nfsv4, err := s.getNFSShare(ctx, volID, arrayID) if err != nil { return nil, err @@ -420,7 +423,7 @@ func (s *service) NodePublishVolume( return &csi.NodePublishVolumeResponse{}, nil } - //Protocol FC or iSCSI + // Protocol FC or iSCSI isBlock := accTypeBlock(volCap) @@ -451,10 +454,11 @@ func (s *service) NodePublishVolume( func (s *service) ephemeralNodePublishVolume( ctx context.Context, req *csi.NodePublishVolumeRequest) ( - *csi.NodePublishVolumeResponse, error) { + *csi.NodePublishVolumeResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) - //Create Ephemeral Volume + // Create Ephemeral Volume volName := req.VolumeId if len(volName) > VolumeNameLengthConstraint { volName = volName[0 : VolumeNameLengthConstraint-1] @@ -477,13 +481,13 @@ func (s *service) ephemeralNodePublishVolume( } log.Debugf("Ephemeral Volume %s created successfully", volName) - //Create 
NodeUnpublishRequest for rollback scenario + // Create NodeUnpublishRequest for rollback scenario nodeUnpublishRequest := &csi.NodeUnpublishVolumeRequest{ VolumeId: createVolResp.Volume.VolumeId, TargetPath: req.TargetPath, } - //ControllerPublishVolume to current node + // ControllerPublishVolume to current node controllerPublishResp, err := s.ControllerPublishVolume(ctx, &csi.ControllerPublishVolumeRequest{ VolumeId: createVolResp.Volume.VolumeId, NodeId: s.opts.NodeName + "," + s.opts.LongNodeName, @@ -493,7 +497,7 @@ func (s *service) ephemeralNodePublishVolume( VolumeContext: createVolResp.Volume.VolumeContext, }) if err != nil { - //Call Ephemeral Node Unpublish for recovery + // Call Ephemeral Node Unpublish for recovery _, _ = s.ephemeralNodeUnpublish(ctx, nodeUnpublishRequest, req.VolumeId) return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Ephemeral Controller Publish Volume failed with error: %v", err)) } @@ -501,7 +505,7 @@ func (s *service) ephemeralNodePublishVolume( stagingMountPath := path.Join(s.opts.EnvEphemeralStagingTargetPath, req.VolumeId) - //Node Stage for Ephemeral Volume + // Node Stage for Ephemeral Volume _, err = s.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{ VolumeId: createVolResp.Volume.VolumeId, PublishContext: controllerPublishResp.PublishContext, @@ -511,13 +515,13 @@ func (s *service) ephemeralNodePublishVolume( VolumeContext: createVolResp.Volume.VolumeContext, }) if err != nil { - //Call Ephemeral Node Unpublish for recovery + // Call Ephemeral Node Unpublish for recovery _, _ = s.ephemeralNodeUnpublish(ctx, nodeUnpublishRequest, req.VolumeId) return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(rid, "Ephemeral Node Stage Volume failed with error: %v", err)) } log.Debug("Ephemeral Node Stage Successful") - //Node Publish for Ephemeral Volume + // Node Publish for Ephemeral Volume _, err = s.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{ VolumeId: createVolResp.Volume.VolumeId, PublishContext: controllerPublishResp.PublishContext, @@ -529,7 +533,7 @@ func (s *service) ephemeralNodePublishVolume( VolumeContext: createVolResp.Volume.VolumeContext, }) if err != nil { - //Call Ephemeral Node Unpublish for recovery + // Call Ephemeral Node Unpublish for recovery _, _ = s.ephemeralNodeUnpublish(ctx, nodeUnpublishRequest, req.VolumeId) return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Ephemeral Node Publish Volume failed with error: %v", err)) } @@ -537,14 +541,14 @@ func (s *service) ephemeralNodePublishVolume( f, err := os.Create(filepath.Clean(path.Join(stagingMountPath, "id"))) if err != nil { - //Call Ephemeral Node Unpublish for recovery + // Call Ephemeral Node Unpublish for recovery _, _ = s.ephemeralNodeUnpublish(ctx, nodeUnpublishRequest, req.VolumeId) return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Creation of file failed with error: %v", err)) } defer f.Close() _, err2 := f.WriteString(createVolResp.Volume.VolumeId) if err2 != nil { - //Call Ephemeral Node Unpublish for recovery + // Call Ephemeral Node Unpublish for recovery _, _ = s.ephemeralNodeUnpublish(ctx, nodeUnpublishRequest, req.VolumeId) return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Save Volume Id in file failed with error: %v", err)) } @@ -557,8 +561,8 @@ func (s *service) ephemeralNodePublishVolume( func (s *service) NodeUnpublishVolume( ctx context.Context, req *csi.NodeUnpublishVolumeRequest) ( - *csi.NodeUnpublishVolumeResponse, error) { - + 
*csi.NodeUnpublishVolumeResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing NodeUnpublishVolume with args: %+v", *req) @@ -649,10 +653,11 @@ func (s *service) NodeUnpublishVolume( func (s *service) ephemeralNodeUnpublish( ctx context.Context, req *csi.NodeUnpublishVolumeRequest, volName string) ( - *csi.NodeUnpublishVolumeResponse, error) { + *csi.NodeUnpublishVolumeResponse, error, +) { ctx, _, rid := GetRunidLog(ctx) - //Node Unpublish for Ephemeral Volume + // Node Unpublish for Ephemeral Volume _, err := s.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{ VolumeId: req.VolumeId, TargetPath: req.TargetPath, @@ -661,7 +666,7 @@ func (s *service) ephemeralNodeUnpublish( return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Node Unpublish for ephemeral volume failed with error: %v", err)) } - //Node Unstage for Ephemeral Volume + // Node Unstage for Ephemeral Volume _, err = s.NodeUnstageVolume(ctx, &csi.NodeUnstageVolumeRequest{ VolumeId: req.VolumeId, StagingTargetPath: path.Join(s.opts.EnvEphemeralStagingTargetPath, volName, "globalmount"), @@ -670,7 +675,7 @@ func (s *service) ephemeralNodeUnpublish( return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Node Unstage for ephemeral volume failed with error: %v", err)) } - //Controller Unpublish for Ephemeral Volume + // Controller Unpublish for Ephemeral Volume _, err = s.ControllerUnpublishVolume(ctx, &csi.ControllerUnpublishVolumeRequest{ VolumeId: req.VolumeId, NodeId: s.opts.NodeName + "," + s.opts.LongNodeName, @@ -679,7 +684,7 @@ func (s *service) ephemeralNodeUnpublish( return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Controller Unpublish for ephemeral volume failed with error: %v", err)) } - //Delete Volume for Ephemeral Volume + // Delete Volume for Ephemeral Volume _, err = s.DeleteVolume(ctx, &csi.DeleteVolumeRequest{ VolumeId: req.VolumeId, }) @@ -697,13 +702,13 @@ func (s *service) ephemeralNodeUnpublish( func (s *service) NodeGetInfo( ctx context.Context, req *csi.NodeGetInfoRequest) ( - *csi.NodeGetInfoResponse, error) { - + *csi.NodeGetInfoResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing NodeGetInfo with args: %+v", *req) atleastOneArraySuccess := false - //Sleep for a while and wait untill iscsi discovery is completed + // Sleep for a while and wait until iscsi discovery is completed time.Sleep(nodeStartTimeout) arraysList := s.getStorageArrayList() @@ -757,7 +762,8 @@ func (s *service) NodeGetInfo( func (s *service) NodeGetCapabilities( ctx context.Context, req *csi.NodeGetCapabilitiesRequest) ( - *csi.NodeGetCapabilitiesResponse, error) { + *csi.NodeGetCapabilitiesResponse, error, +) { ctx, log, _ := GetRunidLog(ctx) log.Infof("Executing NodeGetCapabilities with args: %+v", *req) capabilities := []*csi.NodeServiceCapability{ @@ -817,8 +823,8 @@ func (s *service) NodeGetCapabilities( func (s *service) NodeGetVolumeStats( ctx context.Context, req *csi.NodeGetVolumeStatsRequest) ( - *csi.NodeGetVolumeStatsResponse, error) { - + *csi.NodeGetVolumeStatsResponse, error, +) { ctx, log, rid := GetRunidLog(ctx) log.Debugf("Executing NodeGetVolumeStats with args: %+v", *req) @@ -970,10 +976,10 @@ func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum volName := volume.VolumeContent.Name - //Locate and fetch all (multipath/regular) mounted paths using this volume + // Locate and fetch all (multipath/regular) mounted paths using this volume devMnt, err := 
gofsutil.GetMountInfoFromDevice(ctx, volName) if err != nil { - //No mounts found - Could be raw block device + // No mounts found - Could be raw block device volWwn := utils.GetWwnFromVolumeContentWwn(volume.VolumeContent.Wwn) deviceNames, _ := gofsutil.GetSysBlockDevicesForVolumeWWN(context.Background(), volWwn) if len(deviceNames) > 0 { @@ -1027,8 +1033,8 @@ func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum utils.GetMessageWithRunID(rid, "Failed to resize filesystem: device (%s) with error %v", devMnt.MountPoint, err)) } } - //For a regular device, get the device path (devMnt.DeviceNames[1]) where the filesystem is mounted - //PublishVolume creates devMnt.DeviceNames[0] but is left unused for regular devices + // For a regular device, get the device path (devMnt.DeviceNames[1]) where the filesystem is mounted + // PublishVolume creates devMnt.DeviceNames[0] but is left unused for regular devices var devicePath string if len(devMnt.DeviceNames) > 1 { devicePath = "/dev/" + devMnt.DeviceNames[1] @@ -1047,7 +1053,7 @@ func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum log.Infof("Found %s filesystem mounted on volume %s", fsType, devMnt.MountPoint) - //Resize the filesystem + // Resize the filesystem err = gofsutil.ResizeFS(ctx, devMnt.MountPoint, devicePath, devMnt.PPathName, devMnt.MPathName, fsType) if err != nil { return nil, status.Error(codes.Internal, @@ -1304,7 +1310,7 @@ func (s *service) iScsiDiscoverFetchTargets(ctx context.Context, interfaceIps [] iscsiTargets = append(iscsiTargets, tgt) } } - //All targets are obtained with one valid IP + // All targets are obtained with one valid IP break } return iscsiTargets @@ -1372,7 +1378,8 @@ func (s *service) connectDevice(ctx context.Context, data publishContextData, us } func (s *service) connectISCSIDevice(ctx context.Context, - lun int, data publishContextData) (gobrick.Device, error) { + lun int, data publishContextData, +) (gobrick.Device, error) { var targets []gobrick.ISCSITargetInfo for _, t := range data.iscsiTargets { targets = append(targets, gobrick.ISCSITargetInfo{Target: t.Target, Portal: t.Portal}) @@ -1388,7 +1395,8 @@ func (s *service) connectISCSIDevice(ctx context.Context, } func (s *service) connectFCDevice(ctx context.Context, - lun int, data publishContextData) (gobrick.Device, error) { + lun int, data publishContextData, +) (gobrick.Device, error) { var targets []gobrick.FCTargetInfo for _, wwn := range data.fcTargets { targets = append(targets, gobrick.FCTargetInfo{WWPN: wwn}) @@ -1520,7 +1528,7 @@ func (s *service) addNodeInformationIntoArray(ctx context.Context, array *Storag } fqdnHost := false - //Find Host on the Array + // Find Host on the Array host, err := hostAPI.FindHostByName(ctx, s.opts.NodeName) if err != nil { if err == gounity.ErrorHostNotFound { @@ -1570,7 +1578,7 @@ func (s *service) addNodeInformationIntoArray(ctx context.Context, array *Storag } } if addNewInitiators { - //Modify host operation + // Modify host operation for _, wwn := range wwns { log.Debugf("Adding wwn Initiator: %s to host: %s ", hostContent.ID, wwn) _, err = hostAPI.CreateHostInitiator(ctx, hostContent.ID, wwn, gounityapi.FCInitiatorType) @@ -1586,7 +1594,7 @@ func (s *service) addNodeInformationIntoArray(ctx context.Context, array *Storag } } } - //Check Ip of the host with Host IP Port + // Check Ip of the host with Host IP Port findHostNamePort := false for _, ipPort := range hostContent.IPPorts { hostIPPort, err := hostAPI.FindHostIPPortByID(ctx, ipPort.ID) @@ 
-1607,9 +1615,9 @@ func (s *service) addNodeInformationIntoArray(ctx context.Context, array *Storag } } } - var ipFormat = regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) + ipFormat := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) if findHostNamePort == false { - //Create Host Ip Port + // Create Host Ip Port _, err = hostAPI.CreateHostIPPort(ctx, hostContent.ID, s.opts.LongNodeName) if err != nil { return err @@ -1636,7 +1644,7 @@ func (s *service) addNodeInformationIntoArray(ctx context.Context, array *Storag interfaceIps := utils.GetIPsFromInferfaces(ctx, ipInterfaces) - //Always discover and login during driver start up + // Always discover and login during driver start up s.iScsiDiscoverAndLogin(ctx, interfaceIps) } array.IsHostAdded = true @@ -1652,7 +1660,7 @@ func (s *service) checkHostIdempotency(ctx context.Context, array *StorageArrayC return false, false, status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Error while finding initiators for host %s on the array: %s error: %v", hostContent.ID, array, err)) } - //Check if all elements of wwns is present inside arrayHostWwns + // Check if all elements of wwns is present inside arrayHostWwns if utils.ArrayContainsAll(append(wwns, iqns...), arrayHostWwns) && len(append(wwns, iqns...)) == len(arrayHostWwns) { log.Info("Node initiators are synchronized with the Host Wwns on the array") return false, true, nil @@ -1676,7 +1684,7 @@ func (s *service) addNewNodeToArray(ctx context.Context, array *StorageArrayConf tenantName := s.opts.TenantName var tenantID string - //Create Host + // Create Host hostAPI := gounity.NewHost(unity) // get tenantid from tenant name @@ -1697,7 +1705,6 @@ func (s *service) addNewNodeToArray(ctx context.Context, array *StorageArrayConf var hostContent types.HostContent if tenantName != "" && tenantID == "" { return status.Error(codes.Internal, utils.GetMessageWithRunID(rid, "Please enter Valid tenant Name : %s", tenantName)) - } host, err := hostAPI.CreateHost(ctx, s.opts.LongNodeName, tenantID) if err != nil { @@ -1706,12 +1713,12 @@ func (s *service) addNewNodeToArray(ctx context.Context, array *StorageArrayConf hostContent = host.HostContent log.Debugf("New Host Id: %s", hostContent.ID) - //Create Host Ip Port + // Create Host Ip Port _, err = hostAPI.CreateHostIPPort(ctx, hostContent.ID, s.opts.LongNodeName) if err != nil { return err } - var ipFormat = regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) + ipFormat := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) for _, nodeIP := range nodeIps { _, err = hostAPI.CreateHostIPPort(ctx, hostContent.ID, nodeIP) if err != nil && !ipFormat.MatchString(s.opts.NodeName) { @@ -1720,7 +1727,7 @@ func (s *service) addNewNodeToArray(ctx context.Context, array *StorageArrayConf } if len(wwns) > 0 { - //Create Host FC Initiators + // Create Host FC Initiators log.Debugf("FC Initiators found: %s", wwns) for _, wwn := range wwns { log.Debugf("Adding wwn Initiator: %s to host: %s ", hostContent.ID, wwn) @@ -1731,7 +1738,7 @@ func (s *service) addNewNodeToArray(ctx context.Context, array *StorageArrayConf } } if len(iqns) > 0 { - //Create Host iSCSI Initiators + // Create Host iSCSI Initiators log.Debugf("iSCSI Initiators found: %s", iqns) for _, iqn := range iqns { log.Debugf("Adding iSCSI Initiator: %s to host: %s ", hostContent.ID, 
iqn) @@ -1755,7 +1762,7 @@ func (s *service) syncNodeInfoRoutine(ctx context.Context) { ctx, log = incrementLogID(ctx, "node") case <-time.After(time.Duration(s.opts.SyncNodeInfoTimeInterval) * time.Minute): log.Debug("Checking if host information is added to array") - var allHostsAdded = true + allHostsAdded := true s.arrays.Range(func(key, value interface{}) bool { array := value.(*StorageArrayConfig) if !array.IsHostAdded { @@ -1833,10 +1840,9 @@ func (s *service) validateProtocols(ctx context.Context, arraysList []*StorageAr if len(iscsiInitiators) != 0 || len(fcInitiators) != 0 { log.Info("iSCSI/FC package found in this node proceeding to further validations") - //To get all iSCSI initiators and FC Initiators + // To get all iSCSI initiators and FC Initiators ctx, _ = setArrayIDContext(ctx, array.ArrayID) unity, err := s.getUnityClient(ctx, array.ArrayID) - if err != nil { log.Infof("Unable to get unity client for topology validation: %v", err) } diff --git a/service/node_test.go b/service/node_test.go index 85ff38b5..aa1f452e 100644 --- a/service/node_test.go +++ b/service/node_test.go @@ -19,8 +19,8 @@ import ( ) func TestNodeGetInfo(t *testing.T) { - //fmt.Println(testConf.service.arrays) - //testConf.service.nodeProbe(testConf.ctx, "") - //testConf.service.discoverNodes(testConf.ctx, "1") - //time.Sleep(30 * time.Second) + // fmt.Println(testConf.service.arrays) + // testConf.service.nodeProbe(testConf.ctx, "") + // testConf.service.discoverNodes(testConf.ctx, "1") + // time.Sleep(30 * time.Second) } diff --git a/service/service.go b/service/service.go index 75567835..970914ec 100644 --- a/service/service.go +++ b/service/service.go @@ -137,7 +137,7 @@ type service struct { arrays *sync.Map mode string iscsiClient goiscsi.ISCSIinterface - fcConnector fcConnector //gobrick connectors + fcConnector fcConnector // gobrick connectors iscsiConnector iSCSIConnector } @@ -170,7 +170,8 @@ func (s StorageArrayConfig) String() string { // modify the SP's interceptors, server options, or prevent the // server from starting by returning a non-nil error. 
func (s *service) BeforeServe( - ctx context.Context, sp *gocsi.StoragePlugin, lis net.Listener) error { + ctx context.Context, sp *gocsi.StoragePlugin, lis net.Listener, +) error { ctx, log := setRunIDContext(ctx, "start") var err error defer func() { @@ -195,8 +196,8 @@ func (s *service) BeforeServe( log.Infof("%s: %s", EnvNodeName, name) opts.LongNodeName = name var shortHostName string - //check its ip or not - var ipFormat = regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) + // check its ip or not + ipFormat := regexp.MustCompile(`(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`) if ipFormat.MatchString(name) { shortHostName = name } else { @@ -249,7 +250,7 @@ func (s *service) BeforeServe( opts.IsVolumeHealthMonitorEnabled = false } - //Global mount directory will be used to node unstage volumes mounted via CSI-Unity v1.0 or v1.1 + // Global mount directory will be used to node unstage volumes mounted via CSI-Unity v1.0 or v1.1 if pvtmountDir, ok := csictx.LookupEnv(ctx, EnvPvtMountDir); ok { opts.PvtMountDir = pvtmountDir } @@ -267,7 +268,7 @@ func (s *service) BeforeServe( s.iscsiClient = goiscsi.NewLinuxISCSI(iscsiOpts) s.opts = opts - //Update the storage array list + // Update the storage array list runid := fmt.Sprintf("config-%d", 0) ctx, log = setRunIDContext(ctx, runid) s.arrays = new(sync.Map) @@ -276,12 +277,12 @@ func (s *service) BeforeServe( return err } syncNodeInfoChan = make(chan bool) - //Dynamically load the config + // Dynamically load the config go s.loadDynamicConfig(ctx, DriverSecret, DriverConfig) - //Add node information to hosts + // Add node information to hosts if s.mode == "node" { - //Get Host Name + // Get Host Name if s.opts.NodeName == "" { return status.Error(codes.InvalidArgument, "'Node Name' has not been configured. Set environment variable X_CSI_UNITY_NODENAME") } @@ -386,7 +387,7 @@ func (s *service) loadDynamicConfig(ctx context.Context, secretFile, configFile log.Info("Dynamic config load goroutine invoked") - //Dynamic update of config + // Dynamic update of config vc := viper.New() vc.AutomaticEnv() vc.SetConfigFile(configFile) @@ -403,7 +404,7 @@ func (s *service) loadDynamicConfig(ctx context.Context, secretFile, configFile s.syncDriverConfig(ctx, vc) }) - //Dynamic update of secret + // Dynamic update of secret watcher, _ := fsnotify.NewWatcher() defer watcher.Close() @@ -423,7 +424,7 @@ func (s *service) loadDynamicConfig(ctx context.Context, secretFile, configFile if err != nil { log.Debug("Driver configuration array length:", s.getStorageArrayLength()) log.Error("Invalid configuration in secret.yaml. 
Error:", err) - //return + // return } if s.mode == "node" { syncNodeInfoChan <- true @@ -576,16 +577,16 @@ func (s *service) syncDriverConfig(ctx context.Context, v *viper.Viper) { inputLogLevel := v.GetString(constants.ParamCSILogLevel) if inputLogLevel == "" { - //setting default log level to Info if input is invalid + // setting default log level to Info if input is invalid s.opts.LogLevel = "Info" } if s.opts.LogLevel != inputLogLevel { s.opts.LogLevel = inputLogLevel utils.ChangeLogLevel(s.opts.LogLevel) - //Change log level on gounity + // Change log level on gounity util.ChangeLogLevel(s.opts.LogLevel) - //Change log level on gocsi + // Change log level on gocsi // set X_CSI_LOG_LEVEL so that gocsi doesn't overwrite the loglevel set by us _ = os.Setenv(gocsi.EnvVarLogLevel, s.opts.LogLevel) log.Warnf("Log level changed to: %s", s.opts.LogLevel) @@ -609,7 +610,6 @@ func (s *service) syncDriverConfig(ctx context.Context, v *viper.Viper) { if v.IsSet(constants.ParamSyncNodeInfoTimeInterval) { s.opts.SyncNodeInfoTimeInterval = v.GetInt64(constants.ParamSyncNodeInfoTimeInterval) - } } @@ -649,8 +649,10 @@ func setLogFieldsInContext(ctx context.Context, logID string, logType string) (c return ctx, ulog } -var syncNodeLogCount int32 -var syncConfigLogCount int32 +var ( + syncNodeLogCount int32 + syncConfigLogCount int32 +) // Increment run id log func incrementLogID(ctx context.Context, runidPrefix string) (context.Context, *logrus.Entry) { @@ -753,10 +755,12 @@ func (lg *customLogger) Info(ctx context.Context, format string, args ...interfa log := utils.GetLogger() log.WithFields(getLogFields(ctx)).Infof(format, args...) } + func (lg *customLogger) Debug(ctx context.Context, format string, args ...interface{}) { log := utils.GetLogger() log.WithFields(getLogFields(ctx)).Debugf(format, args...) } + func (lg *customLogger) Error(ctx context.Context, format string, args ...interface{}) { log := utils.GetLogger() log.WithFields(getLogFields(ctx)).Errorf(format, args...) 
diff --git a/service/service_test.go b/service/service_test.go index 063c211d..0786f7f4 100644 --- a/service/service_test.go +++ b/service/service_test.go @@ -44,7 +44,7 @@ func TestSetRunIdContext(t *testing.T) { } func TestGetVolumeIdFromVolumeContext(t *testing.T) { - //When old id + // When old id id := getVolumeIDFromVolumeContext("id_1234") assert.True(t, id == "id_1234", "Expected id_1234 but found [%s]", id) id = getVolumeIDFromVolumeContext("name1234-arrid1234-id_1234") @@ -56,7 +56,7 @@ func TestGetVolumeIdFromVolumeContext(t *testing.T) { } func TestGetArrayIdFromVolumeContext(t *testing.T) { - //When old id + // When old id id, _ := testConf.service.getArrayIDFromVolumeContext("id_1234") assert.True(t, id == testConf.defaultArray, "Expected [%s] but found [%s]", testConf.defaultArray, id) id, _ = testConf.service.getArrayIDFromVolumeContext("name1234-arrid1234-id_1234") @@ -79,7 +79,7 @@ func TestSetArrayIdContext(t *testing.T) { fmt.Println(message) assert.True(t, strings.Contains(message, `runid=1111 msg="Hi This is log test1"`), "Log message not found") - //ctx, log, _ := GetRunidLog(ctx) + // ctx, log, _ := GetRunidLog(ctx) ctx, entry = setArrayIDContext(ctx, "arr1111") entry.Message = "Hi this is TestSetArrayIdContext" message, _ = entry.String() diff --git a/service/utils/emcutils.go b/service/utils/emcutils.go index 04ab39c3..1bfaacf6 100644 --- a/service/utils/emcutils.go +++ b/service/utils/emcutils.go @@ -124,7 +124,7 @@ func GetFCInitiators(ctx context.Context) ([]string, error) { continue } nodeNameStr := strings.TrimSpace(string(nodeName)) - //Ignore first 2 digits + // Ignore first 2 digits port := strings.Split(portNameStr, "x")[1] node := strings.Split(nodeNameStr, "x")[1] @@ -206,7 +206,6 @@ func GetSnapshotResponseFromSnapshot(snap *types.Snapshot, protocol, arrayID str // ArrayContains method does contains check operation func ArrayContains(stringArray []string, value string) bool { - for _, arrayValue := range stringArray { if value == arrayValue { return true @@ -217,7 +216,6 @@ func ArrayContains(stringArray []string, value string) bool { // ArrayContainsAll method checks if all elements of stringArray1 is present in stringArray2 func ArrayContainsAll(stringArray1 []string, stringArray2 []string) bool { - for _, arrayElement := range stringArray1 { if !ArrayContains(stringArray2, arrayElement) { return false @@ -240,7 +238,7 @@ func FindAdditionalWwns(stringArray1 []string, stringArray2 []string) []string { // IpsCompare checks if the given ip is present as IP or FQDN in the given list of host ips func IpsCompare(ctx context.Context, ip string, hostIps []string) (bool, []string) { log := GetRunidLogger(ctx) - var result = false + result := false var additionalIps []string for _, hostIP := range hostIps { @@ -248,10 +246,10 @@ func IpsCompare(ctx context.Context, ip string, hostIps []string) (bool, []strin log.Debug(fmt.Sprintf("Host Ip port %s matched Node IP", hostIP)) result = true } else { - //If HostIpPort is contains fqdn + // If HostIpPort contains fqdn lookupIps, err := net.LookupIP(hostIP) if err != nil { - //Lookup failed and hostIp is considered not to match Ip + // Lookup failed and hostIp is considered not to match Ip log.Info("Ip Lookup failed: ", err) additionalIps = append(additionalIps, hostIP) } else if ipListContains(lookupIps, ip) { @@ -292,7 +290,6 @@ func IPReachable(ctx context.Context, ip, port string, pingTimeout int) bool { log.Debug("Tcp test on IP", ip) _, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%s", ip, port), timeout) 
- if err != nil { log.Debugf("Interface IP %s is not reachable %v", ip, err) return false diff --git a/service/utils/logging.go b/service/utils/logging.go index 63b01451..beb0920f 100644 --- a/service/utils/logging.go +++ b/service/utils/logging.go @@ -27,8 +27,10 @@ import ( "github.com/sirupsen/logrus" ) -var singletonLog *logrus.Logger -var once sync.Once +var ( + singletonLog *logrus.Logger + once sync.Once +) const ( // Default log format will output [INFO]: 2006-01-02T15:04:05Z07:00 - Log message @@ -38,7 +40,7 @@ const ( // Formatter implements logrus.Formatter interface. type Formatter struct { - //logrus.TextFormatter + // logrus.TextFormatter // Timestamp format TimestampFormat string // Available standard keys: time, msg, lvl @@ -122,7 +124,7 @@ func GetLogger() *logrus.Logger { singletonLog = logrus.New() fmt.Println("csi-unity logger initiated. This should be called only once.") - //Setting default level to Info since the driver is yet to read secrect that has the debug level set + // Setting default level to Info since the driver is yet to read secret that has the debug level set singletonLog.Level = logrus.InfoLevel singletonLog.SetReportCaller(true) @@ -146,7 +148,6 @@ func GetLogger() *logrus.Logger { // ChangeLogLevel - change log level func ChangeLogLevel(logLevel string) { - switch strings.ToLower(logLevel) { case "debug": @@ -162,7 +163,7 @@ func ChangeLogLevel(logLevel string) { break case "info": - //Default level will be Info + // Default level will be Info fallthrough default: @@ -201,7 +202,7 @@ func GetRunidAndLogger(ctx context.Context) (string, *logrus.Entry) { tempLog := ctx.Value(UnityLogger) if tempLog != nil && reflect.TypeOf(tempLog) == reflect.TypeOf(&logrus.Entry{}) { - //rid = fmt.Sprintf("%s", tempLog.(*logrus.Logger).Data[RUNID]) + // rid = fmt.Sprintf("%s", tempLog.(*logrus.Logger).Data[RUNID]) return rid, tempLog.(*logrus.Entry) } return rid, nil diff --git a/service/validator.go b/service/validator.go index 073891f5..5eda773f 100644 --- a/service/validator.go +++ b/service/validator.go @@ -111,7 +111,7 @@ func valVolumeCaps(vcs []*csi.VolumeCapability, protocol string) (bool, string) } else if protocol == FC || protocol == ISCSI { supported = false reason = errNoMultiNodeReader - } //else NFS case + } // else NFS case break case csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER: fallthrough @@ -135,7 +135,6 @@ func valVolumeCaps(vcs []*csi.VolumeCapability, protocol string) (bool, string) // validateCreateFsFromSnapshot - Validates idempotency of an existing snapshot created from a filesystem func validateCreateFsFromSnapshot(ctx context.Context, sourceFilesystemResp *types.Filesystem, storagePool string, tieringPolicy, hostIoSize int64, thin, dataReduction bool) error { - rid, _ := utils.GetRunidAndLogger(ctx) // Validate the storagePool parameter @@ -144,25 +143,25 @@ func validateCreateFsFromSnapshot(ctx context.Context, sourceFilesystemResp *typ sourceFilesystemResp.FileContent.Pool.ID, storagePool)) } - //Validate the thinProvisioned parameter + // Validate the thinProvisioned parameter if sourceFilesystemResp.FileContent.IsThinEnabled != thin { return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source filesystem thin provision %v is different than the requested thin provision %v", sourceFilesystemResp.FileContent.IsThinEnabled, thin)) } - //Validate the dataReduction parameter + // Validate the dataReduction parameter if sourceFilesystemResp.FileContent.IsDataReductionEnabled != dataReduction { return 
status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source filesystem data reduction %v is different than the requested data reduction %v", sourceFilesystemResp.FileContent.IsDataReductionEnabled, dataReduction)) } - //Validate the tieringPolicy parameter + // Validate the tieringPolicy parameter if int64(sourceFilesystemResp.FileContent.TieringPolicy) != tieringPolicy { return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source filesystem tiering policy %v is different than the requested tiering policy %v", sourceFilesystemResp.FileContent.TieringPolicy, tieringPolicy)) } - //Validate the hostIOSize parameter + // Validate the hostIOSize parameter if sourceFilesystemResp.FileContent.HostIOSize != hostIoSize { return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source filesystem host IO size %v is different than the requested host IO size %v", sourceFilesystemResp.FileContent.HostIOSize, hostIoSize)) @@ -173,7 +172,6 @@ func validateCreateFsFromSnapshot(ctx context.Context, sourceFilesystemResp *typ // validateCreateVolumeFromSource - Validates idempotency of an existing volume created from a volume func validateCreateVolumeFromSource(ctx context.Context, sourceVolResp *types.Volume, storagePool string, tieringPolicy, size int64, thin, dataReduction, skipSizeValidation bool) error { - rid, _ := utils.GetRunidAndLogger(ctx) // Validate the storagePool parameter @@ -181,17 +179,17 @@ func validateCreateVolumeFromSource(ctx context.Context, sourceVolResp *types.Vo return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume storage pool %s is different than the requested storage pool %s", sourceVolResp.VolumeContent.Pool.ID, storagePool)) } - //Validate the tieringPolicy parameter + // Validate the tieringPolicy parameter if int64(sourceVolResp.VolumeContent.TieringPolicy) != tieringPolicy { return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume tiering policy %v is different than the requested tiering policy %v", sourceVolResp.VolumeContent.TieringPolicy, tieringPolicy)) } - //Validate the thinProvisioned parameter + // Validate the thinProvisioned parameter if sourceVolResp.VolumeContent.IsThinEnabled != thin { return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume thin provision %v is different than the requested thin provision %v", sourceVolResp.VolumeContent.IsThinEnabled, thin)) } - //Validate the dataReduction parameter + // Validate the dataReduction parameter if sourceVolResp.VolumeContent.IsDataReductionEnabled != dataReduction { return status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(rid, "Source volume data reduction %v is different than the requested data reduction %v", sourceVolResp.VolumeContent.IsDataReductionEnabled, dataReduction)) @@ -212,7 +210,6 @@ func validateCreateVolumeFromSource(ctx context.Context, sourceVolResp *types.Vo // ValidateCreateVolumeRequest - Validates all mandatory parameters in create volume request func ValidateCreateVolumeRequest(ctx context.Context, req *csi.CreateVolumeRequest) (protocol, storagePool string, size, tieringPolicy, hostIoSize int64, thin, dataReduction bool, err error) { - ctx, log, rid := GetRunidLog(ctx) if req.GetName() == "" { @@ -227,8 +224,8 @@ func ValidateCreateVolumeRequest(ctx context.Context, req *csi.CreateVolumeReque protocol = FC } - //We dont have protocol from volume context ID and hence considering protocol from storage class as the - 
//primary protocol + // We don't have protocol from volume context ID and hence considering protocol from storage class as the + // primary protocol protocol, err = ValidateAndGetProtocol(ctx, protocol, "") if err != nil { return "", "", 0, 0, 0, false, false, err @@ -297,7 +294,6 @@ func ValidateCreateVolumeRequest(ctx context.Context, req *csi.CreateVolumeReque // ValidateControllerPublishRequest - method to validate Controller publish volume request func ValidateControllerPublishRequest(ctx context.Context, req *csi.ControllerPublishVolumeRequest, contextProtocol string) (protocol, nodeID string, err error) { - ctx, _, rid := GetRunidLog(ctx) vc := req.GetVolumeCapability() diff --git a/test/unit-test/features/unit.feature b/test/unit-test/features/unit.feature index fc9c2351..11e74d42 100644 --- a/test/unit-test/features/unit.feature +++ b/test/unit-test/features/unit.feature @@ -802,3 +802,7 @@ Feature: CSI interface And when I call DeleteVolume Then there are no errors + Scenario: GetCapacity successfully requested + Given a CSI service + When I Call GetCapacity + Then there are no errors diff --git a/test/unit-test/unit_main_test.go b/test/unit-test/unit_main_test.go index a2333505..49bbd278 100644 --- a/test/unit-test/unit_main_test.go +++ b/test/unit-test/unit_main_test.go @@ -32,8 +32,10 @@ import ( "google.golang.org/grpc" ) -var grpcClient *grpc.ClientConn -var stop func() +var ( + grpcClient *grpc.ClientConn + stop func() +) // To parse the secret json file type StorageArrayList struct { diff --git a/test/unit-test/unit_test.go b/test/unit-test/unit_test.go index 1e5c77e7..9d486c69 100644 --- a/test/unit-test/unit_test.go +++ b/test/unit-test/unit_test.go @@ -41,6 +41,8 @@ type feature struct { createSnapshotResponse *csi.CreateSnapshotResponse deleteSnapshotRequest *csi.DeleteSnapshotRequest deleteSnapshotResponse *csi.DeleteSnapshotResponse + getCapacityRequest *csi.GetCapacityRequest + getCapacityResponse *csi.GetCapacityResponse capability *csi.VolumeCapability capabilities []*csi.VolumeCapability validateVolumeCapabilitiesRequest *csi.ValidateVolumeCapabilitiesRequest @@ -1242,6 +1244,45 @@ func (f *feature) nodeExpandVolume(volID, volPath string) error { return err } +func (f *feature) whenICallGetCapacity() error { + ctx := context.Background() + client := csi.NewControllerClient(grpcClient) + + params := make(map[string]string) + params["storagePool"] = os.Getenv("STORAGE_POOL") + params["thinProvisioned"] = "true" + params["isDataReductionEnabled"] = "false" + params["tieringPolicy"] = "0" + params["description"] = "CSI Volume Unit Test" + params["arrayId"] = os.Getenv("arrayId") + params["nasServer"] = os.Getenv("NAS_SERVER") + + capability := new(csi.VolumeCapability) + mount := new(csi.VolumeCapability_MountVolume) + mountType := new(csi.VolumeCapability_Mount) + mountType.Mount = mount + capability.AccessType = mountType + accessMode := new(csi.VolumeCapability_AccessMode) + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + capability.AccessMode = accessMode + f.capability = capability + capabilities := make([]*csi.VolumeCapability, 0) + capabilities = append(capabilities, capability) + + f.getCapacityRequest = &csi.GetCapacityRequest{VolumeCapabilities: capabilities, Parameters: params} + response, err := client.GetCapacity(ctx, f.getCapacityRequest) + if err != nil { + fmt.Printf("GetCapacity %s:\n", err.Error()) + f.addError(err) + return err + } + if err == nil { + fmt.Printf("Maximum Volume Size: %v \n", response.MaximumVolumeSize) + } + 
f.getCapacityResponse = response + return nil +} + func FeatureContext(s *godog.Suite) { f := &feature{} s.Step(`^a CSI service$`, f.aCSIService) @@ -1290,5 +1331,5 @@ func FeatureContext(s *godog.Suite) { s.Step(`^When I call GetPluginCapabilities$`, f.whenICallGetPluginCapabilities) s.Step(`^When I call GetPluginInfo$`, f.whenICallGetPluginInfo) s.Step(`^when I call Node Expand Volume$`, f.whenICallNodeExpandVolume) - + s.Step(`^I Call GetCapacity$`, f.whenICallGetCapacity) }