diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index 66903245..9268db7d 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -3705,7 +3705,7 @@ var _ = Describe("CSIControllerService", func() { RemoteResourceId: validRemoteGroupID, StorageElementPairs: []gopowerstore.StorageElementPair{{ LocalStorageElementId: validBaseVolID, - RemoteStorageElementId: validRemoteVolId, + RemoteStorageElementId: validRemoteVolID, }}, }, nil) diff --git a/pkg/controller/replication.go b/pkg/controller/replication.go index 760ea3e1..5c8b88e8 100644 --- a/pkg/controller/replication.go +++ b/pkg/controller/replication.go @@ -231,7 +231,7 @@ func EnsureReplicationRuleExists(ctx context.Context, arr *array.PowerStoreArray } // GetReplicationCapabilities is a getter for replication capabilities -func (s *Service) GetReplicationCapabilities(ctx context.Context, req *csiext.GetReplicationCapabilityRequest) (*csiext.GetReplicationCapabilityResponse, error) { +func (s *Service) GetReplicationCapabilities(_ context.Context, _ *csiext.GetReplicationCapabilityRequest) (*csiext.GetReplicationCapabilityResponse, error) { rep := new(csiext.GetReplicationCapabilityResponse) rep.Capabilities = []*csiext.ReplicationCapability{ { @@ -337,7 +337,7 @@ func (s *Service) ExecuteAction(ctx context.Context, } client := pstoreClient var execAction gopowerstore.ActionType - var params *gopowerstore.FailoverParams = nil + var params *gopowerstore.FailoverParams switch action { case csiext.ActionTypes_FAILOVER_REMOTE.String(): execAction = gopowerstore.RS_ACTION_FAILOVER diff --git a/pkg/identity/identity.go b/pkg/identity/identity.go index 207e9cdd..c871b6af 100644 --- a/pkg/identity/identity.go +++ b/pkg/identity/identity.go @@ -45,7 +45,7 @@ type Service struct { } // GetPluginInfo returns general information about plugin (driver) such as name, version and manifest -func (s Service) GetPluginInfo(context context.Context, request 
*csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { +func (s Service) GetPluginInfo(_ context.Context, _ *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { return &csi.GetPluginInfoResponse{ Name: s.name, VendorVersion: s.version, @@ -54,7 +54,7 @@ func (s Service) GetPluginInfo(context context.Context, request *csi.GetPluginIn } // GetPluginCapabilities returns capabilities that are supported by the driver -func (s Service) GetPluginCapabilities(context context.Context, request *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { +func (s Service) GetPluginCapabilities(_ context.Context, _ *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { var rep csi.GetPluginCapabilitiesResponse rep.Capabilities = []*csi.PluginCapability{ { @@ -91,6 +91,6 @@ func (s Service) GetPluginCapabilities(context context.Context, request *csi.Get } // Probe returns current state of the driver and if it is ready to receive requests -func (s Service) Probe(context context.Context, request *csi.ProbeRequest) (*csi.ProbeResponse, error) { +func (s Service) Probe(_ context.Context, _ *csi.ProbeRequest) (*csi.ProbeResponse, error) { return &csi.ProbeResponse{Ready: &wrappers.BoolValue{Value: s.ready}}, nil } diff --git a/pkg/interceptors/interceptors.go b/pkg/interceptors/interceptors.go index 040a5cce..48860a10 100644 --- a/pkg/interceptors/interceptors.go +++ b/pkg/interceptors/interceptors.go @@ -79,7 +79,7 @@ type lockProvider struct { volNameLocks map[string]gosync.TryLocker } -func (i *lockProvider) GetLockWithID(ctx context.Context, id string) (gosync.TryLocker, error) { +func (i *lockProvider) GetLockWithID(_ context.Context, id string) (gosync.TryLocker, error) { i.volIDLocksL.Lock() defer i.volIDLocksL.Unlock() @@ -92,7 +92,7 @@ func (i *lockProvider) GetLockWithID(ctx context.Context, id string) (gosync.Try return lock, nil } -func (i *lockProvider) GetLockWithName(ctx context.Context, name string) 
(gosync.TryLocker, error) { +func (i *lockProvider) GetLockWithName(_ context.Context, name string) (gosync.TryLocker, error) { i.volNameLocksL.Lock() defer i.volNameLocksL.Unlock() @@ -168,7 +168,7 @@ func (i *interceptor) createMetadataRetrieverClient(ctx context.Context) { const pending = "pending" func (i *interceptor) nodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest, - info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, + _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler, ) (res interface{}, resErr error) { lock, err := i.opts.locker.GetLockWithID(ctx, req.VolumeId) if err != nil { @@ -188,7 +188,7 @@ func (i *interceptor) nodeStageVolume(ctx context.Context, req *csi.NodeStageVol } func (i *interceptor) nodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest, - info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, + _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler, ) (res interface{}, resErr error) { lock, err := i.opts.locker.GetLockWithID(ctx, req.VolumeId) if err != nil { @@ -206,7 +206,7 @@ func (i *interceptor) nodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag } func (i *interceptor) createVolume(ctx context.Context, req *csi.CreateVolumeRequest, - info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, + _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler, ) (res interface{}, resErr error) { lock, err := i.opts.locker.GetLockWithID(ctx, req.Name) if err != nil { diff --git a/pkg/node/base.go b/pkg/node/base.go index 0853c8bd..ecf6f707 100644 --- a/pkg/node/base.go +++ b/pkg/node/base.go @@ -255,7 +255,7 @@ func getTargetMount(ctx context.Context, target string, fs fs.Interface) (gofsut return targetMount, found, nil } -func getMounts(ctx context.Context, fs fs.Interface) ([]gofsutil.Info, error) { +func getMounts(_ context.Context, fs fs.Interface) ([]gofsutil.Info, error) { data, err := consistentRead(procMountsPath, procMountsRetries, fs) if err != nil { return []gofsutil.Info{}, err @@ -349,7 +349,7 
@@ func getRWModeString(isRO bool) string { return "rw" } -func format(ctx context.Context, source, fsType string, fs fs.Interface, opts ...string) error { +func format(_ context.Context, source, fsType string, fs fs.Interface, opts ...string) error { f := log.Fields{ "source": source, "fsType": fsType, diff --git a/pkg/node/node.go b/pkg/node/node.go index e5512ca5..5ea61860 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -1000,7 +1000,7 @@ func (s *Service) nodeExpandRawBlockVolume(ctx context.Context, volumeWWN string } // NodeGetCapabilities returns supported features by the node service -func (s *Service) NodeGetCapabilities(context context.Context, request *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { +func (s *Service) NodeGetCapabilities(_ context.Context, _ *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { newCap := func(cap csi.NodeServiceCapability_RPC_Type) *csi.NodeServiceCapability { return &csi.NodeServiceCapability{ Type: &csi.NodeServiceCapability_Rpc{ @@ -1034,7 +1034,7 @@ func (s *Service) NodeGetCapabilities(context context.Context, request *csi.Node } // NodeGetInfo returns id of the node and topology constraints -func (s *Service) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { +func (s *Service) NodeGetInfo(ctx context.Context, _ *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { // Create the topology keys // /-: true resp := &csi.NodeGetInfoResponse{ @@ -1066,16 +1066,15 @@ func (s *Service) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) if err != nil { log.Errorf("couldn't discover NVMeFC targets") continue - } else { - for _, target := range NVMeFCTargets { - err = s.nvmeLib.NVMeFCConnect(target, false) - if err != nil { - log.Errorf("couldn't connect to NVMeFC target") - } else { - nvmefcConnectCount = nvmefcConnectCount + 1 - otherTargets := s.nvmeTargets[arr.GlobalID] - s.nvmeTargets[arr.GlobalID] 
= append(otherTargets, target.TargetNqn) - } + } + for _, target := range NVMeFCTargets { + err = s.nvmeLib.NVMeFCConnect(target, false) + if err != nil { + log.Errorf("couldn't connect to NVMeFC target") + } else { + nvmefcConnectCount = nvmefcConnectCount + 1 + otherTargets := s.nvmeTargets[arr.GlobalID] + s.nvmeTargets[arr.GlobalID] = append(otherTargets, target.TargetNqn) } } } @@ -1173,9 +1172,8 @@ func (s *Service) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) continue } break - } else { - log.Debugf("Portal %s is not rechable from the node", address.Portal) } + log.Debugf("Portal %s is not rechable from the node", address.Portal) } // login is also performed as a part of ConnectVolume by using dynamically created chap credentials, In case if it fails here if len(iscsiTargets) > 0 { @@ -1212,7 +1210,7 @@ func (s *Service) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) } } - var maxVolumesPerNode int64 = 0 + var maxVolumesPerNode int64 // Setting maxVolumesPerNode using the value of field maxPowerstoreVolumesPerNode specified in values.yaml if s.opts.MaxVolumesPerNode > 0 { diff --git a/pkg/node/node_connectivity_checker.go b/pkg/node/node_connectivity_checker.go index 756a1e7c..cbf34341 100644 --- a/pkg/node/node_connectivity_checker.go +++ b/pkg/node/node_connectivity_checker.go @@ -54,7 +54,7 @@ func (s *Service) startAPIService(ctx context.Context) { } // apiRouter serves http requests -func (s *Service) apiRouter(ctx context.Context) { +func (s *Service) apiRouter(_ context.Context) { log.Infof("starting http server on port %s", common.APIPort) // create a new mux router router := mux.NewRouter() @@ -76,7 +76,7 @@ func (s *Service) apiRouter(ctx context.Context) { } // connectivityStatus handler returns array connectivity status -func connectivityStatus(w http.ResponseWriter, r *http.Request) { +func connectivityStatus(w http.ResponseWriter, _ *http.Request) { log.Infof("connectivityStatus called, status is %v \n", 
probeStatus) // w.Header().Set("Content-Type", "application/json") if probeStatus == nil { @@ -211,7 +211,7 @@ func (s *Service) testConnectivityAndUpdateStatus(ctx context.Context, array *ar } // nodeProbe function used to store the status of array -func (s *Service) nodeProbe(timeOutCtx context.Context, array *array.PowerStoreArray) error { +func (s *Service) nodeProbe(_ context.Context, array *array.PowerStoreArray) error { // try to get the host host, err := array.Client.GetHostByName(context.Background(), s.nodeID) // possibly NFS could be there. @@ -255,28 +255,27 @@ func (s *Service) nodeProbe(timeOutCtx context.Context, array *array.PowerStoreA } } return fmt.Errorf("no active fc sessions") - } else { - // check if iscsi sessions are active - // if !s.useNVME && !s.useFC { - log.Debugf("Checking if iscsi sessions are active on node or not") - sessions, _ := s.iscsiLib.GetSessions() - for _, target := range s.iscsiTargets[array.GlobalID] { - for _, session := range sessions { - log.Debugf("matching %v with %v", target, session) - if session.Target == target && session.ISCSISessionState == goiscsi.ISCSISessionStateLOGGEDIN { - if s.useNFS { - s.useNFS = false - } - return nil + } + // check if iscsi sessions are active + // if !s.useNVME && !s.useFC { + log.Debugf("Checking if iscsi sessions are active on node or not") + sessions, _ := s.iscsiLib.GetSessions() + for _, target := range s.iscsiTargets[array.GlobalID] { + for _, session := range sessions { + log.Debugf("matching %v with %v", target, session) + if session.Target == target && session.ISCSISessionState == goiscsi.ISCSISessionStateLOGGEDIN { + if s.useNFS { + s.useNFS = false } + return nil } } - if s.useNFS { - log.Infof("Host Entry found but failed to login to iscsi target, seems to be this worker has only NFS") - return nil - } - return fmt.Errorf("no active iscsi sessions") } + if s.useNFS { + log.Infof("Host Entry found but failed to login to iscsi target, seems to be this worker has only NFS") 
+ return nil + } + return fmt.Errorf("no active iscsi sessions") } // populateTargetsInCache checks if nvmeTargets or iscsiTargets in cache is empty, try to fetch the targets from array and populate the cache @@ -355,9 +354,8 @@ func (s *Service) populateTargetsInCache(array *array.PowerStoreArray) { s.iscsiTargets[array.GlobalID] = append(otherTargets, target.Target) } break - } else { - log.Debugf("Portal %s is not rechable from the node", address.Portal) } + log.Debugf("Portal %s is not rechable from the node", address.Portal) } } diff --git a/pkg/node/publisher.go b/pkg/node/publisher.go index f550e991..076e6ab4 100644 --- a/pkg/node/publisher.go +++ b/pkg/node/publisher.go @@ -56,7 +56,7 @@ func (sp *SCSIPublisher) Publish(ctx context.Context, logFields log.Fields, fs f return sp.publishMount(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) } -func (sp *SCSIPublisher) publishBlock(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { +func (sp *SCSIPublisher) publishBlock(ctx context.Context, logFields log.Fields, fs fs.Interface, _ *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { log.WithFields(logFields).Info("start publishing as block device") if isRO {