From fd9cae003116af1c033915fa1ee14d0d2dc0154a Mon Sep 17 00:00:00 2001 From: Akshay Saini <109056238+AkshaySainiDell@users.noreply.github.com> Date: Tue, 12 Nov 2024 00:55:35 -0600 Subject: [PATCH 1/8] Fix mountOptions not used during node stage volume --- pkg/common/common.go | 9 + pkg/node/node.go | 4 +- pkg/node/publisher.go | 333 ++++++------- pkg/node/stager.go | 1029 +++++++++++++++++++++-------------------- 4 files changed, 695 insertions(+), 680 deletions(-) diff --git a/pkg/common/common.go b/pkg/common/common.go index 0e850614..7037bac8 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -525,3 +525,12 @@ func ReachableEndPoint(endpoint string) bool { } return true } + +func GetMountFlags(cap *csi.VolumeCapability) []string { + if cap != nil { + if mountCap := cap.GetMount(); mountCap != nil { + return mountCap.GetMountFlags() + } + } + return nil +} diff --git a/pkg/node/node.go b/pkg/node/node.go index 31db87dc..d7d0d46e 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -885,7 +885,9 @@ func (s *Service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to find mount info for (%s) with error (%s)", vol.Name, err.Error())) } - err = s.Fs.GetUtil().Mount(ctx, disklocation, targetmount, "") + + mntFlags := common.GetMountFlags(req.GetVolumeCapability()) + err = s.Fs.GetUtil().Mount(ctx, disklocation, targetmount, "", mntFlags...) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to find mount info for (%s) with error (%s)", vol.Name, err.Error())) diff --git a/pkg/node/publisher.go b/pkg/node/publisher.go index 076e6ab4..3ab6159e 100644 --- a/pkg/node/publisher.go +++ b/pkg/node/publisher.go @@ -16,169 +16,170 @@ * */ -package node - -import ( - "context" - - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/dell/csi-powerstore/v2/pkg/common/fs" - log "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// VolumePublisher allows to node publish a volume -type VolumePublisher interface { - Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, - cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) -} - -// SCSIPublisher implementation of NodeVolumePublisher for SCSI based (FC, iSCSI) volumes -type SCSIPublisher struct { - isBlock bool -} - -// Publish publishes volume as either raw block or mount by mounting it to the target path -func (sp *SCSIPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { - published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) - if err != nil { - return nil, err - } - - if published { - return &csi.NodePublishVolumeResponse{}, nil - } - - if sp.isBlock { - return sp.publishBlock(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) - } - return sp.publishMount(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) -} - -func (sp *SCSIPublisher) publishBlock(ctx context.Context, logFields log.Fields, fs fs.Interface, _ *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { - log.WithFields(logFields).Info("start publishing as block device") - - if isRO { - return nil, status.Error(codes.InvalidArgument, "read only not supported for Block Volume") - } - - if _, err := 
fs.MkFileIdempotent(targetPath); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create target file %s: %s", targetPath, err.Error()) - } - log.WithFields(logFields).Info("target path successfully created") - - if err := fs.GetUtil().BindMount(ctx, stagingPath, targetPath); err != nil { - return nil, status.Errorf(codes.Internal, - "error bind disk %s to target path: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Info("volume successfully binded") - - return &csi.NodePublishVolumeResponse{}, nil -} - -func (sp *SCSIPublisher) publishMount(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { - if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER { - // MULTI_WRITER not supported for mount volumes - return nil, status.Error(codes.Unimplemented, "Mount volumes do not support AccessMode MULTI_NODE_MULTI_WRITER") - } - - if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { - // Warning in case of MULTI_NODE_READER_ONLY for mount volumes - log.Warningf("Mount volume with the AccessMode ReadOnlyMany") - } - - var opts []string - mountCap := cap.GetMount() - mountFsType := mountCap.GetFsType() - mntFlags := mountCap.GetMountFlags() - if mountFsType == "xfs" { - mntFlags = append(mntFlags, "nouuid") - } - targetFS := mountCap.GetFsType() - if targetFS == "xfs" { - opts = []string{"-m", "crc=0,finobt=0"} - } - if err := fs.MkdirAll(targetPath, 0o750); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create target dir with Mkdirall %s: %s", targetPath, err.Error()) - } - - log.WithFields(logFields).Info("target dir successfully created") - - curFS, err := fs.GetUtil().GetDiskFormat(ctx, stagingPath) - if err != nil { - return nil, status.Errorf(codes.Internal, - "error while trying to detect fs for staging path %s: %s", stagingPath, err.Error()) - } - - if curFS != "" && targetFS != "" && curFS != targetFS { - return nil, status.Errorf(codes.FailedPrecondition, - "filesystem mismatch. 
Target device already formatted to %s mount spec require %s", - curFS, targetFS) - } - - if curFS == "" { - log.WithFields(logFields).Infof("no filesystem found on staged disk %s", stagingPath) - if isRO { - return nil, status.Errorf(codes.FailedPrecondition, - "RO mount required but no fs detected on staged volume %s", stagingPath) - } - - if err := format(ctx, stagingPath, targetFS, fs, opts...); err != nil { - return nil, status.Errorf(codes.Internal, - "can't format staged device %s: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Infof("staged disk %s successfully formatted to %s", stagingPath, targetFS) - } - if isRO { - mntFlags = append(mntFlags, "ro") - } - - if err := fs.GetUtil().Mount(ctx, stagingPath, targetPath, targetFS, mntFlags...); err != nil { - return nil, status.Errorf(codes.Internal, - "error performing mount for staging path %s: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Info("volume successfully mounted") - - return &csi.NodePublishVolumeResponse{}, nil -} - -// NFSPublisher implementation of NodeVolumePublisher for NFS volumes -type NFSPublisher struct{} - -// Publish publishes nfs volume by mounting it to the target path -func (np *NFSPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, - cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string, -) (*csi.NodePublishVolumeResponse, error) { - published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) - if err != nil { - return nil, err - } - - if published { - return &csi.NodePublishVolumeResponse{}, nil - } - - if err := fs.MkdirAll(targetPath, 0o750); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create target folder %s: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Info("target path successfully created") - - mountCap := cap.GetMount() - mntFlags := mountCap.GetMountFlags() - - if isRO { - mntFlags = append(mntFlags, "ro") - } - - if err := fs.GetUtil().BindMount(ctx, stagingPath, targetPath, mntFlags...); err != nil { - return nil, status.Errorf(codes.Internal, - "error bind disk %s to target path: %s", stagingPath, err.Error()) - } - - log.WithFields(logFields).Info("volume successfully binded") - return &csi.NodePublishVolumeResponse{}, nil -} + package node + + import ( + "context" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/dell/csi-powerstore/v2/pkg/common" + "github.com/dell/csi-powerstore/v2/pkg/common/fs" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + ) + + // VolumePublisher allows to node publish a volume + type VolumePublisher interface { + Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, + cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) + } + + // SCSIPublisher implementation of NodeVolumePublisher for SCSI based (FC, iSCSI) volumes + type SCSIPublisher struct { + isBlock bool + } + + // Publish publishes volume as either raw block or mount by mounting it to the target path + func (sp *SCSIPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { + published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) + if err != nil { + return nil, err + } + + if published { + return &csi.NodePublishVolumeResponse{}, nil + } + + if sp.isBlock 
{ + return sp.publishBlock(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) + } + return sp.publishMount(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) + } + + func (sp *SCSIPublisher) publishBlock(ctx context.Context, logFields log.Fields, fs fs.Interface, _ *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { + log.WithFields(logFields).Info("start publishing as block device") + + if isRO { + return nil, status.Error(codes.InvalidArgument, "read only not supported for Block Volume") + } + + if _, err := fs.MkFileIdempotent(targetPath); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create target file %s: %s", targetPath, err.Error()) + } + log.WithFields(logFields).Info("target path successfully created") + + if err := fs.GetUtil().BindMount(ctx, stagingPath, targetPath); err != nil { + return nil, status.Errorf(codes.Internal, + "error bind disk %s to target path: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Info("volume successfully binded") + + return &csi.NodePublishVolumeResponse{}, nil + } + + func (sp *SCSIPublisher) publishMount(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { + if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER { + // MULTI_WRITER not supported for mount volumes + return nil, status.Error(codes.Unimplemented, "Mount volumes do not support AccessMode MULTI_NODE_MULTI_WRITER") + } + + if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { + // Warning in case of MULTI_NODE_READER_ONLY for mount volumes + log.Warningf("Mount volume with the AccessMode ReadOnlyMany") + } + + var opts []string + mountCap := cap.GetMount() + mountFsType := mountCap.GetFsType() + mntFlags := common.GetMountFlags(cap) + if mountFsType == "xfs" { + mntFlags = append(mntFlags, "nouuid") + } + targetFS := mountCap.GetFsType() + if targetFS == "xfs" { + opts = []string{"-m", "crc=0,finobt=0"} + } + if err := fs.MkdirAll(targetPath, 0o750); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create target dir with Mkdirall %s: %s", targetPath, err.Error()) + } + + log.WithFields(logFields).Info("target dir successfully created") + + curFS, err := fs.GetUtil().GetDiskFormat(ctx, stagingPath) + if err != nil { + return nil, status.Errorf(codes.Internal, + "error while trying to detect fs for staging path %s: %s", stagingPath, err.Error()) + } + + if curFS != "" && targetFS != "" && curFS != targetFS { + return nil, status.Errorf(codes.FailedPrecondition, + "filesystem mismatch. 
Target device already formatted to %s mount spec require %s", + curFS, targetFS) + } + + if curFS == "" { + log.WithFields(logFields).Infof("no filesystem found on staged disk %s", stagingPath) + if isRO { + return nil, status.Errorf(codes.FailedPrecondition, + "RO mount required but no fs detected on staged volume %s", stagingPath) + } + + if err := format(ctx, stagingPath, targetFS, fs, opts...); err != nil { + return nil, status.Errorf(codes.Internal, + "can't format staged device %s: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Infof("staged disk %s successfully formatted to %s", stagingPath, targetFS) + } + if isRO { + mntFlags = append(mntFlags, "ro") + } + + if err := fs.GetUtil().Mount(ctx, stagingPath, targetPath, targetFS, mntFlags...); err != nil { + return nil, status.Errorf(codes.Internal, + "error performing mount for staging path %s: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Info("volume successfully mounted") + + return &csi.NodePublishVolumeResponse{}, nil + } + + // NFSPublisher implementation of NodeVolumePublisher for NFS volumes + type NFSPublisher struct{} + + // Publish publishes nfs volume by mounting it to the target path + func (np *NFSPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, + cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string, + ) (*csi.NodePublishVolumeResponse, error) { + published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) + if err != nil { + return nil, err + } + + if published { + return &csi.NodePublishVolumeResponse{}, nil + } + + if err := fs.MkdirAll(targetPath, 0o750); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create target folder %s: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Info("target path successfully created") + + mntFlags := common.GetMountFlags(cap) + + if isRO { + mntFlags = append(mntFlags, "ro") + } + + if err := fs.GetUtil().BindMount(ctx, stagingPath, targetPath, mntFlags...); err != nil { + return nil, status.Errorf(codes.Internal, + "error bind disk %s to target path: %s", stagingPath, err.Error()) + } + + log.WithFields(logFields).Info("volume successfully binded") + return &csi.NodePublishVolumeResponse{}, nil + } + \ No newline at end of file diff --git a/pkg/node/stager.go b/pkg/node/stager.go index e50545ec..3bbdf07a 100644 --- a/pkg/node/stager.go +++ b/pkg/node/stager.go @@ -16,516 +16,519 @@ * */ -package node - -import ( - "context" - "fmt" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/dell/csi-powerstore/v2/pkg/array" - "github.com/dell/csi-powerstore/v2/pkg/common" - "github.com/dell/csi-powerstore/v2/pkg/common/fs" - "github.com/dell/gobrick" - "github.com/dell/gopowerstore" - log "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const ( - procMountsPath = "/proc/self/mountinfo" - procMountsRetries = 15 -) - -// VolumeStager allows to node stage a volume -type VolumeStager interface { - Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, logFields log.Fields, fs fs.Interface, id string, isRemote bool) (*csi.NodeStageVolumeResponse, error) -} - -// ReachableEndPoint checks if the endpoint is reachable or not -var ReachableEndPoint = common.ReachableEndPoint - -// SCSIStager implementation of NodeVolumeStager for SCSI based (FC, iSCSI) volumes -type SCSIStager struct { - useFC bool - useNVME bool - 
iscsiConnector ISCSIConnector - nvmeConnector NVMEConnector - fcConnector FcConnector -} - -// Stage stages volume by connecting it through either FC or iSCSI and creating bind mount to staging path -func (s *SCSIStager) Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, - logFields log.Fields, fs fs.Interface, id string, isRemote bool, -) (*csi.NodeStageVolumeResponse, error) { - // append additional path to be able to do bind mounts - stagingPath := getStagingPath(ctx, req.GetStagingTargetPath(), id) - - publishContext, err := readSCSIInfoFromPublishContext(req.PublishContext, s.useFC, s.useNVME, isRemote) - if err != nil { - return nil, err - } - - logFields["ID"] = id - if s.useNVME { - if s.useFC { - logFields["Targets"] = publishContext.nvmefcTargets - } else { - logFields["Targets"] = publishContext.nvmetcpTargets - } - } else { - logFields["Targets"] = publishContext.iscsiTargets - } - logFields["WWN"] = publishContext.deviceWWN - logFields["Lun"] = publishContext.volumeLUNAddress - logFields["StagingPath"] = stagingPath - ctx = common.SetLogFields(ctx, logFields) - - found, ready, err := isReadyToPublish(ctx, stagingPath, fs) - if err != nil { - return nil, err - } - if ready { - log.WithFields(logFields).Info("device already staged") - return &csi.NodeStageVolumeResponse{}, nil - } else if found { - log.WithFields(logFields).Warning("volume found in staging path but it is not ready for publish," + - "try to unmount it and retry staging again") - _, err := unstageVolume(ctx, stagingPath, id, logFields, err, fs) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to unmount volume: %s", err.Error()) - } - } - - devicePath, err := s.connectDevice(ctx, publishContext) - if err != nil { - return nil, err - } - - logFields["DevicePath"] = devicePath - - log.WithFields(logFields).Info("start staging") - if _, err := fs.MkFileIdempotent(stagingPath); err != nil { - return nil, status.Errorf(codes.Internal, "can't create target file %s: %s", - stagingPath, err.Error()) - } - log.WithFields(logFields).Info("target path successfully created") - - if err := fs.GetUtil().BindMount(ctx, devicePath, stagingPath); err != nil { - return nil, status.Errorf(codes.Internal, - "error bind disk %s to target path: %s", devicePath, err.Error()) - } - - log.WithFields(logFields).Info("stage complete") - return &csi.NodeStageVolumeResponse{}, nil -} - -// NFSStager implementation of NodeVolumeStager for NFS volumes -type NFSStager struct { - array *array.PowerStoreArray -} - -// Stage stages volume by mounting volumes as nfs to the staging path -func (n *NFSStager) Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, - logFields log.Fields, fs fs.Interface, id string, _ bool, -) (*csi.NodeStageVolumeResponse, error) { - // append additional path to be able to do bind mounts - stagingPath := getStagingPath(ctx, req.GetStagingTargetPath(), id) - - hostIP := req.PublishContext[common.KeyHostIP] - exportID := req.PublishContext[common.KeyExportID] - nfsExport := req.PublishContext[common.KeyNfsExportPath] - allowRoot := req.PublishContext[common.KeyAllowRoot] - nasName := req.PublishContext[common.KeyNasName] - - natIP := "" - if ip, ok := req.PublishContext[common.KeyNatIP]; ok { - natIP = ip - } - - logFields["NfsExportPath"] = nfsExport - logFields["StagingPath"] = req.GetStagingTargetPath() - logFields["ID"] = id - logFields["AllowRoot"] = allowRoot - logFields["ExportID"] = exportID - logFields["HostIP"] = hostIP - logFields["NatIP"] = natIP - logFields["NFSv4ACLs"] = 
req.PublishContext[common.KeyNfsACL] - logFields["NasName"] = nasName - ctx = common.SetLogFields(ctx, logFields) - - found, err := isReadyToPublishNFS(ctx, stagingPath, fs) - if err != nil { - return nil, err - } - - if found { - log.WithFields(logFields).Info("device already staged") - return &csi.NodeStageVolumeResponse{}, nil - } - - if err := fs.MkdirAll(stagingPath, 0o750); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create target folder %s: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Info("stage path successfully created") - - if err := fs.GetUtil().Mount(ctx, nfsExport, stagingPath, ""); err != nil { - return nil, status.Errorf(codes.Internal, - "error mount nfs share %s to target path: %s", nfsExport, err.Error()) - } - - // Create folder with 1777 in nfs share so every user can use it - if err := fs.MkdirAll(filepath.Join(stagingPath, commonNfsVolumeFolder), 0o750); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create common folder %s: %s", filepath.Join(stagingPath, "volume"), err.Error()) - } - - mode := os.ModePerm - acls := req.PublishContext[common.KeyNfsACL] - aclsConfigured := false - if acls != "" { - if posixMode(acls) { - perm, err := strconv.ParseUint(acls, 8, 32) - if err == nil { - mode = os.FileMode(perm) // #nosec: G115 false positive - } else { - log.WithFields(logFields).Warn("can't parse file mode, invalid mode specified. Default mode permissions will be set.") - } - } else { - aclsConfigured, err = validateAndSetACLs(ctx, &NFSv4ACLs{}, nasName, n.array.GetClient(), acls, filepath.Join(stagingPath, commonNfsVolumeFolder)) - if err != nil || !aclsConfigured { - return nil, err - } - } - } - - if !aclsConfigured { - if err := fs.Chmod(filepath.Join(stagingPath, commonNfsVolumeFolder), os.ModeSticky|mode); err != nil { - return nil, status.Errorf(codes.Internal, - "can't change permissions of folder %s: %s", filepath.Join(stagingPath, "volume"), err.Error()) - } - } - - if allowRoot == "false" { - log.WithFields(logFields).Info("removing allow root from nfs export") - var hostsToRemove []string - var hostsToAdd []string - - hostsToRemove = append(hostsToRemove, hostIP+"/255.255.255.255") - hostsToAdd = append(hostsToAdd, hostIP) - - if natIP != "" { - hostsToRemove = append(hostsToRemove, natIP) - hostsToAdd = append(hostsToAdd, natIP) - } - - // Modify NFS export to RW with `root_squashing` - _, err = n.array.GetClient().ModifyNFSExport(ctx, &gopowerstore.NFSExportModify{ - RemoveRWRootHosts: hostsToRemove, - AddRWHosts: hostsToAdd, - }, exportID) - if err != nil { - if apiError, ok := err.(gopowerstore.APIError); !(ok && apiError.NotFound()) { - return nil, status.Errorf(codes.Internal, "failure when modifying nfs export: %s", err.Error()) - } - } - } - - log.WithFields(logFields).Info("nfs share successfully mounted") - return &csi.NodeStageVolumeResponse{}, nil -} - -type scsiPublishContextData struct { - deviceWWN string - volumeLUNAddress string - iscsiTargets []gobrick.ISCSITargetInfo - nvmetcpTargets []gobrick.NVMeTargetInfo - nvmefcTargets []gobrick.NVMeTargetInfo - fcTargets []gobrick.FCTargetInfo -} - -func readSCSIInfoFromPublishContext(publishContext map[string]string, useFC bool, useNVMe bool, isRemote bool) (scsiPublishContextData, error) { - // Get publishContext - var data scsiPublishContextData - deviceWwnKey := common.PublishContextDeviceWWN - lunAddressKey := common.PublishContextLUNAddress - if isRemote { - deviceWwnKey = common.PublishContextRemoteDeviceWWN - lunAddressKey = 
common.PublishContextRemoteLUNAddress - } - - deviceWWN, ok := publishContext[deviceWwnKey] - if !ok { - return data, status.Error(codes.InvalidArgument, "deviceWWN must be in publish context") - } - volumeLUNAddress, ok := publishContext[lunAddressKey] - if !ok { - return data, status.Error(codes.InvalidArgument, "volumeLUNAddress must be in publish context") - } - - iscsiTargets := readISCSITargetsFromPublishContext(publishContext, isRemote) - if len(iscsiTargets) == 0 && !useFC && !useNVMe { - return data, status.Error(codes.InvalidArgument, "iscsiTargets data must be in publish context") - } - nvmeTCPTargets := readNVMETCPTargetsFromPublishContext(publishContext, isRemote) - if len(nvmeTCPTargets) == 0 && useNVMe && !useFC { - return data, status.Error(codes.InvalidArgument, "NVMeTCP Targets data must be in publish context") - } - nvmeFCTargets := readNVMEFCTargetsFromPublishContext(publishContext, isRemote) - if len(nvmeFCTargets) == 0 && useNVMe && useFC { - return data, status.Error(codes.InvalidArgument, "NVMeFC Targets data must be in publish context") - } - fcTargets := readFCTargetsFromPublishContext(publishContext, isRemote) - if len(fcTargets) == 0 && useFC && !useNVMe { - return data, status.Error(codes.InvalidArgument, "fcTargets data must be in publish context") - } - return scsiPublishContextData{ - deviceWWN: deviceWWN, volumeLUNAddress: volumeLUNAddress, - iscsiTargets: iscsiTargets, nvmetcpTargets: nvmeTCPTargets, nvmefcTargets: nvmeFCTargets, fcTargets: fcTargets, - }, nil -} - -func readISCSITargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.ISCSITargetInfo { - var targets []gobrick.ISCSITargetInfo - iscsiTargetsKey := common.PublishContextISCSITargetsPrefix - iscsiPortalsKey := common.PublishContextISCSIPortalsPrefix - if isRemote { - iscsiTargetsKey = common.PublishContextRemoteISCSITargetsPrefix - iscsiPortalsKey = common.PublishContextRemoteISCSIPortalsPrefix - } - for i := 0; ; i++ { - target := gobrick.ISCSITargetInfo{} - t, tfound := pc[fmt.Sprintf("%s%d", iscsiTargetsKey, i)] - if tfound { - target.Target = t - } - p, pfound := pc[fmt.Sprintf("%s%d", iscsiPortalsKey, i)] - if pfound { - target.Portal = p - } - if !tfound || !pfound { - break - } - - if ReachableEndPoint(p) { - // if the portals from the context (set in ControllerPublishVolume) is not reachable from the nodes - targets = append(targets, target) - } - } - log.Infof("iSCSI iscsiTargets from context: %v", targets) - return targets -} - -func readNVMETCPTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.NVMeTargetInfo { - var targets []gobrick.NVMeTargetInfo - nvmeTCPTargetsKey := common.PublishContextNVMETCPTargetsPrefix - nvmeTCPPortalsKey := common.PublishContextNVMETCPPortalsPrefix - if isRemote { - nvmeTCPTargetsKey = common.PublishContextRemoteNVMETCPTargetsPrefix - nvmeTCPPortalsKey = common.PublishContextRemoteNVMETCPPortalsPrefix - } - for i := 0; ; i++ { - target := gobrick.NVMeTargetInfo{} - t, tfound := pc[fmt.Sprintf("%s%d", nvmeTCPTargetsKey, i)] - if tfound { - target.Target = t - } - p, pfound := pc[fmt.Sprintf("%s%d", nvmeTCPPortalsKey, i)] - if pfound { - target.Portal = p - } - if !tfound || !pfound { - break - } - targets = append(targets, target) - } - log.Infof("NVMeTCP Targets from context: %v", targets) - return targets -} - -func readNVMEFCTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.NVMeTargetInfo { - var targets []gobrick.NVMeTargetInfo - nvmeFcTargetsKey := common.PublishContextNVMEFCTargetsPrefix - 
nvmeFcPortalsKey := common.PublishContextNVMEFCPortalsPrefix - if isRemote { - nvmeFcTargetsKey = common.PublishContextRemoteNVMEFCTargetsPrefix - nvmeFcPortalsKey = common.PublishContextRemoteNVMEFCPortalsPrefix - } - for i := 0; ; i++ { - target := gobrick.NVMeTargetInfo{} - t, tfound := pc[fmt.Sprintf("%s%d", nvmeFcTargetsKey, i)] - if tfound { - target.Target = t - } - p, pfound := pc[fmt.Sprintf("%s%d", nvmeFcPortalsKey, i)] - if pfound { - target.Portal = p - } - if !tfound || !pfound { - break - } - targets = append(targets, target) - } - log.Infof("NVMeFC Targets from context: %v", targets) - return targets -} - -func readFCTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.FCTargetInfo { - var targets []gobrick.FCTargetInfo - fcWwpnKey := common.PublishContextFCWWPNPrefix - if isRemote { - fcWwpnKey = common.PublishContextRemoteFCWWPNPrefix - } - for i := 0; ; i++ { - wwpn, tfound := pc[fmt.Sprintf("%s%d", fcWwpnKey, i)] - if !tfound { - break - } - targets = append(targets, gobrick.FCTargetInfo{WWPN: wwpn}) - } - log.Infof("FC iscsiTargets from context: %v", targets) - return targets -} - -func (s *SCSIStager) connectDevice(ctx context.Context, data scsiPublishContextData) (string, error) { - logFields := common.GetLogFields(ctx) - var err error - lun, err := strconv.Atoi(data.volumeLUNAddress) - if err != nil { - log.WithFields(logFields).Errorf("failed to convert lun number to int: %s", err.Error()) - return "", status.Errorf(codes.Internal, - "failed to convert lun number to int: %s", err.Error()) - } - wwn := data.deviceWWN - var device gobrick.Device - if s.useNVME { - device, err = s.connectNVMEDevice(ctx, wwn, data, s.useFC) - } else if s.useFC { - device, err = s.connectFCDevice(ctx, lun, data) - } else { - device, err = s.connectISCSIDevice(ctx, lun, data) - } - - if err != nil { - log.WithFields(logFields).Errorf("Unable to find device after multiple discovery attempts: %s", err.Error()) - return "", status.Errorf(codes.Internal, - "unable to find device after multiple discovery attempts: %s", err.Error()) - } - devicePath := path.Join("/dev/", device.Name) - return devicePath, nil -} - -func (s *SCSIStager) connectISCSIDevice(ctx context.Context, - lun int, data scsiPublishContextData, -) (gobrick.Device, error) { - logFields := common.GetLogFields(ctx) - var targets []gobrick.ISCSITargetInfo - for _, t := range data.iscsiTargets { - targets = append(targets, gobrick.ISCSITargetInfo{Target: t.Target, Portal: t.Portal}) - } - // separate context to prevent 15 seconds cancel from kubernetes - connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) - defer cFunc() - - connectorCtx = common.SetLogFields(connectorCtx, logFields) - return s.iscsiConnector.ConnectVolume(connectorCtx, gobrick.ISCSIVolumeInfo{ - Targets: targets, - Lun: lun, - }) -} - -func (s *SCSIStager) connectNVMEDevice(ctx context.Context, - wwn string, data scsiPublishContextData, useFC bool, -) (gobrick.Device, error) { - logFields := common.GetLogFields(ctx) - var targets []gobrick.NVMeTargetInfo - - if useFC { - for _, t := range data.nvmefcTargets { - targets = append(targets, gobrick.NVMeTargetInfo{Target: t.Target, Portal: t.Portal}) - } - } else { - for _, t := range data.nvmetcpTargets { - targets = append(targets, gobrick.NVMeTargetInfo{Target: t.Target, Portal: t.Portal}) - } - } - // separate context to prevent 15 seconds cancel from kubernetes - connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) - defer cFunc() - - 
connectorCtx = common.SetLogFields(connectorCtx, logFields) - return s.nvmeConnector.ConnectVolume(connectorCtx, gobrick.NVMeVolumeInfo{ - Targets: targets, - WWN: wwn, - }, useFC) -} - -func (s *SCSIStager) connectFCDevice(ctx context.Context, - lun int, data scsiPublishContextData, -) (gobrick.Device, error) { - logFields := common.GetLogFields(ctx) - var targets []gobrick.FCTargetInfo - - for _, t := range data.fcTargets { - targets = append(targets, gobrick.FCTargetInfo{WWPN: t.WWPN}) - } - // separate context to prevent 15 seconds cancel from kubernetes - connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) - defer cFunc() - - connectorCtx = common.SetLogFields(connectorCtx, logFields) - return s.fcConnector.ConnectVolume(connectorCtx, gobrick.FCVolumeInfo{ - Targets: targets, - Lun: lun, - }) -} - -func isReadyToPublish(ctx context.Context, stagingPath string, fs fs.Interface) (bool, bool, error) { - logFields := common.GetLogFields(ctx) - stageInfo, found, err := getTargetMount(ctx, stagingPath, fs) - if err != nil { - return found, false, err - } - if !found { - log.WithFields(logFields).Warning("staged device not found") - return found, false, nil - } - - if strings.HasSuffix(stageInfo.Source, "deleted") { - log.WithFields(logFields).Warning("staged device linked with deleted path") - return found, false, nil - } - - devFS, err := fs.GetUtil().GetDiskFormat(ctx, stagingPath) - if err != nil { - return found, false, err - } - return found, devFS != "mpath_member", nil -} - -func isReadyToPublishNFS(ctx context.Context, stagingPath string, fs fs.Interface) (bool, error) { - logFields := common.GetLogFields(ctx) - stageInfo, found, err := getTargetMount(ctx, stagingPath, fs) - if err != nil { - return found, err - } - if !found { - log.WithFields(logFields).Warning("staged device not found") - return found, nil - } - - if strings.HasSuffix(stageInfo.Source, "deleted") { - log.WithFields(logFields).Warning("staged device linked with deleted path") - return found, nil - } - - return found, nil -} + package node + + import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/dell/csi-powerstore/v2/pkg/array" + "github.com/dell/csi-powerstore/v2/pkg/common" + "github.com/dell/csi-powerstore/v2/pkg/common/fs" + "github.com/dell/gobrick" + "github.com/dell/gopowerstore" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + ) + + const ( + procMountsPath = "/proc/self/mountinfo" + procMountsRetries = 15 + ) + + // VolumeStager allows to node stage a volume + type VolumeStager interface { + Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, logFields log.Fields, fs fs.Interface, id string, isRemote bool) (*csi.NodeStageVolumeResponse, error) + } + + // ReachableEndPoint checks if the endpoint is reachable or not + var ReachableEndPoint = common.ReachableEndPoint + + // SCSIStager implementation of NodeVolumeStager for SCSI based (FC, iSCSI) volumes + type SCSIStager struct { + useFC bool + useNVME bool + iscsiConnector ISCSIConnector + nvmeConnector NVMEConnector + fcConnector FcConnector + } + + // Stage stages volume by connecting it through either FC or iSCSI and creating bind mount to staging path + func (s *SCSIStager) Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, + logFields log.Fields, fs fs.Interface, id string, isRemote bool, + ) (*csi.NodeStageVolumeResponse, error) { + // 
append additional path to be able to do bind mounts + stagingPath := getStagingPath(ctx, req.GetStagingTargetPath(), id) + + publishContext, err := readSCSIInfoFromPublishContext(req.PublishContext, s.useFC, s.useNVME, isRemote) + if err != nil { + return nil, err + } + + logFields["ID"] = id + if s.useNVME { + if s.useFC { + logFields["Targets"] = publishContext.nvmefcTargets + } else { + logFields["Targets"] = publishContext.nvmetcpTargets + } + } else { + logFields["Targets"] = publishContext.iscsiTargets + } + logFields["WWN"] = publishContext.deviceWWN + logFields["Lun"] = publishContext.volumeLUNAddress + logFields["StagingPath"] = stagingPath + ctx = common.SetLogFields(ctx, logFields) + + found, ready, err := isReadyToPublish(ctx, stagingPath, fs) + if err != nil { + return nil, err + } + if ready { + log.WithFields(logFields).Info("device already staged") + return &csi.NodeStageVolumeResponse{}, nil + } else if found { + log.WithFields(logFields).Warning("volume found in staging path but it is not ready for publish," + + "try to unmount it and retry staging again") + _, err := unstageVolume(ctx, stagingPath, id, logFields, err, fs) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to unmount volume: %s", err.Error()) + } + } + + devicePath, err := s.connectDevice(ctx, publishContext) + if err != nil { + return nil, err + } + + logFields["DevicePath"] = devicePath + + log.WithFields(logFields).Info("start staging") + if _, err := fs.MkFileIdempotent(stagingPath); err != nil { + return nil, status.Errorf(codes.Internal, "can't create target file %s: %s", + stagingPath, err.Error()) + } + log.WithFields(logFields).Info("target path successfully created") + + mntFlags := common.GetMountFlags(req.GetVolumeCapability()) + if err := fs.GetUtil().BindMount(ctx, devicePath, stagingPath, mntFlags...); err != nil { + return nil, status.Errorf(codes.Internal, + "error bind disk %s to target path: %s", devicePath, err.Error()) + } + + log.WithFields(logFields).Info("stage complete") + return &csi.NodeStageVolumeResponse{}, nil + } + + // NFSStager implementation of NodeVolumeStager for NFS volumes + type NFSStager struct { + array *array.PowerStoreArray + } + + // Stage stages volume by mounting volumes as nfs to the staging path + func (n *NFSStager) Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, + logFields log.Fields, fs fs.Interface, id string, _ bool, + ) (*csi.NodeStageVolumeResponse, error) { + // append additional path to be able to do bind mounts + stagingPath := getStagingPath(ctx, req.GetStagingTargetPath(), id) + + hostIP := req.PublishContext[common.KeyHostIP] + exportID := req.PublishContext[common.KeyExportID] + nfsExport := req.PublishContext[common.KeyNfsExportPath] + allowRoot := req.PublishContext[common.KeyAllowRoot] + nasName := req.PublishContext[common.KeyNasName] + + natIP := "" + if ip, ok := req.PublishContext[common.KeyNatIP]; ok { + natIP = ip + } + + logFields["NfsExportPath"] = nfsExport + logFields["StagingPath"] = req.GetStagingTargetPath() + logFields["ID"] = id + logFields["AllowRoot"] = allowRoot + logFields["ExportID"] = exportID + logFields["HostIP"] = hostIP + logFields["NatIP"] = natIP + logFields["NFSv4ACLs"] = req.PublishContext[common.KeyNfsACL] + logFields["NasName"] = nasName + ctx = common.SetLogFields(ctx, logFields) + + found, err := isReadyToPublishNFS(ctx, stagingPath, fs) + if err != nil { + return nil, err + } + + if found { + log.WithFields(logFields).Info("device already staged") + return 
&csi.NodeStageVolumeResponse{}, nil + } + + if err := fs.MkdirAll(stagingPath, 0o750); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create target folder %s: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Info("stage path successfully created") + + mntFlags := common.GetMountFlags(req.GetVolumeCapability()) + if err := fs.GetUtil().Mount(ctx, nfsExport, stagingPath, "", mntFlags...); err != nil { + return nil, status.Errorf(codes.Internal, + "error mount nfs share %s to target path: %s", nfsExport, err.Error()) + } + + // Create folder with 1777 in nfs share so every user can use it + if err := fs.MkdirAll(filepath.Join(stagingPath, commonNfsVolumeFolder), 0o750); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create common folder %s: %s", filepath.Join(stagingPath, "volume"), err.Error()) + } + + mode := os.ModePerm + acls := req.PublishContext[common.KeyNfsACL] + aclsConfigured := false + if acls != "" { + if posixMode(acls) { + perm, err := strconv.ParseUint(acls, 8, 32) + if err == nil { + mode = os.FileMode(perm) // #nosec: G115 false positive + } else { + log.WithFields(logFields).Warn("can't parse file mode, invalid mode specified. Default mode permissions will be set.") + } + } else { + aclsConfigured, err = validateAndSetACLs(ctx, &NFSv4ACLs{}, nasName, n.array.GetClient(), acls, filepath.Join(stagingPath, commonNfsVolumeFolder)) + if err != nil || !aclsConfigured { + return nil, err + } + } + } + + if !aclsConfigured { + if err := fs.Chmod(filepath.Join(stagingPath, commonNfsVolumeFolder), os.ModeSticky|mode); err != nil { + return nil, status.Errorf(codes.Internal, + "can't change permissions of folder %s: %s", filepath.Join(stagingPath, "volume"), err.Error()) + } + } + + if allowRoot == "false" { + log.WithFields(logFields).Info("removing allow root from nfs export") + var hostsToRemove []string + var hostsToAdd []string + + hostsToRemove = append(hostsToRemove, hostIP+"/255.255.255.255") + hostsToAdd = append(hostsToAdd, hostIP) + + if natIP != "" { + hostsToRemove = append(hostsToRemove, natIP) + hostsToAdd = append(hostsToAdd, natIP) + } + + // Modify NFS export to RW with `root_squashing` + _, err = n.array.GetClient().ModifyNFSExport(ctx, &gopowerstore.NFSExportModify{ + RemoveRWRootHosts: hostsToRemove, + AddRWHosts: hostsToAdd, + }, exportID) + if err != nil { + if apiError, ok := err.(gopowerstore.APIError); !(ok && apiError.NotFound()) { + return nil, status.Errorf(codes.Internal, "failure when modifying nfs export: %s", err.Error()) + } + } + } + + log.WithFields(logFields).Info("nfs share successfully mounted") + return &csi.NodeStageVolumeResponse{}, nil + } + + type scsiPublishContextData struct { + deviceWWN string + volumeLUNAddress string + iscsiTargets []gobrick.ISCSITargetInfo + nvmetcpTargets []gobrick.NVMeTargetInfo + nvmefcTargets []gobrick.NVMeTargetInfo + fcTargets []gobrick.FCTargetInfo + } + + func readSCSIInfoFromPublishContext(publishContext map[string]string, useFC bool, useNVMe bool, isRemote bool) (scsiPublishContextData, error) { + // Get publishContext + var data scsiPublishContextData + deviceWwnKey := common.PublishContextDeviceWWN + lunAddressKey := common.PublishContextLUNAddress + if isRemote { + deviceWwnKey = common.PublishContextRemoteDeviceWWN + lunAddressKey = common.PublishContextRemoteLUNAddress + } + + deviceWWN, ok := publishContext[deviceWwnKey] + if !ok { + return data, status.Error(codes.InvalidArgument, "deviceWWN must be in publish context") + } + volumeLUNAddress, ok := 
publishContext[lunAddressKey] + if !ok { + return data, status.Error(codes.InvalidArgument, "volumeLUNAddress must be in publish context") + } + + iscsiTargets := readISCSITargetsFromPublishContext(publishContext, isRemote) + if len(iscsiTargets) == 0 && !useFC && !useNVMe { + return data, status.Error(codes.InvalidArgument, "iscsiTargets data must be in publish context") + } + nvmeTCPTargets := readNVMETCPTargetsFromPublishContext(publishContext, isRemote) + if len(nvmeTCPTargets) == 0 && useNVMe && !useFC { + return data, status.Error(codes.InvalidArgument, "NVMeTCP Targets data must be in publish context") + } + nvmeFCTargets := readNVMEFCTargetsFromPublishContext(publishContext, isRemote) + if len(nvmeFCTargets) == 0 && useNVMe && useFC { + return data, status.Error(codes.InvalidArgument, "NVMeFC Targets data must be in publish context") + } + fcTargets := readFCTargetsFromPublishContext(publishContext, isRemote) + if len(fcTargets) == 0 && useFC && !useNVMe { + return data, status.Error(codes.InvalidArgument, "fcTargets data must be in publish context") + } + return scsiPublishContextData{ + deviceWWN: deviceWWN, volumeLUNAddress: volumeLUNAddress, + iscsiTargets: iscsiTargets, nvmetcpTargets: nvmeTCPTargets, nvmefcTargets: nvmeFCTargets, fcTargets: fcTargets, + }, nil + } + + func readISCSITargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.ISCSITargetInfo { + var targets []gobrick.ISCSITargetInfo + iscsiTargetsKey := common.PublishContextISCSITargetsPrefix + iscsiPortalsKey := common.PublishContextISCSIPortalsPrefix + if isRemote { + iscsiTargetsKey = common.PublishContextRemoteISCSITargetsPrefix + iscsiPortalsKey = common.PublishContextRemoteISCSIPortalsPrefix + } + for i := 0; ; i++ { + target := gobrick.ISCSITargetInfo{} + t, tfound := pc[fmt.Sprintf("%s%d", iscsiTargetsKey, i)] + if tfound { + target.Target = t + } + p, pfound := pc[fmt.Sprintf("%s%d", iscsiPortalsKey, i)] + if pfound { + target.Portal = p + } + if !tfound || !pfound { + break + } + + if ReachableEndPoint(p) { + // if the portals from the context (set in ControllerPublishVolume) is not reachable from the nodes + targets = append(targets, target) + } + } + log.Infof("iSCSI iscsiTargets from context: %v", targets) + return targets + } + + func readNVMETCPTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.NVMeTargetInfo { + var targets []gobrick.NVMeTargetInfo + nvmeTCPTargetsKey := common.PublishContextNVMETCPTargetsPrefix + nvmeTCPPortalsKey := common.PublishContextNVMETCPPortalsPrefix + if isRemote { + nvmeTCPTargetsKey = common.PublishContextRemoteNVMETCPTargetsPrefix + nvmeTCPPortalsKey = common.PublishContextRemoteNVMETCPPortalsPrefix + } + for i := 0; ; i++ { + target := gobrick.NVMeTargetInfo{} + t, tfound := pc[fmt.Sprintf("%s%d", nvmeTCPTargetsKey, i)] + if tfound { + target.Target = t + } + p, pfound := pc[fmt.Sprintf("%s%d", nvmeTCPPortalsKey, i)] + if pfound { + target.Portal = p + } + if !tfound || !pfound { + break + } + targets = append(targets, target) + } + log.Infof("NVMeTCP Targets from context: %v", targets) + return targets + } + + func readNVMEFCTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.NVMeTargetInfo { + var targets []gobrick.NVMeTargetInfo + nvmeFcTargetsKey := common.PublishContextNVMEFCTargetsPrefix + nvmeFcPortalsKey := common.PublishContextNVMEFCPortalsPrefix + if isRemote { + nvmeFcTargetsKey = common.PublishContextRemoteNVMEFCTargetsPrefix + nvmeFcPortalsKey = common.PublishContextRemoteNVMEFCPortalsPrefix + } + 
for i := 0; ; i++ { + target := gobrick.NVMeTargetInfo{} + t, tfound := pc[fmt.Sprintf("%s%d", nvmeFcTargetsKey, i)] + if tfound { + target.Target = t + } + p, pfound := pc[fmt.Sprintf("%s%d", nvmeFcPortalsKey, i)] + if pfound { + target.Portal = p + } + if !tfound || !pfound { + break + } + targets = append(targets, target) + } + log.Infof("NVMeFC Targets from context: %v", targets) + return targets + } + + func readFCTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.FCTargetInfo { + var targets []gobrick.FCTargetInfo + fcWwpnKey := common.PublishContextFCWWPNPrefix + if isRemote { + fcWwpnKey = common.PublishContextRemoteFCWWPNPrefix + } + for i := 0; ; i++ { + wwpn, tfound := pc[fmt.Sprintf("%s%d", fcWwpnKey, i)] + if !tfound { + break + } + targets = append(targets, gobrick.FCTargetInfo{WWPN: wwpn}) + } + log.Infof("FC iscsiTargets from context: %v", targets) + return targets + } + + func (s *SCSIStager) connectDevice(ctx context.Context, data scsiPublishContextData) (string, error) { + logFields := common.GetLogFields(ctx) + var err error + lun, err := strconv.Atoi(data.volumeLUNAddress) + if err != nil { + log.WithFields(logFields).Errorf("failed to convert lun number to int: %s", err.Error()) + return "", status.Errorf(codes.Internal, + "failed to convert lun number to int: %s", err.Error()) + } + wwn := data.deviceWWN + var device gobrick.Device + if s.useNVME { + device, err = s.connectNVMEDevice(ctx, wwn, data, s.useFC) + } else if s.useFC { + device, err = s.connectFCDevice(ctx, lun, data) + } else { + device, err = s.connectISCSIDevice(ctx, lun, data) + } + + if err != nil { + log.WithFields(logFields).Errorf("Unable to find device after multiple discovery attempts: %s", err.Error()) + return "", status.Errorf(codes.Internal, + "unable to find device after multiple discovery attempts: %s", err.Error()) + } + devicePath := path.Join("/dev/", device.Name) + return devicePath, nil + } + + func (s *SCSIStager) connectISCSIDevice(ctx context.Context, + lun int, data scsiPublishContextData, + ) (gobrick.Device, error) { + logFields := common.GetLogFields(ctx) + var targets []gobrick.ISCSITargetInfo + for _, t := range data.iscsiTargets { + targets = append(targets, gobrick.ISCSITargetInfo{Target: t.Target, Portal: t.Portal}) + } + // separate context to prevent 15 seconds cancel from kubernetes + connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) + defer cFunc() + + connectorCtx = common.SetLogFields(connectorCtx, logFields) + return s.iscsiConnector.ConnectVolume(connectorCtx, gobrick.ISCSIVolumeInfo{ + Targets: targets, + Lun: lun, + }) + } + + func (s *SCSIStager) connectNVMEDevice(ctx context.Context, + wwn string, data scsiPublishContextData, useFC bool, + ) (gobrick.Device, error) { + logFields := common.GetLogFields(ctx) + var targets []gobrick.NVMeTargetInfo + + if useFC { + for _, t := range data.nvmefcTargets { + targets = append(targets, gobrick.NVMeTargetInfo{Target: t.Target, Portal: t.Portal}) + } + } else { + for _, t := range data.nvmetcpTargets { + targets = append(targets, gobrick.NVMeTargetInfo{Target: t.Target, Portal: t.Portal}) + } + } + // separate context to prevent 15 seconds cancel from kubernetes + connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) + defer cFunc() + + connectorCtx = common.SetLogFields(connectorCtx, logFields) + return s.nvmeConnector.ConnectVolume(connectorCtx, gobrick.NVMeVolumeInfo{ + Targets: targets, + WWN: wwn, + }, useFC) + } + + func (s *SCSIStager) 
connectFCDevice(ctx context.Context, + lun int, data scsiPublishContextData, + ) (gobrick.Device, error) { + logFields := common.GetLogFields(ctx) + var targets []gobrick.FCTargetInfo + + for _, t := range data.fcTargets { + targets = append(targets, gobrick.FCTargetInfo{WWPN: t.WWPN}) + } + // separate context to prevent 15 seconds cancel from kubernetes + connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) + defer cFunc() + + connectorCtx = common.SetLogFields(connectorCtx, logFields) + return s.fcConnector.ConnectVolume(connectorCtx, gobrick.FCVolumeInfo{ + Targets: targets, + Lun: lun, + }) + } + + func isReadyToPublish(ctx context.Context, stagingPath string, fs fs.Interface) (bool, bool, error) { + logFields := common.GetLogFields(ctx) + stageInfo, found, err := getTargetMount(ctx, stagingPath, fs) + if err != nil { + return found, false, err + } + if !found { + log.WithFields(logFields).Warning("staged device not found") + return found, false, nil + } + + if strings.HasSuffix(stageInfo.Source, "deleted") { + log.WithFields(logFields).Warning("staged device linked with deleted path") + return found, false, nil + } + + devFS, err := fs.GetUtil().GetDiskFormat(ctx, stagingPath) + if err != nil { + return found, false, err + } + return found, devFS != "mpath_member", nil + } + + func isReadyToPublishNFS(ctx context.Context, stagingPath string, fs fs.Interface) (bool, error) { + logFields := common.GetLogFields(ctx) + stageInfo, found, err := getTargetMount(ctx, stagingPath, fs) + if err != nil { + return found, err + } + if !found { + log.WithFields(logFields).Warning("staged device not found") + return found, nil + } + + if strings.HasSuffix(stageInfo.Source, "deleted") { + log.WithFields(logFields).Warning("staged device linked with deleted path") + return found, nil + } + + return found, nil + } + \ No newline at end of file From 6bae367f7d103f930e5e941b750c3c94544946cb Mon Sep 17 00:00:00 2001 From: Akshay Saini Date: Tue, 12 Nov 2024 12:49:54 +0530 Subject: [PATCH 2/8] Fix whitespace issue --- pkg/node/publisher.go | 333 +++++++------ pkg/node/stager.go | 1031 ++++++++++++++++++++--------------------- 2 files changed, 681 insertions(+), 683 deletions(-) diff --git a/pkg/node/publisher.go b/pkg/node/publisher.go index 3ab6159e..805806bc 100644 --- a/pkg/node/publisher.go +++ b/pkg/node/publisher.go @@ -16,170 +16,169 @@ * */ - package node - - import ( - "context" - - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/dell/csi-powerstore/v2/pkg/common" - "github.com/dell/csi-powerstore/v2/pkg/common/fs" - log "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - ) - - // VolumePublisher allows to node publish a volume - type VolumePublisher interface { - Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, - cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) - } - - // SCSIPublisher implementation of NodeVolumePublisher for SCSI based (FC, iSCSI) volumes - type SCSIPublisher struct { - isBlock bool - } - - // Publish publishes volume as either raw block or mount by mounting it to the target path - func (sp *SCSIPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { - published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) - if err != nil { - 
return nil, err - } - - if published { - return &csi.NodePublishVolumeResponse{}, nil - } - - if sp.isBlock { - return sp.publishBlock(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) - } - return sp.publishMount(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) - } - - func (sp *SCSIPublisher) publishBlock(ctx context.Context, logFields log.Fields, fs fs.Interface, _ *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { - log.WithFields(logFields).Info("start publishing as block device") - - if isRO { - return nil, status.Error(codes.InvalidArgument, "read only not supported for Block Volume") - } - - if _, err := fs.MkFileIdempotent(targetPath); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create target file %s: %s", targetPath, err.Error()) - } - log.WithFields(logFields).Info("target path successfully created") - - if err := fs.GetUtil().BindMount(ctx, stagingPath, targetPath); err != nil { - return nil, status.Errorf(codes.Internal, - "error bind disk %s to target path: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Info("volume successfully binded") - - return &csi.NodePublishVolumeResponse{}, nil - } - - func (sp *SCSIPublisher) publishMount(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { - if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER { - // MULTI_WRITER not supported for mount volumes - return nil, status.Error(codes.Unimplemented, "Mount volumes do not support AccessMode MULTI_NODE_MULTI_WRITER") - } - - if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { - // Warning in case of MULTI_NODE_READER_ONLY for mount volumes - log.Warningf("Mount volume with the AccessMode ReadOnlyMany") - } - - var opts []string - mountCap := cap.GetMount() - mountFsType := mountCap.GetFsType() - mntFlags := common.GetMountFlags(cap) - if mountFsType == "xfs" { - mntFlags = append(mntFlags, "nouuid") - } - targetFS := mountCap.GetFsType() - if targetFS == "xfs" { - opts = []string{"-m", "crc=0,finobt=0"} - } - if err := fs.MkdirAll(targetPath, 0o750); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create target dir with Mkdirall %s: %s", targetPath, err.Error()) - } - - log.WithFields(logFields).Info("target dir successfully created") - - curFS, err := fs.GetUtil().GetDiskFormat(ctx, stagingPath) - if err != nil { - return nil, status.Errorf(codes.Internal, - "error while trying to detect fs for staging path %s: %s", stagingPath, err.Error()) - } - - if curFS != "" && targetFS != "" && curFS != targetFS { - return nil, status.Errorf(codes.FailedPrecondition, - "filesystem mismatch. 
Target device already formatted to %s mount spec require %s", - curFS, targetFS) - } - - if curFS == "" { - log.WithFields(logFields).Infof("no filesystem found on staged disk %s", stagingPath) - if isRO { - return nil, status.Errorf(codes.FailedPrecondition, - "RO mount required but no fs detected on staged volume %s", stagingPath) - } - - if err := format(ctx, stagingPath, targetFS, fs, opts...); err != nil { - return nil, status.Errorf(codes.Internal, - "can't format staged device %s: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Infof("staged disk %s successfully formatted to %s", stagingPath, targetFS) - } - if isRO { - mntFlags = append(mntFlags, "ro") - } - - if err := fs.GetUtil().Mount(ctx, stagingPath, targetPath, targetFS, mntFlags...); err != nil { - return nil, status.Errorf(codes.Internal, - "error performing mount for staging path %s: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Info("volume successfully mounted") - - return &csi.NodePublishVolumeResponse{}, nil - } - - // NFSPublisher implementation of NodeVolumePublisher for NFS volumes - type NFSPublisher struct{} - - // Publish publishes nfs volume by mounting it to the target path - func (np *NFSPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, - cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string, - ) (*csi.NodePublishVolumeResponse, error) { - published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) - if err != nil { - return nil, err - } - - if published { - return &csi.NodePublishVolumeResponse{}, nil - } - - if err := fs.MkdirAll(targetPath, 0o750); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create target folder %s: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Info("target path successfully created") - - mntFlags := common.GetMountFlags(cap) - - if isRO { - mntFlags = append(mntFlags, "ro") - } - - if err := fs.GetUtil().BindMount(ctx, stagingPath, targetPath, mntFlags...); err != nil { - return nil, status.Errorf(codes.Internal, - "error bind disk %s to target path: %s", stagingPath, err.Error()) - } - - log.WithFields(logFields).Info("volume successfully binded") - return &csi.NodePublishVolumeResponse{}, nil - } - \ No newline at end of file +package node + +import ( + "context" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/dell/csi-powerstore/v2/pkg/common" + "github.com/dell/csi-powerstore/v2/pkg/common/fs" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// VolumePublisher allows to node publish a volume +type VolumePublisher interface { + Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, + cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) +} + +// SCSIPublisher implementation of NodeVolumePublisher for SCSI based (FC, iSCSI) volumes +type SCSIPublisher struct { + isBlock bool +} + +// Publish publishes volume as either raw block or mount by mounting it to the target path +func (sp *SCSIPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { + published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) + if err != nil { + return nil, err + } + + if published { + return &csi.NodePublishVolumeResponse{}, nil + } + + if sp.isBlock { 
+ return sp.publishBlock(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) + } + return sp.publishMount(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) +} + +func (sp *SCSIPublisher) publishBlock(ctx context.Context, logFields log.Fields, fs fs.Interface, _ *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { + log.WithFields(logFields).Info("start publishing as block device") + + if isRO { + return nil, status.Error(codes.InvalidArgument, "read only not supported for Block Volume") + } + + if _, err := fs.MkFileIdempotent(targetPath); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create target file %s: %s", targetPath, err.Error()) + } + log.WithFields(logFields).Info("target path successfully created") + + if err := fs.GetUtil().BindMount(ctx, stagingPath, targetPath); err != nil { + return nil, status.Errorf(codes.Internal, + "error bind disk %s to target path: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Info("volume successfully binded") + + return &csi.NodePublishVolumeResponse{}, nil +} + +func (sp *SCSIPublisher) publishMount(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { + if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER { + // MULTI_WRITER not supported for mount volumes + return nil, status.Error(codes.Unimplemented, "Mount volumes do not support AccessMode MULTI_NODE_MULTI_WRITER") + } + + if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { + // Warning in case of MULTI_NODE_READER_ONLY for mount volumes + log.Warningf("Mount volume with the AccessMode ReadOnlyMany") + } + + var opts []string + mountCap := cap.GetMount() + mountFsType := mountCap.GetFsType() + mntFlags := common.GetMountFlags(cap) + if mountFsType == "xfs" { + mntFlags = append(mntFlags, "nouuid") + } + targetFS := mountCap.GetFsType() + if targetFS == "xfs" { + opts = []string{"-m", "crc=0,finobt=0"} + } + if err := fs.MkdirAll(targetPath, 0o750); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create target dir with Mkdirall %s: %s", targetPath, err.Error()) + } + + log.WithFields(logFields).Info("target dir successfully created") + + curFS, err := fs.GetUtil().GetDiskFormat(ctx, stagingPath) + if err != nil { + return nil, status.Errorf(codes.Internal, + "error while trying to detect fs for staging path %s: %s", stagingPath, err.Error()) + } + + if curFS != "" && targetFS != "" && curFS != targetFS { + return nil, status.Errorf(codes.FailedPrecondition, + "filesystem mismatch. 
Target device already formatted to %s mount spec require %s", + curFS, targetFS) + } + + if curFS == "" { + log.WithFields(logFields).Infof("no filesystem found on staged disk %s", stagingPath) + if isRO { + return nil, status.Errorf(codes.FailedPrecondition, + "RO mount required but no fs detected on staged volume %s", stagingPath) + } + + if err := format(ctx, stagingPath, targetFS, fs, opts...); err != nil { + return nil, status.Errorf(codes.Internal, + "can't format staged device %s: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Infof("staged disk %s successfully formatted to %s", stagingPath, targetFS) + } + if isRO { + mntFlags = append(mntFlags, "ro") + } + + if err := fs.GetUtil().Mount(ctx, stagingPath, targetPath, targetFS, mntFlags...); err != nil { + return nil, status.Errorf(codes.Internal, + "error performing mount for staging path %s: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Info("volume successfully mounted") + + return &csi.NodePublishVolumeResponse{}, nil +} + +// NFSPublisher implementation of NodeVolumePublisher for NFS volumes +type NFSPublisher struct{} + +// Publish publishes nfs volume by mounting it to the target path +func (np *NFSPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, + cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string, +) (*csi.NodePublishVolumeResponse, error) { + published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) + if err != nil { + return nil, err + } + + if published { + return &csi.NodePublishVolumeResponse{}, nil + } + + if err := fs.MkdirAll(targetPath, 0o750); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create target folder %s: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Info("target path successfully created") + + mntFlags := common.GetMountFlags(cap) + + if isRO { + mntFlags = append(mntFlags, "ro") + } + + if err := fs.GetUtil().BindMount(ctx, stagingPath, targetPath, mntFlags...); err != nil { + return nil, status.Errorf(codes.Internal, + "error bind disk %s to target path: %s", stagingPath, err.Error()) + } + + log.WithFields(logFields).Info("volume successfully binded") + return &csi.NodePublishVolumeResponse{}, nil +} diff --git a/pkg/node/stager.go b/pkg/node/stager.go index 3bbdf07a..e385ff46 100644 --- a/pkg/node/stager.go +++ b/pkg/node/stager.go @@ -16,519 +16,518 @@ * */ - package node - - import ( - "context" - "fmt" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/dell/csi-powerstore/v2/pkg/array" - "github.com/dell/csi-powerstore/v2/pkg/common" - "github.com/dell/csi-powerstore/v2/pkg/common/fs" - "github.com/dell/gobrick" - "github.com/dell/gopowerstore" - log "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - ) - - const ( - procMountsPath = "/proc/self/mountinfo" - procMountsRetries = 15 - ) - - // VolumeStager allows to node stage a volume - type VolumeStager interface { - Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, logFields log.Fields, fs fs.Interface, id string, isRemote bool) (*csi.NodeStageVolumeResponse, error) - } - - // ReachableEndPoint checks if the endpoint is reachable or not - var ReachableEndPoint = common.ReachableEndPoint - - // SCSIStager implementation of NodeVolumeStager for SCSI based (FC, iSCSI) volumes - type SCSIStager struct { - useFC bool - useNVME bool - iscsiConnector ISCSIConnector 
- nvmeConnector NVMEConnector - fcConnector FcConnector - } - - // Stage stages volume by connecting it through either FC or iSCSI and creating bind mount to staging path - func (s *SCSIStager) Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, - logFields log.Fields, fs fs.Interface, id string, isRemote bool, - ) (*csi.NodeStageVolumeResponse, error) { - // append additional path to be able to do bind mounts - stagingPath := getStagingPath(ctx, req.GetStagingTargetPath(), id) - - publishContext, err := readSCSIInfoFromPublishContext(req.PublishContext, s.useFC, s.useNVME, isRemote) - if err != nil { - return nil, err - } - - logFields["ID"] = id - if s.useNVME { - if s.useFC { - logFields["Targets"] = publishContext.nvmefcTargets - } else { - logFields["Targets"] = publishContext.nvmetcpTargets - } - } else { - logFields["Targets"] = publishContext.iscsiTargets - } - logFields["WWN"] = publishContext.deviceWWN - logFields["Lun"] = publishContext.volumeLUNAddress - logFields["StagingPath"] = stagingPath - ctx = common.SetLogFields(ctx, logFields) - - found, ready, err := isReadyToPublish(ctx, stagingPath, fs) - if err != nil { - return nil, err - } - if ready { - log.WithFields(logFields).Info("device already staged") - return &csi.NodeStageVolumeResponse{}, nil - } else if found { - log.WithFields(logFields).Warning("volume found in staging path but it is not ready for publish," + - "try to unmount it and retry staging again") - _, err := unstageVolume(ctx, stagingPath, id, logFields, err, fs) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to unmount volume: %s", err.Error()) - } - } - - devicePath, err := s.connectDevice(ctx, publishContext) - if err != nil { - return nil, err - } - - logFields["DevicePath"] = devicePath - - log.WithFields(logFields).Info("start staging") - if _, err := fs.MkFileIdempotent(stagingPath); err != nil { - return nil, status.Errorf(codes.Internal, "can't create target file %s: %s", - stagingPath, err.Error()) - } - log.WithFields(logFields).Info("target path successfully created") - - mntFlags := common.GetMountFlags(req.GetVolumeCapability()) - if err := fs.GetUtil().BindMount(ctx, devicePath, stagingPath, mntFlags...); err != nil { - return nil, status.Errorf(codes.Internal, - "error bind disk %s to target path: %s", devicePath, err.Error()) - } - - log.WithFields(logFields).Info("stage complete") - return &csi.NodeStageVolumeResponse{}, nil - } - - // NFSStager implementation of NodeVolumeStager for NFS volumes - type NFSStager struct { - array *array.PowerStoreArray - } - - // Stage stages volume by mounting volumes as nfs to the staging path - func (n *NFSStager) Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, - logFields log.Fields, fs fs.Interface, id string, _ bool, - ) (*csi.NodeStageVolumeResponse, error) { - // append additional path to be able to do bind mounts - stagingPath := getStagingPath(ctx, req.GetStagingTargetPath(), id) - - hostIP := req.PublishContext[common.KeyHostIP] - exportID := req.PublishContext[common.KeyExportID] - nfsExport := req.PublishContext[common.KeyNfsExportPath] - allowRoot := req.PublishContext[common.KeyAllowRoot] - nasName := req.PublishContext[common.KeyNasName] - - natIP := "" - if ip, ok := req.PublishContext[common.KeyNatIP]; ok { - natIP = ip - } - - logFields["NfsExportPath"] = nfsExport - logFields["StagingPath"] = req.GetStagingTargetPath() - logFields["ID"] = id - logFields["AllowRoot"] = allowRoot - logFields["ExportID"] = exportID - logFields["HostIP"] = hostIP - 
logFields["NatIP"] = natIP - logFields["NFSv4ACLs"] = req.PublishContext[common.KeyNfsACL] - logFields["NasName"] = nasName - ctx = common.SetLogFields(ctx, logFields) - - found, err := isReadyToPublishNFS(ctx, stagingPath, fs) - if err != nil { - return nil, err - } - - if found { - log.WithFields(logFields).Info("device already staged") - return &csi.NodeStageVolumeResponse{}, nil - } - - if err := fs.MkdirAll(stagingPath, 0o750); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create target folder %s: %s", stagingPath, err.Error()) - } - log.WithFields(logFields).Info("stage path successfully created") - - mntFlags := common.GetMountFlags(req.GetVolumeCapability()) - if err := fs.GetUtil().Mount(ctx, nfsExport, stagingPath, "", mntFlags...); err != nil { - return nil, status.Errorf(codes.Internal, - "error mount nfs share %s to target path: %s", nfsExport, err.Error()) - } - - // Create folder with 1777 in nfs share so every user can use it - if err := fs.MkdirAll(filepath.Join(stagingPath, commonNfsVolumeFolder), 0o750); err != nil { - return nil, status.Errorf(codes.Internal, - "can't create common folder %s: %s", filepath.Join(stagingPath, "volume"), err.Error()) - } - - mode := os.ModePerm - acls := req.PublishContext[common.KeyNfsACL] - aclsConfigured := false - if acls != "" { - if posixMode(acls) { - perm, err := strconv.ParseUint(acls, 8, 32) - if err == nil { - mode = os.FileMode(perm) // #nosec: G115 false positive - } else { - log.WithFields(logFields).Warn("can't parse file mode, invalid mode specified. Default mode permissions will be set.") - } - } else { - aclsConfigured, err = validateAndSetACLs(ctx, &NFSv4ACLs{}, nasName, n.array.GetClient(), acls, filepath.Join(stagingPath, commonNfsVolumeFolder)) - if err != nil || !aclsConfigured { - return nil, err - } - } - } - - if !aclsConfigured { - if err := fs.Chmod(filepath.Join(stagingPath, commonNfsVolumeFolder), os.ModeSticky|mode); err != nil { - return nil, status.Errorf(codes.Internal, - "can't change permissions of folder %s: %s", filepath.Join(stagingPath, "volume"), err.Error()) - } - } - - if allowRoot == "false" { - log.WithFields(logFields).Info("removing allow root from nfs export") - var hostsToRemove []string - var hostsToAdd []string - - hostsToRemove = append(hostsToRemove, hostIP+"/255.255.255.255") - hostsToAdd = append(hostsToAdd, hostIP) - - if natIP != "" { - hostsToRemove = append(hostsToRemove, natIP) - hostsToAdd = append(hostsToAdd, natIP) - } - - // Modify NFS export to RW with `root_squashing` - _, err = n.array.GetClient().ModifyNFSExport(ctx, &gopowerstore.NFSExportModify{ - RemoveRWRootHosts: hostsToRemove, - AddRWHosts: hostsToAdd, - }, exportID) - if err != nil { - if apiError, ok := err.(gopowerstore.APIError); !(ok && apiError.NotFound()) { - return nil, status.Errorf(codes.Internal, "failure when modifying nfs export: %s", err.Error()) - } - } - } - - log.WithFields(logFields).Info("nfs share successfully mounted") - return &csi.NodeStageVolumeResponse{}, nil - } - - type scsiPublishContextData struct { - deviceWWN string - volumeLUNAddress string - iscsiTargets []gobrick.ISCSITargetInfo - nvmetcpTargets []gobrick.NVMeTargetInfo - nvmefcTargets []gobrick.NVMeTargetInfo - fcTargets []gobrick.FCTargetInfo - } - - func readSCSIInfoFromPublishContext(publishContext map[string]string, useFC bool, useNVMe bool, isRemote bool) (scsiPublishContextData, error) { - // Get publishContext - var data scsiPublishContextData - deviceWwnKey := common.PublishContextDeviceWWN - 
lunAddressKey := common.PublishContextLUNAddress - if isRemote { - deviceWwnKey = common.PublishContextRemoteDeviceWWN - lunAddressKey = common.PublishContextRemoteLUNAddress - } - - deviceWWN, ok := publishContext[deviceWwnKey] - if !ok { - return data, status.Error(codes.InvalidArgument, "deviceWWN must be in publish context") - } - volumeLUNAddress, ok := publishContext[lunAddressKey] - if !ok { - return data, status.Error(codes.InvalidArgument, "volumeLUNAddress must be in publish context") - } - - iscsiTargets := readISCSITargetsFromPublishContext(publishContext, isRemote) - if len(iscsiTargets) == 0 && !useFC && !useNVMe { - return data, status.Error(codes.InvalidArgument, "iscsiTargets data must be in publish context") - } - nvmeTCPTargets := readNVMETCPTargetsFromPublishContext(publishContext, isRemote) - if len(nvmeTCPTargets) == 0 && useNVMe && !useFC { - return data, status.Error(codes.InvalidArgument, "NVMeTCP Targets data must be in publish context") - } - nvmeFCTargets := readNVMEFCTargetsFromPublishContext(publishContext, isRemote) - if len(nvmeFCTargets) == 0 && useNVMe && useFC { - return data, status.Error(codes.InvalidArgument, "NVMeFC Targets data must be in publish context") - } - fcTargets := readFCTargetsFromPublishContext(publishContext, isRemote) - if len(fcTargets) == 0 && useFC && !useNVMe { - return data, status.Error(codes.InvalidArgument, "fcTargets data must be in publish context") - } - return scsiPublishContextData{ - deviceWWN: deviceWWN, volumeLUNAddress: volumeLUNAddress, - iscsiTargets: iscsiTargets, nvmetcpTargets: nvmeTCPTargets, nvmefcTargets: nvmeFCTargets, fcTargets: fcTargets, - }, nil - } - - func readISCSITargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.ISCSITargetInfo { - var targets []gobrick.ISCSITargetInfo - iscsiTargetsKey := common.PublishContextISCSITargetsPrefix - iscsiPortalsKey := common.PublishContextISCSIPortalsPrefix - if isRemote { - iscsiTargetsKey = common.PublishContextRemoteISCSITargetsPrefix - iscsiPortalsKey = common.PublishContextRemoteISCSIPortalsPrefix - } - for i := 0; ; i++ { - target := gobrick.ISCSITargetInfo{} - t, tfound := pc[fmt.Sprintf("%s%d", iscsiTargetsKey, i)] - if tfound { - target.Target = t - } - p, pfound := pc[fmt.Sprintf("%s%d", iscsiPortalsKey, i)] - if pfound { - target.Portal = p - } - if !tfound || !pfound { - break - } - - if ReachableEndPoint(p) { - // if the portals from the context (set in ControllerPublishVolume) is not reachable from the nodes - targets = append(targets, target) - } - } - log.Infof("iSCSI iscsiTargets from context: %v", targets) - return targets - } - - func readNVMETCPTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.NVMeTargetInfo { - var targets []gobrick.NVMeTargetInfo - nvmeTCPTargetsKey := common.PublishContextNVMETCPTargetsPrefix - nvmeTCPPortalsKey := common.PublishContextNVMETCPPortalsPrefix - if isRemote { - nvmeTCPTargetsKey = common.PublishContextRemoteNVMETCPTargetsPrefix - nvmeTCPPortalsKey = common.PublishContextRemoteNVMETCPPortalsPrefix - } - for i := 0; ; i++ { - target := gobrick.NVMeTargetInfo{} - t, tfound := pc[fmt.Sprintf("%s%d", nvmeTCPTargetsKey, i)] - if tfound { - target.Target = t - } - p, pfound := pc[fmt.Sprintf("%s%d", nvmeTCPPortalsKey, i)] - if pfound { - target.Portal = p - } - if !tfound || !pfound { - break - } - targets = append(targets, target) - } - log.Infof("NVMeTCP Targets from context: %v", targets) - return targets - } - - func readNVMEFCTargetsFromPublishContext(pc map[string]string, 
isRemote bool) []gobrick.NVMeTargetInfo { - var targets []gobrick.NVMeTargetInfo - nvmeFcTargetsKey := common.PublishContextNVMEFCTargetsPrefix - nvmeFcPortalsKey := common.PublishContextNVMEFCPortalsPrefix - if isRemote { - nvmeFcTargetsKey = common.PublishContextRemoteNVMEFCTargetsPrefix - nvmeFcPortalsKey = common.PublishContextRemoteNVMEFCPortalsPrefix - } - for i := 0; ; i++ { - target := gobrick.NVMeTargetInfo{} - t, tfound := pc[fmt.Sprintf("%s%d", nvmeFcTargetsKey, i)] - if tfound { - target.Target = t - } - p, pfound := pc[fmt.Sprintf("%s%d", nvmeFcPortalsKey, i)] - if pfound { - target.Portal = p - } - if !tfound || !pfound { - break - } - targets = append(targets, target) - } - log.Infof("NVMeFC Targets from context: %v", targets) - return targets - } - - func readFCTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.FCTargetInfo { - var targets []gobrick.FCTargetInfo - fcWwpnKey := common.PublishContextFCWWPNPrefix - if isRemote { - fcWwpnKey = common.PublishContextRemoteFCWWPNPrefix - } - for i := 0; ; i++ { - wwpn, tfound := pc[fmt.Sprintf("%s%d", fcWwpnKey, i)] - if !tfound { - break - } - targets = append(targets, gobrick.FCTargetInfo{WWPN: wwpn}) - } - log.Infof("FC iscsiTargets from context: %v", targets) - return targets - } - - func (s *SCSIStager) connectDevice(ctx context.Context, data scsiPublishContextData) (string, error) { - logFields := common.GetLogFields(ctx) - var err error - lun, err := strconv.Atoi(data.volumeLUNAddress) - if err != nil { - log.WithFields(logFields).Errorf("failed to convert lun number to int: %s", err.Error()) - return "", status.Errorf(codes.Internal, - "failed to convert lun number to int: %s", err.Error()) - } - wwn := data.deviceWWN - var device gobrick.Device - if s.useNVME { - device, err = s.connectNVMEDevice(ctx, wwn, data, s.useFC) - } else if s.useFC { - device, err = s.connectFCDevice(ctx, lun, data) - } else { - device, err = s.connectISCSIDevice(ctx, lun, data) - } - - if err != nil { - log.WithFields(logFields).Errorf("Unable to find device after multiple discovery attempts: %s", err.Error()) - return "", status.Errorf(codes.Internal, - "unable to find device after multiple discovery attempts: %s", err.Error()) - } - devicePath := path.Join("/dev/", device.Name) - return devicePath, nil - } - - func (s *SCSIStager) connectISCSIDevice(ctx context.Context, - lun int, data scsiPublishContextData, - ) (gobrick.Device, error) { - logFields := common.GetLogFields(ctx) - var targets []gobrick.ISCSITargetInfo - for _, t := range data.iscsiTargets { - targets = append(targets, gobrick.ISCSITargetInfo{Target: t.Target, Portal: t.Portal}) - } - // separate context to prevent 15 seconds cancel from kubernetes - connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) - defer cFunc() - - connectorCtx = common.SetLogFields(connectorCtx, logFields) - return s.iscsiConnector.ConnectVolume(connectorCtx, gobrick.ISCSIVolumeInfo{ - Targets: targets, - Lun: lun, - }) - } - - func (s *SCSIStager) connectNVMEDevice(ctx context.Context, - wwn string, data scsiPublishContextData, useFC bool, - ) (gobrick.Device, error) { - logFields := common.GetLogFields(ctx) - var targets []gobrick.NVMeTargetInfo - - if useFC { - for _, t := range data.nvmefcTargets { - targets = append(targets, gobrick.NVMeTargetInfo{Target: t.Target, Portal: t.Portal}) - } - } else { - for _, t := range data.nvmetcpTargets { - targets = append(targets, gobrick.NVMeTargetInfo{Target: t.Target, Portal: t.Portal}) - } - } - // separate 
context to prevent 15 seconds cancel from kubernetes - connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) - defer cFunc() - - connectorCtx = common.SetLogFields(connectorCtx, logFields) - return s.nvmeConnector.ConnectVolume(connectorCtx, gobrick.NVMeVolumeInfo{ - Targets: targets, - WWN: wwn, - }, useFC) - } - - func (s *SCSIStager) connectFCDevice(ctx context.Context, - lun int, data scsiPublishContextData, - ) (gobrick.Device, error) { - logFields := common.GetLogFields(ctx) - var targets []gobrick.FCTargetInfo - - for _, t := range data.fcTargets { - targets = append(targets, gobrick.FCTargetInfo{WWPN: t.WWPN}) - } - // separate context to prevent 15 seconds cancel from kubernetes - connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) - defer cFunc() - - connectorCtx = common.SetLogFields(connectorCtx, logFields) - return s.fcConnector.ConnectVolume(connectorCtx, gobrick.FCVolumeInfo{ - Targets: targets, - Lun: lun, - }) - } - - func isReadyToPublish(ctx context.Context, stagingPath string, fs fs.Interface) (bool, bool, error) { - logFields := common.GetLogFields(ctx) - stageInfo, found, err := getTargetMount(ctx, stagingPath, fs) - if err != nil { - return found, false, err - } - if !found { - log.WithFields(logFields).Warning("staged device not found") - return found, false, nil - } - - if strings.HasSuffix(stageInfo.Source, "deleted") { - log.WithFields(logFields).Warning("staged device linked with deleted path") - return found, false, nil - } - - devFS, err := fs.GetUtil().GetDiskFormat(ctx, stagingPath) - if err != nil { - return found, false, err - } - return found, devFS != "mpath_member", nil - } - - func isReadyToPublishNFS(ctx context.Context, stagingPath string, fs fs.Interface) (bool, error) { - logFields := common.GetLogFields(ctx) - stageInfo, found, err := getTargetMount(ctx, stagingPath, fs) - if err != nil { - return found, err - } - if !found { - log.WithFields(logFields).Warning("staged device not found") - return found, nil - } - - if strings.HasSuffix(stageInfo.Source, "deleted") { - log.WithFields(logFields).Warning("staged device linked with deleted path") - return found, nil - } - - return found, nil - } - \ No newline at end of file +package node + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/dell/csi-powerstore/v2/pkg/array" + "github.com/dell/csi-powerstore/v2/pkg/common" + "github.com/dell/csi-powerstore/v2/pkg/common/fs" + "github.com/dell/gobrick" + "github.com/dell/gopowerstore" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + procMountsPath = "/proc/self/mountinfo" + procMountsRetries = 15 +) + +// VolumeStager allows to node stage a volume +type VolumeStager interface { + Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, logFields log.Fields, fs fs.Interface, id string, isRemote bool) (*csi.NodeStageVolumeResponse, error) +} + +// ReachableEndPoint checks if the endpoint is reachable or not +var ReachableEndPoint = common.ReachableEndPoint + +// SCSIStager implementation of NodeVolumeStager for SCSI based (FC, iSCSI) volumes +type SCSIStager struct { + useFC bool + useNVME bool + iscsiConnector ISCSIConnector + nvmeConnector NVMEConnector + fcConnector FcConnector +} + +// Stage stages volume by connecting it through either FC or iSCSI and creating bind mount to staging path +func (s 
*SCSIStager) Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, + logFields log.Fields, fs fs.Interface, id string, isRemote bool, +) (*csi.NodeStageVolumeResponse, error) { + // append additional path to be able to do bind mounts + stagingPath := getStagingPath(ctx, req.GetStagingTargetPath(), id) + + publishContext, err := readSCSIInfoFromPublishContext(req.PublishContext, s.useFC, s.useNVME, isRemote) + if err != nil { + return nil, err + } + + logFields["ID"] = id + if s.useNVME { + if s.useFC { + logFields["Targets"] = publishContext.nvmefcTargets + } else { + logFields["Targets"] = publishContext.nvmetcpTargets + } + } else { + logFields["Targets"] = publishContext.iscsiTargets + } + logFields["WWN"] = publishContext.deviceWWN + logFields["Lun"] = publishContext.volumeLUNAddress + logFields["StagingPath"] = stagingPath + ctx = common.SetLogFields(ctx, logFields) + + found, ready, err := isReadyToPublish(ctx, stagingPath, fs) + if err != nil { + return nil, err + } + if ready { + log.WithFields(logFields).Info("device already staged") + return &csi.NodeStageVolumeResponse{}, nil + } else if found { + log.WithFields(logFields).Warning("volume found in staging path but it is not ready for publish," + + "try to unmount it and retry staging again") + _, err := unstageVolume(ctx, stagingPath, id, logFields, err, fs) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to unmount volume: %s", err.Error()) + } + } + + devicePath, err := s.connectDevice(ctx, publishContext) + if err != nil { + return nil, err + } + + logFields["DevicePath"] = devicePath + + log.WithFields(logFields).Info("start staging") + if _, err := fs.MkFileIdempotent(stagingPath); err != nil { + return nil, status.Errorf(codes.Internal, "can't create target file %s: %s", + stagingPath, err.Error()) + } + log.WithFields(logFields).Info("target path successfully created") + + mntFlags := common.GetMountFlags(req.GetVolumeCapability()) + if err := fs.GetUtil().BindMount(ctx, devicePath, stagingPath, mntFlags...); err != nil { + return nil, status.Errorf(codes.Internal, + "error bind disk %s to target path: %s", devicePath, err.Error()) + } + + log.WithFields(logFields).Info("stage complete") + return &csi.NodeStageVolumeResponse{}, nil +} + +// NFSStager implementation of NodeVolumeStager for NFS volumes +type NFSStager struct { + array *array.PowerStoreArray +} + +// Stage stages volume by mounting volumes as nfs to the staging path +func (n *NFSStager) Stage(ctx context.Context, req *csi.NodeStageVolumeRequest, + logFields log.Fields, fs fs.Interface, id string, _ bool, +) (*csi.NodeStageVolumeResponse, error) { + // append additional path to be able to do bind mounts + stagingPath := getStagingPath(ctx, req.GetStagingTargetPath(), id) + + hostIP := req.PublishContext[common.KeyHostIP] + exportID := req.PublishContext[common.KeyExportID] + nfsExport := req.PublishContext[common.KeyNfsExportPath] + allowRoot := req.PublishContext[common.KeyAllowRoot] + nasName := req.PublishContext[common.KeyNasName] + + natIP := "" + if ip, ok := req.PublishContext[common.KeyNatIP]; ok { + natIP = ip + } + + logFields["NfsExportPath"] = nfsExport + logFields["StagingPath"] = req.GetStagingTargetPath() + logFields["ID"] = id + logFields["AllowRoot"] = allowRoot + logFields["ExportID"] = exportID + logFields["HostIP"] = hostIP + logFields["NatIP"] = natIP + logFields["NFSv4ACLs"] = req.PublishContext[common.KeyNfsACL] + logFields["NasName"] = nasName + ctx = common.SetLogFields(ctx, logFields) + + found, err := 
isReadyToPublishNFS(ctx, stagingPath, fs) + if err != nil { + return nil, err + } + + if found { + log.WithFields(logFields).Info("device already staged") + return &csi.NodeStageVolumeResponse{}, nil + } + + if err := fs.MkdirAll(stagingPath, 0o750); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create target folder %s: %s", stagingPath, err.Error()) + } + log.WithFields(logFields).Info("stage path successfully created") + + mntFlags := common.GetMountFlags(req.GetVolumeCapability()) + if err := fs.GetUtil().Mount(ctx, nfsExport, stagingPath, "", mntFlags...); err != nil { + return nil, status.Errorf(codes.Internal, + "error mount nfs share %s to target path: %s", nfsExport, err.Error()) + } + + // Create folder with 1777 in nfs share so every user can use it + if err := fs.MkdirAll(filepath.Join(stagingPath, commonNfsVolumeFolder), 0o750); err != nil { + return nil, status.Errorf(codes.Internal, + "can't create common folder %s: %s", filepath.Join(stagingPath, "volume"), err.Error()) + } + + mode := os.ModePerm + acls := req.PublishContext[common.KeyNfsACL] + aclsConfigured := false + if acls != "" { + if posixMode(acls) { + perm, err := strconv.ParseUint(acls, 8, 32) + if err == nil { + mode = os.FileMode(perm) // #nosec: G115 false positive + } else { + log.WithFields(logFields).Warn("can't parse file mode, invalid mode specified. Default mode permissions will be set.") + } + } else { + aclsConfigured, err = validateAndSetACLs(ctx, &NFSv4ACLs{}, nasName, n.array.GetClient(), acls, filepath.Join(stagingPath, commonNfsVolumeFolder)) + if err != nil || !aclsConfigured { + return nil, err + } + } + } + + if !aclsConfigured { + if err := fs.Chmod(filepath.Join(stagingPath, commonNfsVolumeFolder), os.ModeSticky|mode); err != nil { + return nil, status.Errorf(codes.Internal, + "can't change permissions of folder %s: %s", filepath.Join(stagingPath, "volume"), err.Error()) + } + } + + if allowRoot == "false" { + log.WithFields(logFields).Info("removing allow root from nfs export") + var hostsToRemove []string + var hostsToAdd []string + + hostsToRemove = append(hostsToRemove, hostIP+"/255.255.255.255") + hostsToAdd = append(hostsToAdd, hostIP) + + if natIP != "" { + hostsToRemove = append(hostsToRemove, natIP) + hostsToAdd = append(hostsToAdd, natIP) + } + + // Modify NFS export to RW with `root_squashing` + _, err = n.array.GetClient().ModifyNFSExport(ctx, &gopowerstore.NFSExportModify{ + RemoveRWRootHosts: hostsToRemove, + AddRWHosts: hostsToAdd, + }, exportID) + if err != nil { + if apiError, ok := err.(gopowerstore.APIError); !(ok && apiError.NotFound()) { + return nil, status.Errorf(codes.Internal, "failure when modifying nfs export: %s", err.Error()) + } + } + } + + log.WithFields(logFields).Info("nfs share successfully mounted") + return &csi.NodeStageVolumeResponse{}, nil +} + +type scsiPublishContextData struct { + deviceWWN string + volumeLUNAddress string + iscsiTargets []gobrick.ISCSITargetInfo + nvmetcpTargets []gobrick.NVMeTargetInfo + nvmefcTargets []gobrick.NVMeTargetInfo + fcTargets []gobrick.FCTargetInfo +} + +func readSCSIInfoFromPublishContext(publishContext map[string]string, useFC bool, useNVMe bool, isRemote bool) (scsiPublishContextData, error) { + // Get publishContext + var data scsiPublishContextData + deviceWwnKey := common.PublishContextDeviceWWN + lunAddressKey := common.PublishContextLUNAddress + if isRemote { + deviceWwnKey = common.PublishContextRemoteDeviceWWN + lunAddressKey = common.PublishContextRemoteLUNAddress + } + + deviceWWN, ok := 
publishContext[deviceWwnKey] + if !ok { + return data, status.Error(codes.InvalidArgument, "deviceWWN must be in publish context") + } + volumeLUNAddress, ok := publishContext[lunAddressKey] + if !ok { + return data, status.Error(codes.InvalidArgument, "volumeLUNAddress must be in publish context") + } + + iscsiTargets := readISCSITargetsFromPublishContext(publishContext, isRemote) + if len(iscsiTargets) == 0 && !useFC && !useNVMe { + return data, status.Error(codes.InvalidArgument, "iscsiTargets data must be in publish context") + } + nvmeTCPTargets := readNVMETCPTargetsFromPublishContext(publishContext, isRemote) + if len(nvmeTCPTargets) == 0 && useNVMe && !useFC { + return data, status.Error(codes.InvalidArgument, "NVMeTCP Targets data must be in publish context") + } + nvmeFCTargets := readNVMEFCTargetsFromPublishContext(publishContext, isRemote) + if len(nvmeFCTargets) == 0 && useNVMe && useFC { + return data, status.Error(codes.InvalidArgument, "NVMeFC Targets data must be in publish context") + } + fcTargets := readFCTargetsFromPublishContext(publishContext, isRemote) + if len(fcTargets) == 0 && useFC && !useNVMe { + return data, status.Error(codes.InvalidArgument, "fcTargets data must be in publish context") + } + return scsiPublishContextData{ + deviceWWN: deviceWWN, volumeLUNAddress: volumeLUNAddress, + iscsiTargets: iscsiTargets, nvmetcpTargets: nvmeTCPTargets, nvmefcTargets: nvmeFCTargets, fcTargets: fcTargets, + }, nil +} + +func readISCSITargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.ISCSITargetInfo { + var targets []gobrick.ISCSITargetInfo + iscsiTargetsKey := common.PublishContextISCSITargetsPrefix + iscsiPortalsKey := common.PublishContextISCSIPortalsPrefix + if isRemote { + iscsiTargetsKey = common.PublishContextRemoteISCSITargetsPrefix + iscsiPortalsKey = common.PublishContextRemoteISCSIPortalsPrefix + } + for i := 0; ; i++ { + target := gobrick.ISCSITargetInfo{} + t, tfound := pc[fmt.Sprintf("%s%d", iscsiTargetsKey, i)] + if tfound { + target.Target = t + } + p, pfound := pc[fmt.Sprintf("%s%d", iscsiPortalsKey, i)] + if pfound { + target.Portal = p + } + if !tfound || !pfound { + break + } + + if ReachableEndPoint(p) { + // if the portals from the context (set in ControllerPublishVolume) is not reachable from the nodes + targets = append(targets, target) + } + } + log.Infof("iSCSI iscsiTargets from context: %v", targets) + return targets +} + +func readNVMETCPTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.NVMeTargetInfo { + var targets []gobrick.NVMeTargetInfo + nvmeTCPTargetsKey := common.PublishContextNVMETCPTargetsPrefix + nvmeTCPPortalsKey := common.PublishContextNVMETCPPortalsPrefix + if isRemote { + nvmeTCPTargetsKey = common.PublishContextRemoteNVMETCPTargetsPrefix + nvmeTCPPortalsKey = common.PublishContextRemoteNVMETCPPortalsPrefix + } + for i := 0; ; i++ { + target := gobrick.NVMeTargetInfo{} + t, tfound := pc[fmt.Sprintf("%s%d", nvmeTCPTargetsKey, i)] + if tfound { + target.Target = t + } + p, pfound := pc[fmt.Sprintf("%s%d", nvmeTCPPortalsKey, i)] + if pfound { + target.Portal = p + } + if !tfound || !pfound { + break + } + targets = append(targets, target) + } + log.Infof("NVMeTCP Targets from context: %v", targets) + return targets +} + +func readNVMEFCTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.NVMeTargetInfo { + var targets []gobrick.NVMeTargetInfo + nvmeFcTargetsKey := common.PublishContextNVMEFCTargetsPrefix + nvmeFcPortalsKey := common.PublishContextNVMEFCPortalsPrefix + 
if isRemote { + nvmeFcTargetsKey = common.PublishContextRemoteNVMEFCTargetsPrefix + nvmeFcPortalsKey = common.PublishContextRemoteNVMEFCPortalsPrefix + } + for i := 0; ; i++ { + target := gobrick.NVMeTargetInfo{} + t, tfound := pc[fmt.Sprintf("%s%d", nvmeFcTargetsKey, i)] + if tfound { + target.Target = t + } + p, pfound := pc[fmt.Sprintf("%s%d", nvmeFcPortalsKey, i)] + if pfound { + target.Portal = p + } + if !tfound || !pfound { + break + } + targets = append(targets, target) + } + log.Infof("NVMeFC Targets from context: %v", targets) + return targets +} + +func readFCTargetsFromPublishContext(pc map[string]string, isRemote bool) []gobrick.FCTargetInfo { + var targets []gobrick.FCTargetInfo + fcWwpnKey := common.PublishContextFCWWPNPrefix + if isRemote { + fcWwpnKey = common.PublishContextRemoteFCWWPNPrefix + } + for i := 0; ; i++ { + wwpn, tfound := pc[fmt.Sprintf("%s%d", fcWwpnKey, i)] + if !tfound { + break + } + targets = append(targets, gobrick.FCTargetInfo{WWPN: wwpn}) + } + log.Infof("FC iscsiTargets from context: %v", targets) + return targets +} + +func (s *SCSIStager) connectDevice(ctx context.Context, data scsiPublishContextData) (string, error) { + logFields := common.GetLogFields(ctx) + var err error + lun, err := strconv.Atoi(data.volumeLUNAddress) + if err != nil { + log.WithFields(logFields).Errorf("failed to convert lun number to int: %s", err.Error()) + return "", status.Errorf(codes.Internal, + "failed to convert lun number to int: %s", err.Error()) + } + wwn := data.deviceWWN + var device gobrick.Device + if s.useNVME { + device, err = s.connectNVMEDevice(ctx, wwn, data, s.useFC) + } else if s.useFC { + device, err = s.connectFCDevice(ctx, lun, data) + } else { + device, err = s.connectISCSIDevice(ctx, lun, data) + } + + if err != nil { + log.WithFields(logFields).Errorf("Unable to find device after multiple discovery attempts: %s", err.Error()) + return "", status.Errorf(codes.Internal, + "unable to find device after multiple discovery attempts: %s", err.Error()) + } + devicePath := path.Join("/dev/", device.Name) + return devicePath, nil +} + +func (s *SCSIStager) connectISCSIDevice(ctx context.Context, + lun int, data scsiPublishContextData, +) (gobrick.Device, error) { + logFields := common.GetLogFields(ctx) + var targets []gobrick.ISCSITargetInfo + for _, t := range data.iscsiTargets { + targets = append(targets, gobrick.ISCSITargetInfo{Target: t.Target, Portal: t.Portal}) + } + // separate context to prevent 15 seconds cancel from kubernetes + connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) + defer cFunc() + + connectorCtx = common.SetLogFields(connectorCtx, logFields) + return s.iscsiConnector.ConnectVolume(connectorCtx, gobrick.ISCSIVolumeInfo{ + Targets: targets, + Lun: lun, + }) +} + +func (s *SCSIStager) connectNVMEDevice(ctx context.Context, + wwn string, data scsiPublishContextData, useFC bool, +) (gobrick.Device, error) { + logFields := common.GetLogFields(ctx) + var targets []gobrick.NVMeTargetInfo + + if useFC { + for _, t := range data.nvmefcTargets { + targets = append(targets, gobrick.NVMeTargetInfo{Target: t.Target, Portal: t.Portal}) + } + } else { + for _, t := range data.nvmetcpTargets { + targets = append(targets, gobrick.NVMeTargetInfo{Target: t.Target, Portal: t.Portal}) + } + } + // separate context to prevent 15 seconds cancel from kubernetes + connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) + defer cFunc() + + connectorCtx = common.SetLogFields(connectorCtx, logFields) + 
return s.nvmeConnector.ConnectVolume(connectorCtx, gobrick.NVMeVolumeInfo{ + Targets: targets, + WWN: wwn, + }, useFC) +} + +func (s *SCSIStager) connectFCDevice(ctx context.Context, + lun int, data scsiPublishContextData, +) (gobrick.Device, error) { + logFields := common.GetLogFields(ctx) + var targets []gobrick.FCTargetInfo + + for _, t := range data.fcTargets { + targets = append(targets, gobrick.FCTargetInfo{WWPN: t.WWPN}) + } + // separate context to prevent 15 seconds cancel from kubernetes + connectorCtx, cFunc := context.WithTimeout(context.Background(), time.Second*120) + defer cFunc() + + connectorCtx = common.SetLogFields(connectorCtx, logFields) + return s.fcConnector.ConnectVolume(connectorCtx, gobrick.FCVolumeInfo{ + Targets: targets, + Lun: lun, + }) +} + +func isReadyToPublish(ctx context.Context, stagingPath string, fs fs.Interface) (bool, bool, error) { + logFields := common.GetLogFields(ctx) + stageInfo, found, err := getTargetMount(ctx, stagingPath, fs) + if err != nil { + return found, false, err + } + if !found { + log.WithFields(logFields).Warning("staged device not found") + return found, false, nil + } + + if strings.HasSuffix(stageInfo.Source, "deleted") { + log.WithFields(logFields).Warning("staged device linked with deleted path") + return found, false, nil + } + + devFS, err := fs.GetUtil().GetDiskFormat(ctx, stagingPath) + if err != nil { + return found, false, err + } + return found, devFS != "mpath_member", nil +} + +func isReadyToPublishNFS(ctx context.Context, stagingPath string, fs fs.Interface) (bool, error) { + logFields := common.GetLogFields(ctx) + stageInfo, found, err := getTargetMount(ctx, stagingPath, fs) + if err != nil { + return found, err + } + if !found { + log.WithFields(logFields).Warning("staged device not found") + return found, nil + } + + if strings.HasSuffix(stageInfo.Source, "deleted") { + log.WithFields(logFields).Warning("staged device linked with deleted path") + return found, nil + } + + return found, nil +} From 61f299054b86696be9b6ccde7286d23f2a286da4 Mon Sep 17 00:00:00 2001 From: Akshay Saini <109056238+AkshaySainiDell@users.noreply.github.com> Date: Tue, 12 Nov 2024 04:02:20 -0600 Subject: [PATCH 3/8] Fix golangci-lint warnings --- pkg/common/common.go | 12 ++++++------ pkg/node/node.go | 4 ++-- pkg/node/publisher.go | 22 +++++++++++----------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/pkg/common/common.go b/pkg/common/common.go index 7037bac8..4842cc03 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -297,8 +297,8 @@ func SetLogFields(ctx context.Context, fields log.Fields) context.Context { // RandomString returns a random string of specified length. // String is generated by using crypto/rand. 
-func RandomString(len int) string { - b := make([]byte, len) +func RandomString(length int) string { + b := make([]byte, length) _, err := rand.Read(b) if err != nil { log.Errorf("Can't generate random string; error = %v", err) @@ -526,10 +526,10 @@ func ReachableEndPoint(endpoint string) bool { return true } -func GetMountFlags(cap *csi.VolumeCapability) []string { - if cap != nil { - if mountCap := cap.GetMount(); mountCap != nil { - return mountCap.GetMountFlags() +func GetMountFlags(vc *csi.VolumeCapability) []string { + if vc != nil { + if mount := vc.GetMount(); mount != nil { + return mount.GetMountFlags() } } return nil diff --git a/pkg/node/node.go b/pkg/node/node.go index d7d0d46e..de8667c4 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -1074,11 +1074,11 @@ func (s *Service) nodeExpandRawBlockVolume(ctx context.Context, volumeWWN string // NodeGetCapabilities returns supported features by the node service func (s *Service) NodeGetCapabilities(_ context.Context, _ *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { - newCap := func(cap csi.NodeServiceCapability_RPC_Type) *csi.NodeServiceCapability { + newCap := func(capability csi.NodeServiceCapability_RPC_Type) *csi.NodeServiceCapability { return &csi.NodeServiceCapability{ Type: &csi.NodeServiceCapability_Rpc{ Rpc: &csi.NodeServiceCapability_RPC{ - Type: cap, + Type: capability, }, }, } diff --git a/pkg/node/publisher.go b/pkg/node/publisher.go index 805806bc..8ee2e083 100644 --- a/pkg/node/publisher.go +++ b/pkg/node/publisher.go @@ -32,7 +32,7 @@ import ( // VolumePublisher allows to node publish a volume type VolumePublisher interface { Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, - cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) + vc *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) } // SCSIPublisher implementation of NodeVolumePublisher for SCSI based (FC, iSCSI) volumes @@ -41,7 +41,7 @@ type SCSIPublisher struct { } // Publish publishes volume as either raw block or mount by mounting it to the target path -func (sp *SCSIPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { +func (sp *SCSIPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, vc *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) if err != nil { return nil, err @@ -52,9 +52,9 @@ func (sp *SCSIPublisher) Publish(ctx context.Context, logFields log.Fields, fs f } if sp.isBlock { - return sp.publishBlock(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) + return sp.publishBlock(ctx, logFields, fs, vc, isRO, targetPath, stagingPath) } - return sp.publishMount(ctx, logFields, fs, cap, isRO, targetPath, stagingPath) + return sp.publishMount(ctx, logFields, fs, vc, isRO, targetPath, stagingPath) } func (sp *SCSIPublisher) publishBlock(ctx context.Context, logFields log.Fields, fs fs.Interface, _ *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { @@ -79,21 +79,21 @@ func (sp *SCSIPublisher) publishBlock(ctx context.Context, logFields log.Fields, return &csi.NodePublishVolumeResponse{}, nil } -func (sp 
*SCSIPublisher) publishMount(ctx context.Context, logFields log.Fields, fs fs.Interface, cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { - if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER { +func (sp *SCSIPublisher) publishMount(ctx context.Context, logFields log.Fields, fs fs.Interface, vc *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string) (*csi.NodePublishVolumeResponse, error) { + if vc.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER { // MULTI_WRITER not supported for mount volumes return nil, status.Error(codes.Unimplemented, "Mount volumes do not support AccessMode MULTI_NODE_MULTI_WRITER") } - if cap.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { + if vc.GetAccessMode().GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { // Warning in case of MULTI_NODE_READER_ONLY for mount volumes log.Warningf("Mount volume with the AccessMode ReadOnlyMany") } var opts []string - mountCap := cap.GetMount() + mountCap := vc.GetMount() mountFsType := mountCap.GetFsType() - mntFlags := common.GetMountFlags(cap) + mntFlags := common.GetMountFlags(vc) if mountFsType == "xfs" { mntFlags = append(mntFlags, "nouuid") } @@ -151,7 +151,7 @@ type NFSPublisher struct{} // Publish publishes nfs volume by mounting it to the target path func (np *NFSPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs.Interface, - cap *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string, + vc *csi.VolumeCapability, isRO bool, targetPath string, stagingPath string, ) (*csi.NodePublishVolumeResponse, error) { published, err := isAlreadyPublished(ctx, targetPath, getRWModeString(isRO), fs) if err != nil { @@ -168,7 +168,7 @@ func (np *NFSPublisher) Publish(ctx context.Context, logFields log.Fields, fs fs } log.WithFields(logFields).Info("target path successfully created") - mntFlags := common.GetMountFlags(cap) + mntFlags := common.GetMountFlags(vc) if isRO { mntFlags = append(mntFlags, "ro") From 0c1c8c38754b3dbfba6696c6975071fded12f68f Mon Sep 17 00:00:00 2001 From: Akshay Saini <109056238+AkshaySainiDell@users.noreply.github.com> Date: Tue, 12 Nov 2024 04:07:35 -0600 Subject: [PATCH 4/8] Fix golangci-lint warnings --- pkg/array/array.go | 6 +++--- pkg/common/fs/fs.go | 4 ++-- pkg/controller/controller.go | 4 ++-- pkg/node/base.go | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/array/array.go b/pkg/array/array.go index faae192a..3bb20c41 100644 --- a/pkg/array/array.go +++ b/pkg/array/array.go @@ -278,7 +278,7 @@ func GetPowerStoreArrays(fs fs.Interface, filePath string) (map[string]*PowerSto // It will do that by querying default powerstore array passed as one of the arguments func ParseVolumeID(ctx context.Context, volumeHandle string, defaultArray *PowerStoreArray, /*legacy support*/ - cap *csi.VolumeCapability, /*legacy support*/ + vc *csi.VolumeCapability, /*legacy support*/ ) (localVolumeID, arrayID, protocol, remoteVolumeID, remoteArrayID string, e error) { if volumeHandle == "" { return "", "", "", "", "", status.Errorf(codes.FailedPrecondition, @@ -305,8 +305,8 @@ func ParseVolumeID(ctx context.Context, volumeHandle string, arrayID = defaultArray.GetGlobalID() // If we have volume capability in request we can check FsType - if cap != nil && cap.GetMount() != nil { - if cap.GetMount().GetFsType() == "nfs" { + if vc != nil && 
vc.GetMount() != nil { + if vc.GetMount().GetFsType() == "nfs" { protocol = "nfs" } else { protocol = "scsi" diff --git a/pkg/common/fs/fs.go b/pkg/common/fs/fs.go index 9119702f..876f4a7a 100644 --- a/pkg/common/fs/fs.go +++ b/pkg/common/fs/fs.go @@ -117,8 +117,8 @@ func (fs *Fs) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error } // WriteString is a wrapper of file.WriteString -func (fs *Fs) WriteString(file *os.File, string string) (int, error) { - return file.WriteString(string) // #nosec G304 +func (fs *Fs) WriteString(file *os.File, str string) (int, error) { + return file.WriteString(str) // #nosec G304 } // Create is a wrapper of os.Create diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index babf482a..0b0942dd 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -1104,11 +1104,11 @@ func cacheMaximumVolumeSize(key string, value int64) { // ControllerGetCapabilities returns list of capabilities that are supported by the driver. func (s *Service) ControllerGetCapabilities(_ context.Context, _ *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { - newCap := func(cap csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability { + newCap := func(capability csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability { return &csi.ControllerServiceCapability{ Type: &csi.ControllerServiceCapability_Rpc{ Rpc: &csi.ControllerServiceCapability_RPC{ - Type: cap, + Type: capability, }, }, } diff --git a/pkg/node/base.go b/pkg/node/base.go index df2c4ac4..9998c8dc 100644 --- a/pkg/node/base.go +++ b/pkg/node/base.go @@ -312,8 +312,8 @@ func deleteMapping(volID, tmpDir string, fs fs.Interface) error { return err } -func isBlock(cap *csi.VolumeCapability) bool { - _, isBlock := cap.GetAccessType().(*csi.VolumeCapability_Block) +func isBlock(vc *csi.VolumeCapability) bool { + _, isBlock := vc.GetAccessType().(*csi.VolumeCapability_Block) return isBlock } From 5d680b069a7a8bbee54f1026f1d9a2b3c24767d9 Mon Sep 17 00:00:00 2001 From: Akshay Saini <109056238+AkshaySainiDell@users.noreply.github.com> Date: Tue, 12 Nov 2024 04:13:05 -0600 Subject: [PATCH 5/8] Update common github actions --- .github/workflows/actions.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index 2812ea58..6c8bef4e 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -6,13 +6,13 @@ on: branches: [ main ] jobs: code-check: - name: Check Go formatting, linting, vetting + name: Check Go formatting, vetting runs-on: ubuntu-latest steps: - name: Checkout the code uses: actions/checkout@v4 - name: Run the formatter, linter, and vetter - uses: dell/common-github-actions/go-code-formatter-linter-vetter@main + uses: dell/common-github-actions/go-code-formatter-vetter@main with: directories: ./... 
test: From 24f42fd649088cd6beaae95513fb59280a687562 Mon Sep 17 00:00:00 2001 From: Akshay Saini <109056238+AkshaySainiDell@users.noreply.github.com> Date: Tue, 12 Nov 2024 04:34:12 -0600 Subject: [PATCH 6/8] Add UT --- pkg/common/common_test.go | 48 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/pkg/common/common_test.go b/pkg/common/common_test.go index 396f8403..df62ea79 100644 --- a/pkg/common/common_test.go +++ b/pkg/common/common_test.go @@ -439,3 +439,51 @@ func TestReachableEndPoint(t *testing.T) { }) } } + +func TestGetMountFlags(t *testing.T) { + tests := []struct { + name string + vc *csi.VolumeCapability + expected []string + }{ + { + name: "Nil VolumeCapability", + vc: nil, + expected: nil, + }, + { + name: "Nil Mount", + vc: &csi.VolumeCapability{}, + expected: nil, + }, + { + name: "With Mount Flags", + vc: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{ + MountFlags: []string{"ro", "noexec"}, + }, + }, + }, + expected: []string{"ro", "noexec"}, + }, + { + name: "Empty Mount Flags", + vc: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{ + MountFlags: []string{}, + }, + }, + }, + expected: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetMountFlags(tt.vc) + assert.Equal(t, tt.expected, result) + }) + } +} From b9a26661f231e7e69928266b25808f07f0f76b7d Mon Sep 17 00:00:00 2001 From: Akshay Saini <109056238+AkshaySainiDell@users.noreply.github.com> Date: Tue, 12 Nov 2024 04:36:52 -0600 Subject: [PATCH 7/8] Fix UT and github action --- .github/workflows/actions.yml | 2 +- pkg/common/common_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index 6c8bef4e..e9944263 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -6,7 +6,7 @@ on: branches: [ main ] jobs: code-check: - name: Check Go formatting, vetting + name: Check Go formatting, linting, vetting runs-on: ubuntu-latest steps: - name: Checkout the code diff --git a/pkg/common/common_test.go b/pkg/common/common_test.go index df62ea79..6a69d47f 100644 --- a/pkg/common/common_test.go +++ b/pkg/common/common_test.go @@ -482,7 +482,7 @@ func TestGetMountFlags(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := GetMountFlags(tt.vc) + result := common.GetMountFlags(tt.vc) assert.Equal(t, tt.expected, result) }) } From af36c04343314a5ffd9d4a8a34b3731b32e55857 Mon Sep 17 00:00:00 2001 From: Akshay Saini Date: Tue, 12 Nov 2024 16:17:52 +0530 Subject: [PATCH 8/8] Fix formatting --- pkg/common/common_test.go | 90 +++++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/pkg/common/common_test.go b/pkg/common/common_test.go index 6a69d47f..0f1c8ddd 100644 --- a/pkg/common/common_test.go +++ b/pkg/common/common_test.go @@ -441,49 +441,49 @@ func TestReachableEndPoint(t *testing.T) { } func TestGetMountFlags(t *testing.T) { - tests := []struct { - name string - vc *csi.VolumeCapability - expected []string - }{ - { - name: "Nil VolumeCapability", - vc: nil, - expected: nil, - }, - { - name: "Nil Mount", - vc: &csi.VolumeCapability{}, - expected: nil, - }, - { - name: "With Mount Flags", - vc: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{ - MountFlags: []string{"ro", 
"noexec"}, - }, - }, - }, - expected: []string{"ro", "noexec"}, - }, - { - name: "Empty Mount Flags", - vc: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{ - MountFlags: []string{}, - }, - }, - }, - expected: []string{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := common.GetMountFlags(tt.vc) - assert.Equal(t, tt.expected, result) - }) - } + tests := []struct { + name string + vc *csi.VolumeCapability + expected []string + }{ + { + name: "Nil VolumeCapability", + vc: nil, + expected: nil, + }, + { + name: "Nil Mount", + vc: &csi.VolumeCapability{}, + expected: nil, + }, + { + name: "With Mount Flags", + vc: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{ + MountFlags: []string{"ro", "noexec"}, + }, + }, + }, + expected: []string{"ro", "noexec"}, + }, + { + name: "Empty Mount Flags", + vc: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{ + MountFlags: []string{}, + }, + }, + }, + expected: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := common.GetMountFlags(tt.vc) + assert.Equal(t, tt.expected, result) + }) + } }