[issue-1122] removing done TODOs #1127

Merged · 7 commits · Mar 25, 2024
2 changes: 1 addition & 1 deletion Makefile.validation
```diff
@@ -47,7 +47,7 @@ test-sanity-ci: hack-for-kind
 test-sanity-ci-k8s-122: hack-for-kind
 	cd tests && ${GO_ENV_VARS} CI=true CSI_VERSION=${CSI_VERSION} OPERATOR_VERSION=${OPERATOR_VERSION} go test ${LDFLAGS} -v e2e/baremetal_e2e_test.go -timeout 0 -ginkgo.focus="$(strip $(subst ',, ${SANITY_TEST}))" -ginkgo.v -ginkgo.progress -ginkgo.junit-report=reports/report.xml -ginkgo.skip="block" -all-tests -kubeconfig=${HOME}/.kube/config -chartsDir ${CHARTS_DIR} > log.txt
 
-# Run commnity sanity tests for CSI. TODO https://github.com/dell/csi-baremetal/issues/298 Make sanity test work for expansion
+# Run commnity sanity tests for CSI.
 # TODO enable tests back - https://github.com/dell/csi-baremetal/issues/371
 test-sanity:
 #	${GO_ENV_VARS} SANITY=true go test test/sanity/sanity_test.go -ginkgo.skip \
```
3 changes: 1 addition & 2 deletions api/v1/constants.go
```diff
@@ -46,7 +46,6 @@ const (
 	HealthSuspect = "SUSPECT"
 	HealthBad     = "BAD"
 
-	// TODO need to split constants by different packages
 	// Drive status
 	DriveStatusOnline  = "ONLINE"
 	DriveStatusOffline = "OFFLINE"
@@ -94,7 +93,7 @@ const (
 	//Volume expansion annotations
 	VolumePreviousStatus   = "expansion/previous-status"
 	VolumePreviousCapacity = "expansion/previous-capacity"
-	// TODO Mount status?
+
 	// Volume mode
 	ModeRAW     = "RAW"
 	ModeRAWPART = "RAW_PART"
```
2 changes: 1 addition & 1 deletion cmd/scheduling/extender/main.go
```diff
@@ -62,7 +62,7 @@ var (
 	isPatchingEnabled = flag.Bool("isPatchingEnabled", false, "should enable readiness probe")
 )
 
-// TODO should be passed as parameters https://github.com/dell/csi-baremetal/issues/78
+// Registering stages
 const (
 	FilterPattern     string = "/filter"
 	PrioritizePattern string = "/prioritize"
```
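The comment deleted here pointed at issue 78, which proposed passing these endpoint patterns in as parameters rather than keeping hard-coded constants. A minimal sketch of that idea, separate from the repo's actual code (the flag names and listen address are assumptions):

```go
// Hypothetical sketch only: endpoint patterns supplied via flags instead of
// hard-coded constants, as the removed TODO (issue #78) suggested.
package main

import (
	"flag"
	"fmt"
	"net/http"
)

var (
	filterPattern     = flag.String("filterPattern", "/filter", "URL path for the filter stage")
	prioritizePattern = flag.String("prioritizePattern", "/prioritize", "URL path for the prioritize stage")
)

func main() {
	flag.Parse()
	http.HandleFunc(*filterPattern, func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "filter stage")
	})
	http.HandleFunc(*prioritizePattern, func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "prioritize stage")
	})
	// Listen address is illustrative.
	_ = http.ListenAndServe(":8889", nil)
}
```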
3 changes: 0 additions & 3 deletions pkg/base/k8s/cr_helper.go
```diff
@@ -121,7 +121,6 @@ func (cs *CRHelperImpl) DeleteACsByNodeID(nodeID string) error {
 	isError := false
 	for _, ac := range acList.Items {
 		if strings.EqualFold(ac.Spec.NodeId, nodeID) {
-			// todo fix linter issue - https://github.com/kyoh86/scopelint/issues/5
 			// nolint:scopelint
 			if err := cs.k8sClient.DeleteCR(context.Background(), &ac); err != nil {
 				ll.Warningf("Unable to delete AC %s: %s", ac.Name, err)
@@ -215,7 +214,6 @@ func (cs *CRHelperImpl) UpdateVolumesOpStatusOnNode(nodeID, opStatus string) err
 		if volume.Spec.OperationalStatus != opStatus {
 			volume.Spec.OperationalStatus = opStatus
 			ctxWithID := context.WithValue(context.Background(), base.RequestUUID, volume.Spec.Id)
-			// todo fix linter issue - https://github.com/kyoh86/scopelint/issues/5
 			// nolint:scopelint
 			if err := cs.k8sClient.UpdateCR(ctxWithID, &volume); err != nil {
 				ll.Errorf("Unable to update operational status for volume ID %s: %s", volume.Spec.Id, err)
@@ -293,7 +291,6 @@ func (cs *CRHelperImpl) UpdateDrivesStatusOnNode(nodeID, status string) error {
 	for _, drive := range drives {
 		if drive.Spec.Status != status {
 			drive.Spec.Status = status
-			// todo fix linter issue - https://github.com/kyoh86/scopelint/issues/5
 			// nolint:scopelint
 			if err := cs.k8sClient.UpdateCR(context.Background(), &drive); err != nil {
 				ll.Errorf("Unable to update status for drive ID %s: %s", drive.Spec.UUID, err)
```
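Background on the surviving `// nolint:scopelint` directives: scopelint warns when code takes the address of a range variable inside the loop body, because before Go 1.22 that variable was reused across iterations, so stored pointers could all alias the same variable. The deleted TODOs only linked the upstream linter issue. The classic workaround, shown here as a standalone sketch rather than the repo's code, is a per-iteration copy:

```go
// Sketch of the pattern scopelint warns about, with the usual fix.
package main

import "fmt"

type item struct{ name string }

func main() {
	items := []item{{"a"}, {"b"}}
	var ptrs []*item
	for _, it := range items {
		it := it // per-iteration copy; avoids aliasing under pre-1.22 loop semantics
		ptrs = append(ptrs, &it)
	}
	for _, p := range ptrs {
		fmt.Println(p.name) // prints "a" then "b", not "b" twice
	}
}
```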
1 change: 0 additions & 1 deletion pkg/base/util/common.go
```diff
@@ -58,7 +58,6 @@ func ConsistentRead(filename string, retry int, timeout time.Duration) ([]byte,
 // Receives string name of StorageClass
 // Returns string of CSI StorageClass
 func ConvertStorageClass(strSC string) string {
-	// TODO: use map or something else for such conversion https://github.com/dell/csi-baremetal/issues/84
 	sc := strings.ToUpper(strSC)
 	switch sc {
 	case api.StorageClassHDD,
```
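The comment removed here referenced issue 84, which suggested replacing the switch in ConvertStorageClass with a lookup map. A hypothetical sketch of that alternative (the storage-class names and the ANY fallback are assumptions, not the repo's actual constants):

```go
// Hypothetical map-based variant of ConvertStorageClass (issue #84's suggestion).
package main

import (
	"fmt"
	"strings"
)

// knownStorageClasses is illustrative; the real set lives in the api/v1 constants.
var knownStorageClasses = map[string]struct{}{
	"HDD": {}, "SSD": {}, "NVME": {}, "HDDLVG": {}, "SSDLVG": {},
}

func convertStorageClass(strSC string) string {
	sc := strings.ToUpper(strSC)
	if _, ok := knownStorageClasses[sc]; ok {
		return sc
	}
	return "ANY" // assumed fallback, mirroring a default branch
}

func main() {
	fmt.Println(convertStorageClass("ssd"))     // SSD
	fmt.Println(convertStorageClass("unknown")) // ANY
}
```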
1 change: 0 additions & 1 deletion pkg/controller/node/statemonitor.go
```diff
@@ -148,7 +148,6 @@ func (n *ServicesStateMonitor) UpdateNodeHealthCache() {
 		state   *serviceState
 		isExist bool
 	)
-	// todo when node is removed from cluster?
 	if state, isExist = n.nodeHealthMap[nodeID]; !isExist {
 		state = &serviceState{status: Unknown, time: currentTime}
 		// add pod to the map - no need to print warning message here since this is cache initialization
```
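The question removed here ("when node is removed from cluster?") is about stale entries accumulating in the health cache. One possible answer, sketched with illustrative names only (not the repo's design), is a pruning pass over the map:

```go
// Hypothetical pruning pass for a node-health cache: drop entries whose
// node no longer exists in the cluster. All names are illustrative.
package main

import "fmt"

type serviceState struct{ status string }

func pruneHealthCache(cache map[string]*serviceState, liveNodes map[string]bool) {
	for nodeID := range cache {
		if !liveNodes[nodeID] {
			delete(cache, nodeID) // deleting during range over a map is safe in Go
		}
	}
}

func main() {
	cache := map[string]*serviceState{"n1": {"OK"}, "n2": {"OK"}}
	pruneHealthCache(cache, map[string]bool{"n1": true})
	fmt.Println(len(cache)) // 1
}
```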
1 change: 0 additions & 1 deletion pkg/crcontrollers/reservation/reservationcontroller.go
```diff
@@ -115,7 +115,6 @@ func (c *Controller) handleReservationUpdate(ctx context.Context, log *logrus.En
 		volumes[i] = &v1api.Volume{Id: capacity.Name, Size: capacity.Size, StorageClass: capacity.StorageClass, StorageGroup: capacity.StorageGroup}
 	}
 
-	// TODO: do not read all ACs and ACRs for each request: https://github.com/dell/csi-baremetal/issues/89
 	acReader := capacityplanner.NewACReader(c.client, log, true)
 	acrReader := capacityplanner.NewACRReader(c.client, log, true)
 	capManager := c.capacityManagerBuilder.GetCapacityManager(log, acReader, acrReader)
```
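The deleted comment tracked issue 89: every reservation update builds fresh readers and re-lists all ACs and ACRs. A generic TTL-cache wrapper, sketched below with invented names (this is not the repo's capacityplanner API), shows one way such repeated list calls could be amortized:

```go
// Hypothetical TTL cache around an expensive list call, in the spirit of
// issue #89 (avoid re-reading all ACs/ACRs on every request).
package main

import (
	"fmt"
	"sync"
	"time"
)

type cachedLister[T any] struct {
	mu      sync.Mutex
	ttl     time.Duration
	fetched time.Time
	items   []T
	list    func() ([]T, error)
}

func (c *cachedLister[T]) List() ([]T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.items != nil && time.Since(c.fetched) < c.ttl {
		return c.items, nil // serve from cache while it is fresh
	}
	items, err := c.list()
	if err != nil {
		return nil, err
	}
	c.items, c.fetched = items, time.Now()
	return items, nil
}

func main() {
	calls := 0
	c := &cachedLister[string]{
		ttl:  30 * time.Second,
		list: func() ([]string, error) { calls++; return []string{"ac-1"}, nil },
	}
	c.List()
	c.List() // second call is served from cache
	fmt.Println(calls) // 1
}
```

The trade-off, of course, is staleness: a TTL cache can hand out capacity that was just consumed, so a real implementation would need invalidation on writes.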
8 changes: 0 additions & 8 deletions pkg/node/volumemgr.go
```diff
@@ -298,7 +298,6 @@ func (m *VolumeManager) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R
 	// Failed drive shouldn't be cleaned up - to avoid data loss
 	case apiV1.Removed, apiV1.Failed:
 		// we need to update annotation on related drive CRD
-		// todo can we do polling instead?
 		ll.Infof("Volume %s is removed. Updating related", volume.Name)
 		// drive must be present in the system
 		drive, _ := m.crHelper.GetDriveCRByVolume(volume)
@@ -372,7 +371,6 @@ func (m *VolumeManager) updateVolumeAndDriveUsageStatus(ctx context.Context, vol
 		ll.Errorf("Unable to read drive CR, error: %v", err)
 		return ctrl.Result{Requeue: true}, err
 	}
-	// TODO add annotations for additional statuses?
 	if volumeStatus == apiV1.VolumeUsageReleased {
 		m.addVolumeStatusAnnotation(drive, volume.Name, apiV1.VolumeUsageReleased)
 	}
@@ -680,7 +678,6 @@ func (m *VolumeManager) updateDrivesCRs(ctx context.Context, drivesFromMgr []*ap
 		toCreateSpec := *drivePtr
 		toCreateSpec.NodeId = m.nodeID
 		toCreateSpec.UUID = uuid.New().String()
-		// TODO: what operational status should be if drivemgr reported drive with not a good health
 		toCreateSpec.Usage = apiV1.DriveUsageInUse
 		toCreateSpec.IsClean = true
 		isSystem, err := m.isDriveSystem(drivePtr.Path)
@@ -719,7 +716,6 @@ func (m *VolumeManager) updateDrivesCRs(ctx context.Context, drivesFromMgr []*ap
 		ll.Warnf("Set status %s for drive %v", apiV1.DriveStatusOffline, d.Spec)
 		previousState := d.DeepCopy()
 		toUpdate := d
-		// TODO: which operational status should be in case when there is drive CR that doesn't have corresponding drive from drivemgr response
 		toUpdate.Spec.Status = apiV1.DriveStatusOffline
 		if value, ok := d.GetAnnotations()[driveHealthOverrideAnnotation]; ok {
 			m.overrideDriveHealth(&toUpdate.Spec, value, d.Name)
@@ -1000,7 +996,6 @@ func (m *VolumeManager) handleDriveStatusChange(ctx context.Context, drive updat
 		prevHealthState := vol.Spec.Health
 		vol.Spec.Health = cur.Health
 		// initiate volume release
-		// TODO need to check for specific annotation instead
 		if vol.Spec.Health == apiV1.HealthBad || vol.Spec.Health == apiV1.HealthSuspect {
 			if vol.Spec.Usage == apiV1.VolumeUsageInUse {
 				vol.Spec.Usage = apiV1.VolumeUsageReleasing
@@ -1017,9 +1012,6 @@
 				prevHealthState, vol.Spec.Health, cur.Health, cur.NodeId)
 		}
 	}
-	// Handle resources with LogicalVolumeGroup
-	// This is not work for the current moment because HAL doesn't monitor disks with LVM
-	// TODO: Handle disk health which are used by LVGs - https://github.com/dell/csi-baremetal/issues/88
 }
 
 func (m *VolumeManager) checkVGErrors(lvg *lvgcrd.LogicalVolumeGroup, drivePath string) {
```
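To make the release flow in handleDriveStatusChange easier to follow, here is a distilled, self-contained sketch of the health-driven usage transition. The constant values are assumptions; only the transition logic mirrors the diff above:

```go
// Distilled sketch of the volume-release transition: a volume on a drive
// whose health degrades to BAD or SUSPECT moves from in-use to releasing.
// String values are assumed, not the repo's actual apiV1 constants.
package main

import "fmt"

const (
	HealthBad            = "BAD"
	HealthSuspect        = "SUSPECT"
	VolumeUsageInUse     = "IN_USE"
	VolumeUsageReleasing = "RELEASING"
)

type volume struct{ Health, Usage string }

func applyDriveHealth(vol *volume, driveHealth string) {
	vol.Health = driveHealth
	// A volume on a degraded drive starts releasing so its data can be evacuated.
	if vol.Health == HealthBad || vol.Health == HealthSuspect {
		if vol.Usage == VolumeUsageInUse {
			vol.Usage = VolumeUsageReleasing
		}
	}
}

func main() {
	v := &volume{Health: "GOOD", Usage: VolumeUsageInUse}
	applyDriveHealth(v, HealthSuspect)
	fmt.Println(v.Usage) // RELEASING
}
```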
1 change: 0 additions & 1 deletion pkg/node/volumemgr_test.go
```diff
@@ -112,7 +112,6 @@ var (
 		Children: nil,
 	}
 
-	// todo don't hardcode device name
 	lsblkSingleDeviceCmd = fmt.Sprintf(lsblk.CmdTmpl, "/dev/sda")
 
 	testDriveCR = drivecrd.Drive{
```
1 change: 0 additions & 1 deletion pkg/scheduler/extender/extender.go
```diff
@@ -215,7 +215,6 @@ func (e *Extender) PrioritizeHandler(w http.ResponseWriter, req *http.Request) {
 }
 
 // BindHandler does bind of a pod to specific node
-// todo - not implemented. Was used for testing purposes ONLY (fault injection)!
 func (e *Extender) BindHandler(w http.ResponseWriter, req *http.Request) {
 	sessionUUID := uuid.New().String()
 	ll := e.logger.WithFields(logrus.Fields{
```
1 change: 0 additions & 1 deletion pkg/scheduler/extender/extender_test.go
```diff
@@ -51,7 +51,6 @@ import (
 	"github.com/dell/csi-baremetal/pkg/metrics/common"
 )
 
-// todo review all tests. some might not be relevant
 var (
 	testLogger = logrus.New()
 	testUUID   = uuid.New().String()
```