From a0359c0a109a7d9d3aafa35a7682d1e43500cdfa Mon Sep 17 00:00:00 2001
From: yati1998
Date: Fri, 2 Feb 2024 16:47:00 +0530
Subject: [PATCH] csi: allow deleting subvolume with retain-snapshot feature

This commit passes the --retain-snapshots flag when deleting a stale
subvolume, so a subvolume that still has snapshots can be removed while
its snapshots are retained. The delete command now takes the filesystem,
the subvolume, and an optional subvolumegroup (default: "csi").

Signed-off-by: yati1998
---
 .github/workflows/go-test.yaml |   8 ++-
 cmd/commands/subvolume.go      |  20 ++++---
 docs/subvolume.md              |  27 +++------
 pkg/filesystem/subvolume.go    | 100 +++++++++++++++++++--------------
 4 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/.github/workflows/go-test.yaml b/.github/workflows/go-test.yaml
index 928e5084..7c529d84 100644
--- a/.github/workflows/go-test.yaml
+++ b/.github/workflows/go-test.yaml
@@ -79,9 +79,11 @@ jobs:
         run: |
           set -ex
           kubectl rook-ceph ceph fs subvolume create myfs test-subvol group-a
+          kubectl rook-ceph ceph fs subvolume create myfs test-subvol-1 group-a
           kubectl rook-ceph subvolume ls
           kubectl rook-ceph subvolume ls --stale
-          kubectl rook-ceph subvolume delete test-subvol myfs group-a
+          kubectl rook-ceph subvolume delete myfs test-subvol group-a
+          kubectl rook-ceph subvolume delete myfs test-subvol-1

       - name: Get mon endpoints
         run: |
@@ -234,10 +236,12 @@ jobs:
       - name: Subvolume command
         run: |
           set -ex
+          kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol-1 group-a
           kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol group-a
           kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls
           kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete test-subvol myfs group-a
+          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol group-a
+          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol-1

       - name: Get mon endpoints
         run: |
diff --git a/cmd/commands/subvolume.go b/cmd/commands/subvolume.go
index e19a44d3..d189b377 100644
--- a/cmd/commands/subvolume.go
+++ b/cmd/commands/subvolume.go
@@ -40,17 +40,19 @@ var listCmd = &cobra.Command{
 }

 var deleteCmd = &cobra.Command{
-	Use:                "delete",
-	Short:              "Deletes a stale subvolume.",
-	DisableFlagParsing: true,
-	Args:               cobra.ExactArgs(3),
-	Example:            "kubectl rook-ceph delete <subvolumes> <filesystem> <subvolumegroup>",
+	Use:     "delete",
+	Short:   "Deletes a stale subvolume.",
+	Args:    cobra.RangeArgs(2, 3),
+	Example: "kubectl rook-ceph delete <filesystem> <subvolume> [subvolumegroup]",
 	Run: func(cmd *cobra.Command, args []string) {
 		ctx := cmd.Context()
-		subList := args[0]
-		fs := args[1]
-		svg := args[2]
-		subvolume.Delete(ctx, clientSets, operatorNamespace, cephClusterNamespace, subList, fs, svg)
+		fs := args[0]
+		subvol := args[1]
+		svg := "csi"
+		if len(args) > 2 {
+			svg = args[2]
+		}
+		subvolume.Delete(ctx, clientSets, operatorNamespace, cephClusterNamespace, fs, subvol, svg)
 	},
 }

diff --git a/docs/subvolume.md b/docs/subvolume.md
index bf771927..02dc8ed6 100644
--- a/docs/subvolume.md
+++ b/docs/subvolume.md
@@ -9,12 +9,11 @@ and delete them without impacting other resources and attached volumes.
 The subvolume command will require the following sub commands:
 * `ls` : [ls](#ls) lists all the subvolumes
   * `--stale`: lists only stale subvolumes
-* `delete <subvolumes> <filesystem> <subvolumegroup>`:
-  [delete](#delete) stale subvolumes as per user's input.
-  It will list and delete only the stale subvolumes to prevent any loss of data.
-  * subvolumes: comma-separated list of subvolumes of same filesystem and subvolumegroup.
-  * filesystem: filesystem name to which the subvolumes belong.
-  * subvolumegroup: subvolumegroup name to which the subvolumes belong.
+* `delete <filesystem> <subvolume> [subvolumegroup]`:
+  [delete](#delete) a stale subvolume.
+  * subvolume: subvolume name.
+  * filesystem: filesystem name to which the subvolume belongs.
+  * subvolumegroup: subvolumegroup name to which the subvolume belongs (default is "csi").

 ## ls
 ```bash
@@ -23,8 +22,8 @@ kubectl rook-ceph subvolume ls

 # Filesystem Subvolume SubvolumeGroup State
 # ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi in-use
 # ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi in-use
-# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110006 csi in-use
 # ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110007 csi stale
+# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110008 csi stale-with-snapshot
 ```

@@ -33,23 +32,15 @@ kubectl rook-ceph subvolume ls --stale

 # Filesystem Subvolume SubvolumeGroup state
 # ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi stale
-# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi stale
+# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi stale-with-snapshot
 ```

 ## delete
 ```bash
-kubectl rook-ceph subvolume delete csi-vol-427774b4-340b-11ed-8d66-0242ac110004 ocs-storagecluster csi
+kubectl rook-ceph subvolume delete ocs-storagecluster csi-vol-427774b4-340b-11ed-8d66-0242ac110004

-# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
-
-```
-
-```bash
-kubectl rook-ceph subvolume delete csi-vol-427774b4-340b-11ed-8d66-0242ac110004,csi-vol-427774b4-340b-11ed-8d66-0242ac110005 ocs-storagecluster csi
-
-# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
-# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
+# Info: subvolume "csi-vol-427774b4-340b-11ed-8d66-0242ac110004" deleted
 ```
\ No newline at end of file
diff --git a/pkg/filesystem/subvolume.go b/pkg/filesystem/subvolume.go
index dcbe4197..43421fa5 100644
--- a/pkg/filesystem/subvolume.go
+++ b/pkg/filesystem/subvolume.go
@@ -20,7 +20,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"strings"

 	"github.com/rook/kubectl-rook-ceph/pkg/exec"
 	"github.com/rook/kubectl-rook-ceph/pkg/k8sutil"
@@ -34,10 +33,17 @@ type fsStruct struct {
 }

 type subVolumeInfo struct {
-	svg string
-	fs  string
+	svg   string
+	fs    string
+	state string
 }

+const (
+	inUse             = "in-use"
+	stale             = "stale"
+	staleWithSnapshot = "stale-with-snapshot"
+)
+
 func List(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace string, includeStaleOnly bool) {
 	subvolumeNames := getK8sRefSubvolume(ctx, clientsets)
@@ -52,7 +58,7 @@ func getK8sRefSubvolume(ctx context.Context, clientsets *k8sutil.Clientsets) map
 	}
 	subvolumeNames := make(map[string]subVolumeInfo)
 	for _, pv := range pvList.Items {
-		if pv.Spec.CSI != nil && pv.Spec.CSI.VolumeAttributes["subvolumeName"] != "" {
+		if pv.Spec.CSI != nil {
 			subvolumeNames[pv.Spec.CSI.VolumeAttributes["subvolumeName"]] = subVolumeInfo{}
 		}
 	}
@@ -90,28 +96,61 @@ func listCephFSSubvolumes(ctx context.Context, clientsets *k8sutil.Clientsets, o
 			}

 			// append the subvolume which doesn't have any snapshot attached to it.
 			for _, sv := range subvol {
+				state := getSubvolumeState(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name)
+
 				// Assume the volume is stale unless proven otherwise
-				stale := true
+				stalevol := true
 				// lookup for subvolume in list of the PV references
 				_, ok := subvolumeNames[sv.Name]
-				if ok || checkSnapshot(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name) {
-					// The volume is not stale if a PV was found, or it has a snapshot
-					stale = false
+				if ok {
+					// The volume is not stale if a PV was found
+					stalevol = false
 				}
-				status := "stale"
-				if !stale {
+				status := stale
+				if !stalevol {
 					if includeStaleOnly {
 						continue
 					}
-					status = "in-use"
+					status = inUse
+				} else {
+					// check the state of the stale subvolume
+					// if it is snapshot-retained then skip listing it.
+					if state == "snapshot-retained" {
+						status = state
+						continue
+					}
+					// check if the stale subvolume has snapshots.
+					if checkSnapshot(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name) {
+						status = staleWithSnapshot
+					}
+
 				}
-				subvolumeNames[sv.Name] = subVolumeInfo{fs.Name, svg.Name}
+				subvolumeNames[sv.Name] = subVolumeInfo{fs.Name, svg.Name, state}
 				fmt.Println(fs.Name, sv.Name, svg.Name, status)
 			}
 		}
 	}
 }

+// getSubvolumeState returns the state of the subvolume
+func getSubvolumeState(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace, fsName, SubVol, SubvolumeGroup string) string {
+	subVolumeInfo, errvol := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "info", fsName, SubVol, SubvolumeGroup}, operatorNamespace, clusterNamespace, true)
+	if errvol != nil {
+		logging.Error(errvol, "failed to get subvolume info")
+		return ""
+	}
+	var info map[string]interface{}
+	err := json.Unmarshal([]byte(subVolumeInfo), &info)
+	if err != nil {
+		logging.Fatal(fmt.Errorf("failed to unmarshal: %q", err))
+	}
+	state, ok := info["state"].(string)
+	if !ok {
+		logging.Fatal(fmt.Errorf("failed to get the state of subvolume: %q", SubVol))
+	}
+	return state
+}
+
 // gets list of filesystem
 func getFileSystem(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace string) ([]fsStruct, error) {
 	fsList, err := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "ls", "--format", "json"}, operatorNamespace, clusterNamespace, true)
@@ -166,36 +205,13 @@ func unMarshaljson(list string) []fsStruct {
 	return unmarshal
 }

-func Delete(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, subList, fs, svg string) {
-	subvollist := strings.Split(subList, ",")
+func Delete(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, fs, subvol, svg string) {
 	k8sSubvolume := getK8sRefSubvolume(ctx, clientsets)
-	for _, subvolume := range subvollist {
-		check := checkStaleSubvolume(ctx, clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg, k8sSubvolume)
-		if check {
-			_, err := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "rm", fs, subvolume, svg}, OperatorNamespace, CephClusterNamespace, true)
-			if err != nil {
-				logging.Error(err, "failed to delete stale subvolume %q", subvolume)
-				continue
-			}
-			logging.Info("subvolume %q deleted", subvolume)
-		} else {
-			logging.Info("subvolume %q is not stale", subvolume)
-		}
-	}
-}
-
-// checkStaleSubvolume checks if there are any stale subvolume to be deleted
-func checkStaleSubvolume(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg string, k8sSubvolume map[string]subVolumeInfo) bool {
-	_, ok := k8sSubvolume[subvolume]
-	if !ok {
-		snapshot := checkSnapshot(ctx, clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg)
-		if snapshot {
-			logging.Error(fmt.Errorf("subvolume %s has snapshots", subvolume))
-			return false
-		} else {
-			return true
-		}
+	_, check := k8sSubvolume[subvol]
+	if !check {
+		_, err := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "rm", fs, subvol, svg, "--retain-snapshots"}, OperatorNamespace, CephClusterNamespace, true)
+		if err != nil {
+			logging.Error(err, "failed to delete subvolume %q", subvol)
+			return
+		}
+		logging.Info("subvolume %q deleted", subvol)
+	} else {
+		logging.Info("subvolume %q is not stale", subvol)
 	}
-	logging.Error(fmt.Errorf("Subvolume %s is referenced by a PV", subvolume))
-	return false
 }
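For reference, a minimal usage sketch of the new delete path, assuming a CephFS filesystem named `myfs` and a stale subvolume `csi-vol-0a1b2c3d` (both names are illustrative placeholders, not values taken from this patch):

```bash
# Delete a stale subvolume; the subvolumegroup argument is optional and
# defaults to "csi" when omitted.
kubectl rook-ceph subvolume delete myfs csi-vol-0a1b2c3d

# The plugin runs the equivalent of the following inside the operator pod;
# --retain-snapshots removes the subvolume while keeping any snapshots it has:
#   ceph fs subvolume rm myfs csi-vol-0a1b2c3d csi --retain-snapshots
```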