Allow deleting a subvolume with the retain-snapshots feature
Signed-off-by: yati1998 <[email protected]>
yati1998 committed Feb 2, 2024
1 parent f019c64 commit 8ee3e0c
Showing 4 changed files with 61 additions and 41 deletions.

.github/workflows/go-test.yaml (4 changes: 2 additions & 2 deletions)

@@ -81,7 +81,7 @@ jobs:
           kubectl rook-ceph ceph fs subvolume create myfs test-subvol group-a
           kubectl rook-ceph subvolume ls
           kubectl rook-ceph subvolume ls --stale
-          kubectl rook-ceph subvolume delete test-subvol myfs group-a
+          kubectl rook-ceph subvolume delete myfs test-subvol group-a
       - name: Get mon endpoints
         run: |
@@ -237,7 +237,7 @@ jobs:
           kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol group-a
           kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls
           kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale
-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete test-subvol myfs group-a
+          kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol group-a
       - name: Get mon endpoints
         run: |

cmd/commands/subvolume.go (13 changes: 7 additions & 6 deletions)

@@ -40,22 +40,23 @@ var listCmd = &cobra.Command{
 }
 
 var deleteCmd = &cobra.Command{
-	Use:                "delete",
-	Short:              "Deletes a stale subvolume.",
-	DisableFlagParsing: true,
-	Args:               cobra.ExactArgs(3),
-	Example:            "kubectl rook-ceph delete <subvolumes> <filesystem> <subvolumegroup>",
+	Use:     "delete",
+	Short:   "Deletes a stale subvolume.",
+	Args:    cobra.ExactArgs(3),
+	Example: "kubectl rook-ceph delete <subvolumes> <filesystem> <subvolumegroup>",
 	Run: func(cmd *cobra.Command, args []string) {
 		ctx := cmd.Context()
 		subList := args[0]
 		fs := args[1]
 		svg := args[2]
-		subvolume.Delete(ctx, clientSets, operatorNamespace, cephClusterNamespace, subList, fs, svg)
+		retainSnapshots, _ := cmd.Flags().GetBool("retain-snapshots")
+		subvolume.Delete(ctx, clientSets, operatorNamespace, cephClusterNamespace, subList, fs, svg, retainSnapshots)
 	},
 }
 
 func init() {
 	SubvolumeCmd.AddCommand(listCmd)
 	SubvolumeCmd.PersistentFlags().Bool("stale", false, "List only stale subvolumes")
 	SubvolumeCmd.AddCommand(deleteCmd)
+	SubvolumeCmd.PersistentFlags().Bool("retain-snapshots", false, "Delete subvolume but retain the snapshot")
 }
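
The flag handling above is the standard cobra pattern: a Bool flag registered on the parent command's PersistentFlags in init() becomes readable inside a child command's Run via cmd.Flags().GetBool. A minimal, self-contained sketch of that pattern (the command and flag names mirror the diff; the rest is illustrative only, not code from this repo):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// subvolumeCmd stands in for SubvolumeCmd in the diff above.
var subvolumeCmd = &cobra.Command{Use: "subvolume"}

var deleteSubvolCmd = &cobra.Command{
	Use:  "delete <filesystem> <subvolume> <subvolumegroup>",
	Args: cobra.ExactArgs(3),
	Run: func(cmd *cobra.Command, args []string) {
		// Persistent flags declared on the parent are visible here.
		retain, _ := cmd.Flags().GetBool("retain-snapshots")
		fmt.Printf("fs=%s subvolume=%s group=%s retain-snapshots=%v\n",
			args[0], args[1], args[2], retain)
	},
}

func main() {
	subvolumeCmd.AddCommand(deleteSubvolCmd)
	subvolumeCmd.PersistentFlags().Bool("retain-snapshots", false,
		"Delete subvolume but retain the snapshot")
	if err := subvolumeCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```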

docs/subvolume.md (20 changes: 11 additions & 9 deletions)

@@ -9,12 +9,13 @@ and delete them without impacting other resources and attached volumes.
 The subvolume command will require the following sub commands:
 * `ls` : [ls](#ls) lists all the subvolumes
   * `--stale`: lists only stale subvolumes
-* `delete <subvolumes> <filesystem> <subvolumegroup>`:
+* `delete <filesystem> <subvolumes> <subvolumegroup>`:
   [delete](#delete) stale subvolumes as per user's input.
   It will list and delete only the stale subvolumes to prevent any loss of data.
-  * subvolumes: comma-separated list of subvolumes of same filesystem and subvolumegroup.
+  * subvolumes: subvolume name.
   * filesystem: filesystem name to which the subvolumes belong.
   * subvolumegroup: subvolumegroup name to which the subvolumes belong.
+  * `--retain-snapshots`: deletes the subvolume with retain-snapshot feature.
 ## ls
 
 ```bash
@@ -23,8 +24,9 @@ kubectl rook-ceph subvolume ls
 # Filesystem Subvolume SubvolumeGroup State
 # ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi in-use
 # ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi in-use
-# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110006 csi in-use
+# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110006 csi snapshot-retained
-# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110007 csi stale
+# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110007 csi stale and has snapshot
 
 ```
@@ -33,23 +35,23 @@ kubectl rook-ceph subvolume ls --stale

 # Filesystem Subvolume SubvolumeGroup state
 # ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi stale
-# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi stale
+# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi stale and has snapshot
+# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110006 csi snapshot-retained
 
 ```
 
 ## delete
 
 ```bash
-kubectl rook-ceph subvolume delete csi-vol-427774b4-340b-11ed-8d66-0242ac110004 ocs-storagecluster csi
+kubectl rook-ceph subvolume delete ocs-storagecluster csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi
 
-# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
+# Info: subvolume "csi-vol-427774b4-340b-11ed-8d66-0242ac110004" deleted
 
 ```
 
 ```bash
-kubectl rook-ceph subvolume delete csi-vol-427774b4-340b-11ed-8d66-0242ac110004,csi-vol-427774b4-340b-11ed-8d66-0242ac110005 ocs-storagecluster csi
+kubectl rook-ceph subvolume delete ocs-storagecluster csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi --retain-snapshots
 
-# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
-# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
+# Info: subvolume "csi-vol-3209f854-84d4-4e20-97e2-e14a68a46a2e" deleted
 
 ```
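
As the examples above suggest, the new flag maps directly onto Ceph's own --retain-snapshots option of `ceph fs subvolume rm`. A small hypothetical sketch of the argument construction (buildRmArgs is not a function in this repo; the argument list matches the exec call in pkg/filesystem/subvolume.go below):

```go
package main

import "fmt"

// buildRmArgs mirrors the branching in Delete (pkg/filesystem/subvolume.go):
// the same `ceph fs subvolume rm` invocation, with --retain-snapshots
// appended when snapshots should survive the delete.
func buildRmArgs(fs, subvol, svg string, retainSnapshots bool) []string {
	args := []string{"fs", "subvolume", "rm", fs, subvol, svg}
	if retainSnapshots {
		args = append(args, "--retain-snapshots")
	}
	return args
}

func main() {
	fmt.Println(buildRmArgs("ocs-storagecluster", "csi-vol-427774b4-340b-11ed-8d66-0242ac110004", "csi", false))
	fmt.Println(buildRmArgs("ocs-storagecluster", "csi-vol-427774b4-340b-11ed-8d66-0242ac110004", "csi", true))
}
```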

pkg/filesystem/subvolume.go (65 changes: 41 additions & 24 deletions)

@@ -20,7 +20,6 @@ import (
"context"
"encoding/json"
"fmt"
"strings"

"github.com/rook/kubectl-rook-ceph/pkg/exec"
"github.com/rook/kubectl-rook-ceph/pkg/k8sutil"
Expand All @@ -34,8 +33,9 @@ type fsStruct struct {
 }
 
 type subVolumeInfo struct {
-	svg string
-	fs  string
+	svg   string
+	fs    string
+	state string
 }
 
 func List(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace string, includeStaleOnly bool) {
@@ -53,7 +53,9 @@ func getK8sRefSubvolume(ctx context.Context, clientsets *k8sutil.Clientsets) map
 	subvolumeNames := make(map[string]subVolumeInfo)
 	for _, pv := range pvList.Items {
 		if pv.Spec.CSI != nil && pv.Spec.CSI.VolumeAttributes["subvolumeName"] != "" {
-			subvolumeNames[pv.Spec.CSI.VolumeAttributes["subvolumeName"]] = subVolumeInfo{}
+			if pv.Spec.CSI.VolumeAttributes["subvolumeName"] != "" {
+				subvolumeNames[pv.Spec.CSI.VolumeAttributes["subvolumeName"]] = subVolumeInfo{}
+			}
 		}
 	}
 	return subvolumeNames
@@ -79,12 +81,14 @@ func listCephFSSubvolumes(ctx context.Context, clientsets *k8sutil.Clientsets, o
 			}
 			// append the subvolume which doesn't have any snapshot attached to it.
 			for _, sv := range subvol {
+				state := getSubVolumeState(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name)
+
 				// Assume the volume is stale unless proven otherwise
 				stale := true
 				// lookup for subvolume in list of the PV references
 				_, ok := subvolumeNames[sv.Name]
-				if ok || checkSnapshot(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name) {
-					// The volume is not stale if a PV was found, or it has a snapshot
+				if ok {
+					// The volume is not stale if a PV was found
 					stale = false
 				}
 				status := "stale"
@@ -93,14 +97,33 @@
 						continue
 					}
 					status = "in-use"
+				} else {
+					// check if the stale subvolume has snapshots.
+					if checkSnapshot(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name) {
+						status = "stale and has snapshot"
+					}
+					if state == "snapshot-retained" {
+						status = state
+					}
+				}
-				subvolumeNames[sv.Name] = subVolumeInfo{fs.Name, svg.Name}
+				subvolumeNames[sv.Name] = subVolumeInfo{fs.Name, svg.Name, state}
 				fmt.Println(fs.Name, sv.Name, svg.Name, status)
 			}
 		}
 	}
 }
 
+func getSubVolumeState(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace, fsName, SubVol, SubvolumeGroup string) string {
+	subVolumeInfo := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "info", fsName, SubVol, SubvolumeGroup}, operatorNamespace, clusterNamespace, true, false)
+	var info map[string]interface{}
+	err := json.Unmarshal([]byte(subVolumeInfo), &info)
+	if err != nil {
+		logging.Fatal(fmt.Errorf("failed to unmarshal: %q", err))
+	}
+	return info["state"].(string)
+
+}
 
 // gets list of filesystem
 func getFileSystem(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace string) []fsStruct {
 	fsList := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "ls", "--format", "json"}, operatorNamespace, clusterNamespace, true, false)
@@ -143,31 +166,25 @@ func unMarshaljson(list string) []fsStruct {
 	return unmarshal
 }
 
-func Delete(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, subList, fs, svg string) {
-	subvollist := strings.Split(subList, ",")
+func Delete(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, fs, subvol, svg string, retainSnapshots bool) {
 	k8sSubvolume := getK8sRefSubvolume(ctx, clientsets)
-	for _, subvolume := range subvollist {
-		check := checkStaleSubvolume(ctx, clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg, k8sSubvolume)
-		if check {
-			exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "rm", fs, subvolume, svg}, OperatorNamespace, CephClusterNamespace, true, false)
-			logging.Info("subvolume %q deleted", subvolume)
-		} else {
-			logging.Info("subvolume %q is not stale", subvolume)
-		}
-	}
+	check := checkStaleSubvolume(ctx, clientsets, OperatorNamespace, CephClusterNamespace, fs, subvol, svg, k8sSubvolume)
+	if check && !retainSnapshots {
+		exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "rm", fs, subvol, svg}, OperatorNamespace, CephClusterNamespace, true, false)
+		logging.Info("subvolume %q deleted", subvol)
+	} else if check && retainSnapshots {
+		exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "rm", fs, subvol, svg, "--retain-snapshots"}, OperatorNamespace, CephClusterNamespace, true, false)
+		logging.Info("subvolume %q deleted: snapshot: %q", subvol, retainSnapshots)
+	} else {
+		logging.Info("subvolume %q is not stale", subvol)
+	}
 }
 
 // checkStaleSubvolume checks if there are any stale subvolume to be deleted
 func checkStaleSubvolume(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg string, k8sSubvolume map[string]subVolumeInfo) bool {
 	_, ok := k8sSubvolume[subvolume]
 	if !ok {
-		snapshot := checkSnapshot(ctx, clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg)
-		if snapshot {
-			logging.Error(fmt.Errorf("subvolume %s has snapshots", subvolume))
-			return false
-		} else {
-			return true
-		}
+		return true
 	}
 	logging.Error(fmt.Errorf("Subvolume %s is referenced by a PV", subvolume))
 	return false
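
One caveat worth noting in the new getSubVolumeState above: info["state"].(string) is an unchecked type assertion, so a missing or non-string "state" field would panic at runtime. A standalone sketch of the same unmarshal-and-extract step with a checked assertion (the canned JSON below stands in for `ceph fs subvolume info` output and is illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Sample subvolume info payload, truncated to the field we need.
const subvolumeInfoJSON = `{"state": "snapshot-retained", "type": "subvolume"}`

// subvolumeState extracts the "state" field without panicking when the
// field is absent or has an unexpected type.
func subvolumeState(raw string) (string, error) {
	var info map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &info); err != nil {
		return "", fmt.Errorf("failed to unmarshal subvolume info: %w", err)
	}
	state, ok := info["state"].(string) // checked assertion instead of a bare one
	if !ok {
		return "", fmt.Errorf("subvolume info has no string %q field", "state")
	}
	return state, nil
}

func main() {
	state, err := subvolumeState(subvolumeInfoJSON)
	if err != nil {
		panic(err)
	}
	fmt.Println(state) // prints: snapshot-retained
}
```

In the plugin itself the bare assertion relies on Ceph always returning well-formed JSON; the checked variant simply fails with a clearer error when it does not.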
