Allow deleting a subvolume with the retain-snapshots feature
Signed-off-by: yati1998 <[email protected]>
yati1998 committed Feb 7, 2024
1 parent 8a410df commit 9b47647
Showing 4 changed files with 89 additions and 60 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/go-test.yaml
@@ -81,7 +81,7 @@ jobs:
kubectl rook-ceph ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph subvolume ls
kubectl rook-ceph subvolume ls --stale
kubectl rook-ceph subvolume delete test-subvol myfs group-a
kubectl rook-ceph subvolume delete myfs test-subvol group-a
- name: Get mon endpoints
run: |
@@ -237,7 +237,7 @@ jobs:
kubectl rook-ceph --operator-namespace test-operator -n test-cluster ceph fs subvolume create myfs test-subvol group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume ls --stale
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete test-subvol myfs group-a
kubectl rook-ceph --operator-namespace test-operator -n test-cluster subvolume delete myfs test-subvol group-a
- name: Get mon endpoints
run: |
14 changes: 5 additions & 9 deletions cmd/commands/subvolume.go
@@ -40,17 +40,13 @@ var listCmd = &cobra.Command{
}

var deleteCmd = &cobra.Command{
Use: "delete",
Short: "Deletes a stale subvolume.",
DisableFlagParsing: true,
Args: cobra.ExactArgs(3),
Example: "kubectl rook-ceph delete <subvolumes> <filesystem> <subvolumegroup>",
Use: "delete",
Short: "Deletes a stale subvolume.",
Args: cobra.MinimumNArgs(2),
Example: "kubectl rook-ceph delete <filesystem> <subvolume> <subvolumegroup>",
Run: func(cmd *cobra.Command, args []string) {
ctx := cmd.Context()
subList := args[0]
fs := args[1]
svg := args[2]
subvolume.Delete(ctx, clientSets, operatorNamespace, cephClusterNamespace, subList, fs, svg)
subvolume.Delete(ctx, clientSets, operatorNamespace, cephClusterNamespace, args)
},
}

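With `cobra.MinimumNArgs(2)`, only the filesystem and subvolume are required; the subvolumegroup is optional. A minimal sketch of the two accepted invocations, using the names from the workflow above (the default group "csi" comes from the new `Delete` implementation further down):

```bash
# Explicit subvolumegroup
kubectl rook-ceph subvolume delete myfs test-subvol group-a

# Subvolumegroup omitted — Delete falls back to the default group "csi"
kubectl rook-ceph subvolume delete myfs test-subvol
```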
34 changes: 17 additions & 17 deletions docs/subvolume.md
@@ -9,47 +9,47 @@ and delete them without impacting other resources and attached volumes.
The subvolume command will require the following sub commands:
* `ls` : [ls](#ls) lists all the subvolumes
* `--stale`: lists only stale subvolumes
* `delete <subvolumes> <filesystem> <subvolumegroup>`:
[delete](#delete) stale subvolumes as per user's input.
It will list and delete only the stale subvolumes to prevent any loss of data.
* subvolumes: comma-separated list of subvolumes of same filesystem and subvolumegroup.
* filesystem: filesystem name to which the subvolumes belong.
* subvolumegroup: subvolumegroup name to which the subvolumes belong.
* `delete <filesystem> <subvolume> <subvolumegroup>`:
[delete](#delete) a stale subvolume as per the user's input.
It will delete only a stale subvolume, to prevent any loss of data.
* subvolume: the subvolume name.
* filesystem: name of the filesystem to which the subvolume belongs.
* subvolumegroup (optional): name of the subvolumegroup to which the subvolume belongs.
* By default, the subvolumegroup is "csi".
## ls

```bash
kubectl rook-ceph subvolume ls

# Filesystem Subvolume SubvolumeGroup State
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi in-use
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi in-use
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110006 csi in-use
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110007 csi stale
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi In-Use
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi In-Use
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110007 csi Stale
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110008 csi Stale-with-Snapshot

```

```bash
kubectl rook-ceph subvolume ls --stale

# Filesystem Subvolume SubvolumeGroup State
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi stale
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi stale
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi Stale
# ocs-storagecluster-cephfilesystem csi-vol-427774b4-340b-11ed-8d66-0242ac110005 csi Stale-with-Snapshot

```

## delete

```bash
kubectl rook-ceph subvolume delete csi-vol-427774b4-340b-11ed-8d66-0242ac110004 ocs-storagecluster csi
kubectl rook-ceph subvolume delete ocs-storagecluster csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi

# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
# Info: subvolume "csi-vol-427774b4-340b-11ed-8d66-0242ac110004" deleted

```

```bash
kubectl rook-ceph subvolume delete csi-vol-427774b4-340b-11ed-8d66-0242ac110004,csi-vol-427774b4-340b-11ed-8d66-0242ac110005 ocs-storagecluster csi
kubectl rook-ceph subvolume delete ocs-storagecluster csi-vol-427774b4-340b-11ed-8d66-0242ac110004

# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
# Info: subvolume csi-vol-427774b4-340b-11ed-8d66-0242ac110004 deleted
# Info: subvolume "csi-vol-427774b4-340b-11ed-8d66-0242ac110004" deleted

```
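The examples above show the success path. When the target subvolume is still referenced by a PV, the new `Delete` implementation refuses to remove it; the output sketched below is inferred from its `logging.Info` call, not taken from a live run:

```bash
kubectl rook-ceph subvolume delete ocs-storagecluster csi-vol-427774b4-340b-11ed-8d66-0242ac110004 csi

# Info: subvolume "csi-vol-427774b4-340b-11ed-8d66-0242ac110004" is not stale
```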
97 changes: 65 additions & 32 deletions pkg/filesystem/subvolume.go
@@ -20,7 +20,6 @@ import (
"context"
"encoding/json"
"fmt"
"strings"

"github.com/rook/kubectl-rook-ceph/pkg/exec"
"github.com/rook/kubectl-rook-ceph/pkg/k8sutil"
@@ -34,10 +33,17 @@ type fsStruct struct {
}

type subVolumeInfo struct {
svg string
fs string
svg string
fs string
state string
}

const (
InUse = "In-Use"
Stale = "Stale"
StaleWithSnapshot = "Stale-with-Snapshot"
)

func List(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace string, includeStaleOnly bool) {

subvolumeNames := getK8sRefSubvolume(ctx, clientsets)
@@ -52,7 +58,7 @@ func getK8sRefSubvolume(ctx context.Context, clientsets *k8sutil.Clientsets) map
}
subvolumeNames := make(map[string]subVolumeInfo)
for _, pv := range pvList.Items {
if pv.Spec.CSI != nil && pv.Spec.CSI.VolumeAttributes["subvolumeName"] != "" {
if pv.Spec.CSI != nil {
subvolumeNames[pv.Spec.CSI.VolumeAttributes["subvolumeName"]] = subVolumeInfo{}
}
}
@@ -79,28 +85,57 @@ func listCephFSSubvolumes(ctx context.Context, clientsets *k8sutil.Clientsets, o
}
// append the subvolume which doesn't have any snapshot attached to it.
for _, sv := range subvol {
state := getSubvolumeState(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name)

// Assume the volume is stale unless proven otherwise
stale := true
// lookup for subvolume in list of the PV references
_, ok := subvolumeNames[sv.Name]
if ok || checkSnapshot(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name) {
// The volume is not stale if a PV was found, or it has a snapshot
if ok {
// The volume is not stale if a PV was found
stale = false
}
status := "stale"
status := Stale
if !stale {
if includeStaleOnly {
continue
}
status = "in-use"
status = InUse
} else {
// check the state of the stale subvolume
// if it is snapshot-retained then skip listing it.
if state == "snapshot-retained" {
status = state
continue
}
// check if the stale subvolume has snapshots.
if checkSnapshot(ctx, clientsets, operatorNamespace, clusterNamespace, fs.Name, sv.Name, svg.Name) {
status = StaleWithSnapshot
}

}
subvolumeNames[sv.Name] = subVolumeInfo{fs.Name, svg.Name}
subvolumeNames[sv.Name] = subVolumeInfo{fs.Name, svg.Name, state}
fmt.Println(fs.Name, sv.Name, svg.Name, status)
}
}
}
}

// getSubvolumeState returns the state of the subvolume
func getSubvolumeState(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace, fsName, SubVol, SubvolumeGroup string) string {
subVolumeInfo := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "info", fsName, SubVol, SubvolumeGroup}, operatorNamespace, clusterNamespace, true, false)
var info map[string]interface{}
err := json.Unmarshal([]byte(subVolumeInfo), &info)
if err != nil {
logging.Fatal(fmt.Errorf("failed to unmarshal: %q", err))
}
state, ok := info["state"].(string)
if !ok {
logging.Fatal(fmt.Errorf("failed to get the state of subvolume: %q", SubVol))
}
return state
}

// gets list of filesystem
func getFileSystem(ctx context.Context, clientsets *k8sutil.Clientsets, operatorNamespace, clusterNamespace string) []fsStruct {
fsList := exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "ls", "--format", "json"}, operatorNamespace, clusterNamespace, true, false)
@@ -143,32 +178,30 @@ func unMarshaljson(list string) []fsStruct {
return unmarshal
}

func Delete(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, subList, fs, svg string) {
subvollist := strings.Split(subList, ",")
func Delete(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace string, args []string) {
k8sSubvolume := getK8sRefSubvolume(ctx, clientsets)
for _, subvolume := range subvollist {
check := checkStaleSubvolume(ctx, clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg, k8sSubvolume)
if check {
exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "rm", fs, subvolume, svg}, OperatorNamespace, CephClusterNamespace, true, false)
logging.Info("subvolume %q deleted", subvolume)
} else {
logging.Info("subvolume %q is not stale", subvolume)
}
fs := args[0]
subvol := args[1]
var svg string
if len(args) == 2 {
svg = "csi"
} else {
svg = args[2]
}

check := isStaleSubvolume(subvol, k8sSubvolume)

if !check {
exec.RunCommandInOperatorPod(ctx, clientsets, "ceph", []string{"fs", "subvolume", "rm", fs, subvol, svg, "--retain-snapshots"}, OperatorNamespace, CephClusterNamespace, true, false)
logging.Info("subvolume %q deleted", subvol)
} else {
logging.Info("subvolume %q is not stale", subvol)
}
}

// checkStaleSubvolume checks if there are any stale subvolume to be deleted
func checkStaleSubvolume(ctx context.Context, clientsets *k8sutil.Clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg string, k8sSubvolume map[string]subVolumeInfo) bool {
// isStaleSubvolume checks if there is a stale subvolume to be deleted
func isStaleSubvolume(subvolume string, k8sSubvolume map[string]subVolumeInfo) bool {
_, ok := k8sSubvolume[subvolume]
if !ok {
snapshot := checkSnapshot(ctx, clientsets, OperatorNamespace, CephClusterNamespace, fs, subvolume, svg)
if snapshot {
logging.Error(fmt.Errorf("subvolume %s has snapshots", subvolume))
return false
} else {
return true
}
}
logging.Error(fmt.Errorf("Subvolume %s is referenced by a PV", subvolume))
return false

return ok
}
