
Commit 696a8ce

feat(pre-sync-mode): copy RWX PVCs without actually swapping them
Reduces the downtime needed for the actual migration. Typically used with RWX PVCs: all destination PVCs are created and the data is rsync'ed ahead of time, without having to scale down the pods/deployments/statefulsets. The actual swap of the PVCs can then be done at a later stage and takes much less time, since most of the data will already have been rsync'ed.

Signed-off-by: Clément Nussbaumer <[email protected]>

fix(pre-sync-mode): skip volumes not supporting the RWX access mode

Signed-off-by: Clément Nussbaumer <[email protected]>

fix: proper matching PVCs count

Signed-off-by: Clément Nussbaumer <[email protected]>

feat: add max-pvs=n flag

Signed-off-by: Clément Nussbaumer <[email protected]>

feat(pre-sync-mode): run a "prefetch" copy before actually migrating

Signed-off-by: Clément Nussbaumer <[email protected]>
1 parent 6c58960 commit 696a8ce

Showing 4 changed files with 80 additions and 46 deletions.

cmd/main.go

Lines changed: 2 additions & 0 deletions
@@ -39,7 +39,9 @@ func main() {
 	flag.StringVar(&options.Namespace, "namespace", "", "only migrate PVCs within this namespace")
 	flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest")
 	flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs")
+	flag.BoolVar(&options.PreSyncMode, "pre-sync-mode", false, "create the new PVC and copy the data, then scale down, run another copy and finally swap the PVCs")
 	flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist")
+	flag.IntVar(&options.MaxPVs, "max-pvs", 0, "maximum number of PVs to process. default to 0 (unlimited)")
 	flag.IntVar(&podReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for validation pod(s) to go into Ready phase")
 	flag.IntVar(&deletePVTimeout, "delete-pv-timeout", 300, "length of time to wait (in seconds) for backing PV to be removed when temporary PVC is deleted")
 	flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip preflight migration validation on the destination storage provider")
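For illustration, here is a minimal, self-contained sketch of how the two new flags map onto option fields. The local options struct below is a stand-in defined just for this example; the real fields live in migrate.Options in pkg/migrate/migrate.go, shown further down.

package main

import (
	"flag"
	"fmt"
)

// options mirrors the two fields this commit adds to migrate.Options.
type options struct {
	PreSyncMode bool
	MaxPVs      int
}

func main() {
	var opts options
	flag.BoolVar(&opts.PreSyncMode, "pre-sync-mode", false, "create the new PVC and copy the data, then scale down, run another copy and finally swap the PVCs")
	flag.IntVar(&opts.MaxPVs, "max-pvs", 0, "maximum number of PVs to process, 0 means unlimited")
	flag.Parse()
	fmt.Printf("pre-sync-mode=%v max-pvs=%d\n", opts.PreSyncMode, opts.MaxPVs)
}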

pkg/k8sutil/truncate.go

Lines changed: 2 additions & 2 deletions
@@ -2,14 +2,14 @@ package k8sutil
 
 import "fmt"
 
-const nameSuffix = "-pvcmigrate"
+const PVCNameSuffix = "-pvcmigrate"
 
 // if the length after adding the suffix is more than 253 characters, we need to reduce that to fit within k8s limits
 // pruning from the end runs the risk of dropping the '0'/'1'/etc of a statefulset's PVC name
 // pruning from the front runs the risk of making a-replica-... and b-replica-... collide
 // so this removes characters from the middle of the string
 func NewPvcName(originalName string) string {
-	candidate := originalName + nameSuffix
+	candidate := originalName + PVCNameSuffix
 	if len(candidate) <= 253 {
 		return candidate
 	}
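Exporting the suffix lets other packages recognize destination PVCs that pvmigrate itself created, which getPVCs uses in the migrate.go diff below to skip PVs whose claim already carries the suffix. A small standalone sketch of that check (the constant is copied here so the example runs on its own):

package main

import (
	"fmt"
	"strings"
)

// PVCNameSuffix mirrors the constant exported from pkg/k8sutil in this commit.
const PVCNameSuffix = "-pvcmigrate"

// isMigrationTarget reports whether a claim name already carries the
// pvcmigrate suffix, i.e. it names a destination PVC from a previous run.
func isMigrationTarget(claimName string) bool {
	return strings.HasSuffix(claimName, PVCNameSuffix)
}

func main() {
	fmt.Println(isMigrationTarget("data-postgres-0"))            // false
	fmt.Println(isMigrationTarget("data-postgres-0-pvcmigrate")) // true
}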

pkg/migrate/migrate.go

Lines changed: 64 additions & 40 deletions
@@ -8,6 +8,7 @@ import (
 	"io"
 	"log"
 	"strconv"
+	"strings"
 	"text/tabwriter"
 	"time"
 
@@ -50,6 +51,8 @@ type Options struct {
 	Namespace            string
 	SetDefaults          bool
 	VerboseCopy          bool
+	PreSyncMode          bool
+	MaxPVs               int
 	SkipSourceValidation bool
 	PodReadyTimeout      time.Duration
 	DeletePVTimeout      time.Duration
@@ -62,17 +65,28 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface,
 		return err
 	}
 
-	matchingPVCs, namespaces, err := getPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.Namespace)
+	if options.PreSyncMode {
+		w.Println("\nRunning in pre-sync-mode: we first copy the PVC live, without scaling down pods. Once that pre-sync is completed, we scale down, do another copy/sync and finally swap the PVCs.")
+	}
+
+	matchingPVCs, namespaces, err := getPVCs(ctx, w, clientset, &options)
 	if err != nil {
 		return err
 	}
 
-	updatedMatchingPVCs, err := scaleDownPods(ctx, w, clientset, matchingPVCs, time.Second*5)
+	if options.PreSyncMode {
+		err = copyAllPVCs(ctx, w, clientset, &options, matchingPVCs, 1*time.Second)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = scaleDownPods(ctx, w, clientset, matchingPVCs, time.Second*5)
 	if err != nil {
 		return fmt.Errorf("failed to scale down pods: %w", err)
 	}
 
-	err = copyAllPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.RsyncImage, updatedMatchingPVCs, options.VerboseCopy, time.Second, options.RsyncFlags)
+	err = copyAllPVCs(ctx, w, clientset, &options, matchingPVCs, 1*time.Second)
 	if err != nil {
 		return err
 	}
@@ -169,15 +183,15 @@ func swapDefaultStorageClasses(ctx context.Context, w *log.Logger, clientset k8s
 	return nil
 }
 
-func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName string, destSCName string, rsyncImage string, matchingPVCs map[string][]*corev1.PersistentVolumeClaim, verboseCopy bool, waitTime time.Duration, rsyncFlags []string) error {
+func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, options *Options, matchingPVCs map[string][]*corev1.PersistentVolumeClaim, waitTime time.Duration) error {
 	// create a pod for each PVC migration, and wait for it to finish
-	w.Printf("\nCopying data from %s PVCs to %s PVCs\n", sourceSCName, destSCName)
+	w.Printf("\nCopying data from %s PVCs to %s PVCs\n", options.SourceSCName, options.DestSCName)
 	for ns, nsPvcs := range matchingPVCs {
 		for _, nsPvc := range nsPvcs {
 			sourcePvcName, destPvcName := nsPvc.Name, k8sutil.NewPvcName(nsPvc.Name)
 			w.Printf("Copying data from %s (%s) to %s in %s\n", sourcePvcName, nsPvc.Spec.VolumeName, destPvcName, ns)
 
-			err := copyOnePVC(ctx, w, clientset, ns, sourcePvcName, destPvcName, rsyncImage, verboseCopy, waitTime, rsyncFlags)
+			err := copyOnePVC(ctx, w, clientset, ns, sourcePvcName, destPvcName, options, waitTime)
 			if err != nil {
 				return fmt.Errorf("failed to copy PVC %s in %s: %w", nsPvc.Name, ns, err)
 			}
@@ -186,15 +200,15 @@ func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa
 	return nil
 }
 
-func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, verboseCopy bool, waitTime time.Duration, rsyncFlags []string) error {
+func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, options *Options, waitTime time.Duration) error {
 	w.Printf("Determining the node to migrate PVC %s on\n", sourcePvcName)
 	nodeName, err := getDesiredNode(ctx, clientset, ns, sourcePvcName)
 	if err != nil {
 		return fmt.Errorf("failed to get node for PVC %s in %s: %w", sourcePvcName, ns, err)
 	}
 
 	w.Printf("Creating pvc migrator pod on node %s\n", nodeName)
-	createdPod, err := createMigrationPod(ctx, clientset, ns, sourcePvcName, destPvcName, rsyncImage, nodeName, rsyncFlags)
+	createdPod, err := createMigrationPod(ctx, clientset, ns, sourcePvcName, destPvcName, options.RsyncImage, nodeName, options.RsyncFlags)
 	if err != nil {
 		return err
 	}
@@ -278,13 +292,13 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac
 			w.Printf("failed to read pod logs: %v\n", err)
 			break
 		}
-		if verboseCopy {
+		if options.VerboseCopy {
 			w.Printf(" %s\n", line)
 		} else {
 			_, _ = fmt.Fprintf(w.Writer(), ".") // one dot per line of output
 		}
 	}
-	if !verboseCopy {
+	if !options.VerboseCopy {
 		_, _ = fmt.Fprintf(w.Writer(), "done!\n") // add a newline at the end of the dots if not showing pod logs
 	}
 
@@ -416,7 +430,7 @@ func createMigrationPod(ctx context.Context, clientset k8sclient.Interface, ns s
 // a map of namespaces to arrays of original PVCs
 // an array of namespaces that the PVCs were found within
 // an error, if one was encountered
-func getPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName, destSCName string, Namespace string) (map[string][]*corev1.PersistentVolumeClaim, []string, error) {
+func getPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, opts *Options) (map[string][]*corev1.PersistentVolumeClaim, []string, error) {
 	// get PVs using the specified storage provider
 	pvs, err := clientset.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
 	if err != nil {
@@ -425,26 +439,38 @@ func getPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface,
 	matchingPVs := []corev1.PersistentVolume{}
 	pvsByName := map[string]corev1.PersistentVolume{}
 	for _, pv := range pvs.Items {
-		if pv.Spec.StorageClassName == sourceSCName {
+		if pv.Spec.StorageClassName == opts.SourceSCName {
 			matchingPVs = append(matchingPVs, pv)
 			pvsByName[pv.Name] = pv
 		} else {
-			w.Printf("PV %s does not match source SC %s, not migrating\n", pv.Name, sourceSCName)
+			w.Printf("PV %s does not match source SC %s, not migrating\n", pv.Name, opts.SourceSCName)
 		}
 	}
 
 	// get PVCs using specified PVs
+	matchingPVCsCount := 0
 	matchingPVCs := map[string][]*corev1.PersistentVolumeClaim{}
 	for _, pv := range matchingPVs {
+		if opts.MaxPVs > 0 && matchingPVCsCount >= opts.MaxPVs {
+			break
+		}
 		if pv.Spec.ClaimRef != nil {
+			if len(opts.Namespace) > 0 && pv.Spec.ClaimRef.Namespace != opts.Namespace {
+				continue // early continue, to prevent logging info regarding PV/PVCs in other namespaces
+			}
+
+			if strings.HasSuffix(pv.Spec.ClaimRef.Name, k8sutil.PVCNameSuffix) {
+				w.Printf("Skipping PV %s as the claiming PVC has the %s suffix", pv.Name, k8sutil.PVCNameSuffix)
+				continue
+			}
+
 			pvc, err := clientset.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(ctx, pv.Spec.ClaimRef.Name, metav1.GetOptions{})
 			if err != nil {
 				return nil, nil, fmt.Errorf("failed to get PVC for PV %s in %s: %w", pv.Spec.ClaimRef.Name, pv.Spec.ClaimRef.Namespace, err)
 			}
 
-			if pv.Spec.ClaimRef.Namespace == Namespace || Namespace == "" {
-				matchingPVCs[pv.Spec.ClaimRef.Namespace] = append(matchingPVCs[pv.Spec.ClaimRef.Namespace], pvc)
-			}
+			matchingPVCsCount++
+			matchingPVCs[pv.Spec.ClaimRef.Namespace] = append(matchingPVCs[pv.Spec.ClaimRef.Namespace], pvc)
 
 		} else {
 			return nil, nil, fmt.Errorf("PV %s does not have an associated PVC - resolve this before rerunning", pv.Name)
@@ -456,7 +482,7 @@ func getPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface,
 		pvcNamespaces = append(pvcNamespaces, idx)
 	}
 
-	w.Printf("\nFound %d matching PVCs to migrate across %d namespaces:\n", len(matchingPVCs), len(pvcNamespaces))
+	w.Printf("\nFound %d matching PVCs to migrate across %d namespaces:\n", matchingPVCsCount, len(pvcNamespaces))
 	tw := tabwriter.NewWriter(w.Writer(), 2, 2, 1, ' ', 0)
 	_, _ = fmt.Fprintf(tw, "namespace:\tpvc:\tpv:\tsize:\t\n")
 	for ns, nsPvcs := range matchingPVCs {
@@ -471,7 +497,7 @@ func getPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface,
 	}
 
 	// create new PVCs for each matching PVC
-	w.Printf("\nCreating new PVCs to migrate data to using the %s StorageClass\n", destSCName)
+	w.Printf("\nCreating new PVCs to migrate data to using the %s StorageClass\n", opts.DestSCName)
 	for ns, nsPvcs := range matchingPVCs {
 		for _, nsPvc := range nsPvcs {
 			newName := k8sutil.NewPvcName(nsPvc.Name)
@@ -493,7 +519,7 @@ func getPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface,
 				return nil, nil, fmt.Errorf("failed to find existing PVC: %w", err)
 			}
 		} else if existingPVC != nil {
-			if existingPVC.Spec.StorageClassName != nil && *existingPVC.Spec.StorageClassName == destSCName {
+			if existingPVC.Spec.StorageClassName != nil && *existingPVC.Spec.StorageClassName == opts.DestSCName {
 				existingSize := existingPVC.Spec.Resources.Requests.Storage().String()
 
 				if existingSize == desiredPvStorage.String() {
@@ -525,7 +551,7 @@ func getPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface,
 				},
 			},
 			Spec: corev1.PersistentVolumeClaimSpec{
-				StorageClassName: &destSCName,
+				StorageClassName: &opts.DestSCName,
 				Resources: corev1.VolumeResourceRequirements{
 					Requests: map[corev1.ResourceName]resource.Quantity{
 						corev1.ResourceStorage: desiredPvStorage,
@@ -675,17 +701,15 @@ func mutateSC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface,
 // if a pod is not created by pvmigrate, and is not controlled by a statefulset/deployment, this function will return an error.
 // if waitForCleanup is true, after scaling down deployments/statefulsets it will wait for all pods to be deleted.
 // It returns a map of namespace to PVCs and any errors encountered.
-func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, matchingPVCs map[string][]*corev1.PersistentVolumeClaim, checkInterval time.Duration) (map[string][]*corev1.PersistentVolumeClaim, error) {
-	// build new map with complete pvcCtx
-	updatedPVCs := matchingPVCs
+func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, matchingPVCs map[string][]*corev1.PersistentVolumeClaim, checkInterval time.Duration) error {
 
 	// get pods using specified PVCs
 	matchingPods := map[string][]corev1.Pod{}
 	matchingPodsCount := 0
 	for ns, nsPvcs := range matchingPVCs {
 		nsPods, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
 		if err != nil {
-			return nil, fmt.Errorf("failed to get pods in %s: %w", ns, err)
+			return fmt.Errorf("failed to get pods in %s: %w", ns, err)
 		}
 		for _, nsPod := range nsPods.Items {
 		perPodLoop:
@@ -706,7 +730,7 @@ func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Inter
 	for ns, nsPvcs := range matchingPVCs {
 		nsPods, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
 		if err != nil {
-			return nil, fmt.Errorf("failed to get pods in %s: %w", ns, err)
+			return fmt.Errorf("failed to get pods in %s: %w", ns, err)
 		}
 		for _, nsPod := range nsPods.Items {
 			for _, podVol := range nsPod.Spec.Volumes {
@@ -728,7 +752,7 @@ func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Inter
 						return volume.Annotations[sourceNodeAnnotation] == nsPod.Spec.NodeName
 					})
 					if err != nil {
-						return nil, fmt.Errorf("failed to annotate pv %s (backs pvc %s) with node name %s: %w", nsPvClaim.Spec.VolumeName, nsPvClaim.ObjectMeta.Name, nsPod.Spec.NodeName, err)
+						return fmt.Errorf("failed to annotate pv %s (backs pvc %s) with node name %s: %w", nsPvClaim.Spec.VolumeName, nsPvClaim.ObjectMeta.Name, nsPod.Spec.NodeName, err)
 					}
 				}
 			}
@@ -747,7 +771,7 @@ func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Inter
 	}
 	err := tw.Flush()
 	if err != nil {
-		return nil, fmt.Errorf("failed to print Pods: %w", err)
+		return fmt.Errorf("failed to print Pods: %w", err)
 	}
 
 	// get owners controlling specified pods
@@ -771,11 +795,11 @@ func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Inter
 				// this pod was created by pvmigrate, so it can be deleted by pvmigrate
 				err := clientset.CoreV1().Pods(ns).Delete(ctx, nsPod.Name, metav1.DeleteOptions{})
 				if err != nil {
-					return nil, fmt.Errorf("migration pod %s in %s leftover from a previous run failed to delete, please delete it before retrying: %w", nsPod.Name, ns, err)
+					return fmt.Errorf("migration pod %s in %s leftover from a previous run failed to delete, please delete it before retrying: %w", nsPod.Name, ns, err)
 				}
 			} else {
 				// TODO: handle properly
-				return nil, fmt.Errorf("pod %s in %s did not have any owners!\nPlease delete it before retrying", nsPod.Name, ns)
+				return fmt.Errorf("pod %s in %s did not have any owners!\nPlease delete it before retrying", nsPod.Name, ns)
 			}
 		}
 	}
@@ -791,7 +815,7 @@ func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Inter
 			case "StatefulSet":
 				ss, err := clientset.AppsV1().StatefulSets(ns).Get(ctx, ownerName, metav1.GetOptions{})
 				if err != nil {
-					return nil, fmt.Errorf("failed to get statefulset %s scale in %s: %w", ownerName, ns, err)
+					return fmt.Errorf("failed to get statefulset %s scale in %s: %w", ownerName, ns, err)
 				}
 
 				formerScale := int32(1)
@@ -812,24 +836,24 @@ func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Inter
 				w.Printf("scaling StatefulSet %s from %d to 0 in %s\n", ownerName, formerScale, ns)
 				_, err = clientset.AppsV1().StatefulSets(ns).Update(ctx, ss, metav1.UpdateOptions{})
 				if err != nil {
-					return nil, fmt.Errorf("failed to scale statefulset %s to zero in %s: %w", ownerName, ns, err)
+					return fmt.Errorf("failed to scale statefulset %s to zero in %s: %w", ownerName, ns, err)
 				}
 			case "ReplicaSet":
 				rs, err := clientset.AppsV1().ReplicaSets(ns).Get(ctx, ownerName, metav1.GetOptions{})
 				if err != nil {
-					return nil, fmt.Errorf("failed to get replicaset %s in %s: %w", ownerName, ns, err)
+					return fmt.Errorf("failed to get replicaset %s in %s: %w", ownerName, ns, err)
 				}
 
 				if len(rs.OwnerReferences) != 1 {
-					return nil, fmt.Errorf("expected 1 owner for replicaset %s in %s, found %d instead", ownerName, ns, len(rs.OwnerReferences))
+					return fmt.Errorf("expected 1 owner for replicaset %s in %s, found %d instead", ownerName, ns, len(rs.OwnerReferences))
 				}
 				if rs.OwnerReferences[0].Kind != "Deployment" {
-					return nil, fmt.Errorf("expected owner for replicaset %s in %s to be a deployment, found %s of kind %s instead", ownerName, ns, rs.OwnerReferences[0].Name, rs.OwnerReferences[0].Kind)
+					return fmt.Errorf("expected owner for replicaset %s in %s to be a deployment, found %s of kind %s instead", ownerName, ns, rs.OwnerReferences[0].Name, rs.OwnerReferences[0].Kind)
 				}
 
 				dep, err := clientset.AppsV1().Deployments(ns).Get(ctx, rs.OwnerReferences[0].Name, metav1.GetOptions{})
 				if err != nil {
-					return nil, fmt.Errorf("failed to get deployment %s scale in %s: %w", ownerName, ns, err)
+					return fmt.Errorf("failed to get deployment %s scale in %s: %w", ownerName, ns, err)
 				}
 
 				formerScale := int32(1)
@@ -850,10 +874,10 @@ func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Inter
 				w.Printf("scaling Deployment %s from %d to 0 in %s\n", ownerName, formerScale, ns)
 				_, err = clientset.AppsV1().Deployments(ns).Update(ctx, dep, metav1.UpdateOptions{})
 				if err != nil {
-					return nil, fmt.Errorf("failed to scale statefulset %s to zero in %s: %w", ownerName, ns, err)
+					return fmt.Errorf("failed to scale statefulset %s to zero in %s: %w", ownerName, ns, err)
 				}
 			default:
-				return nil, fmt.Errorf("scaling pods controlled by a %s is not supported, please delete the pods controlled by %s in %s before retrying", ownerKind, ownerKind, ns)
+				return fmt.Errorf("scaling pods controlled by a %s is not supported, please delete the pods controlled by %s in %s before retrying", ownerKind, ownerKind, ns)
 			}
 		}
 	}
@@ -867,15 +891,15 @@ checkPvcPodLoop:
 	for ns, nsPvcs := range matchingPVCs {
 		nsPods, err := clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
 		if err != nil {
-			return nil, fmt.Errorf("failed to get pods in %s: %w", ns, err)
+			return fmt.Errorf("failed to get pods in %s: %w", ns, err)
 		}
 		for _, nsPod := range nsPods.Items {
 			for _, podVol := range nsPod.Spec.Volumes {
 				if podVol.PersistentVolumeClaim != nil {
 					for _, nsClaim := range nsPvcs {
 						if podVol.PersistentVolumeClaim.ClaimName == nsClaim.Name {
 							if nsPod.CreationTimestamp.After(migrationStartTime) {
-								return nil, fmt.Errorf("pod %s in %s mounting %s was created at %s, after scale-down started at %s. It is likely that there is some other operator scaling this back up", nsPod.Name, ns, nsClaim.Name, nsPod.CreationTimestamp.Format(time.RFC3339), migrationStartTime.Format(time.RFC3339))
+								return fmt.Errorf("pod %s in %s mounting %s was created at %s, after scale-down started at %s. It is likely that there is some other operator scaling this back up", nsPod.Name, ns, nsClaim.Name, nsPod.CreationTimestamp.Format(time.RFC3339), migrationStartTime.Format(time.RFC3339))
 							}
 
 							w.Printf("Found pod %s in %s mounting to-be-migrated PVC %s, waiting\n", nsPod.Name, ns, nsClaim.Name)
@@ -892,7 +916,7 @@ checkPvcPodLoop:
 	}
 
 	w.Printf("All pods removed successfully\n")
-	return updatedPVCs, nil
+	return nil
 }
 
 func scaleUpPods(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, namespaces []string) error {
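To summarize the new control flow in Migrate, here is a runnable sketch of the order of operations in pre-sync mode. The helpers are stubs standing in for the real copyAllPVCs and scaleDownPods above, with the same wait durations as in the diff; it is an illustration of the sequencing, not the actual implementation.

package main

import (
	"fmt"
	"time"
)

// Stubs standing in for the real helpers in pkg/migrate/migrate.go.
func copyAllPVCs(wait time.Duration) error {
	fmt.Println("rsync data from source PVCs to destination PVCs")
	return nil
}

func scaleDownPods(wait time.Duration) error {
	fmt.Println("scale down deployments/statefulsets using the source PVCs")
	return nil
}

// run sketches the order of operations added by this commit: in pre-sync
// mode an extra "prefetch" copy happens while the workloads are still
// running, so the copy after scale-down only has to transfer the delta.
func run(preSyncMode bool) error {
	if preSyncMode {
		if err := copyAllPVCs(1 * time.Second); err != nil { // prefetch, pods still up
			return err
		}
	}
	if err := scaleDownPods(5 * time.Second); err != nil {
		return err
	}
	return copyAllPVCs(1 * time.Second) // final sync before the PVCs are swapped
}

func main() {
	if err := run(true); err != nil {
		fmt.Println("migration failed:", err)
	}
}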
