diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersCmd.java index 17575076444d..974a4533ff97 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersCmd.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.backup.BackupProvider; +import org.apache.cloudstack.backup.BackupProviderNameUtils; import com.cloud.user.Account; @@ -70,11 +71,12 @@ private void setupResponse(final List providers) { final ListResponse response = new ListResponse<>(); final List responses = new ArrayList<>(); for (final BackupProvider provider : providers) { - if (provider == null || (getName() != null && !provider.getName().equals(getName()))) { + final String displayName = provider == null ? 
null : BackupProviderNameUtils.toDisplayName(provider.getName()); + if (provider == null || (getName() != null && !displayName.equalsIgnoreCase(getName()))) { continue; } final BackupProviderResponse backupProviderResponse = new BackupProviderResponse(); - backupProviderResponse.setName(provider.getName()); + backupProviderResponse.setName(displayName); backupProviderResponse.setDescription(provider.getDescription()); backupProviderResponse.setObjectName("providers"); responses.add(backupProviderResponse); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersForZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersForZoneCmd.java index 8d4fa8eba502..7f16fa42bdc3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersForZoneCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ListBackupProvidersForZoneCmd.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.backup.BackupProvider; +import org.apache.cloudstack.backup.BackupProviderNameUtils; import com.cloud.user.Account; @@ -75,7 +76,7 @@ private void setupResponse(final List providers) { continue; } final BackupProviderResponse backupProviderResponse = new BackupProviderResponse(); - backupProviderResponse.setName(provider.getName()); + backupProviderResponse.setName(BackupProviderNameUtils.toDisplayName(provider.getName())); backupProviderResponse.setDescription(provider.getDescription()); backupProviderResponse.setObjectName("providers"); responses.add(backupProviderResponse); diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java index 9b8c17a95f25..0247ccd83807 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java +++ 
b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.command.user.backup.ListBackupsCmd; import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.ValidatedConfigKey; import org.apache.cloudstack.framework.config.Configurable; import com.cloud.exception.ResourceUnavailableException; @@ -54,11 +53,10 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer "false", "Is backup and recovery framework enabled.", false, ConfigKey.Scope.Zone); - ConfigKey BackupProviderPlugin = new ValidatedConfigKey<>("Advanced", String.class, + ConfigKey BackupProviderPlugin = new ConfigKey<>("Advanced", String.class, "backup.framework.provider.plugin", "dummy", - "The backup and recovery provider plugin. Valid plugin values: dummy, veeam, networker and nas", - true, ConfigKey.Scope.Zone, BackupFrameworkEnabled.key(), value -> validateBackupProviderConfig((String)value)); + "The backup and recovery provider plugin.", true, ConfigKey.Scope.Zone, BackupFrameworkEnabled.key()); ConfigKey BackupSyncPollingInterval = new ConfigKey<>("Advanced", Long.class, "backup.framework.sync.interval", @@ -70,6 +68,23 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer "false", "Enable volume attach/detach operations for VMs that are assigned to Backup Offerings.", true); + ConfigKey KvmIncrementalBackup = new ConfigKey<>("Advanced", Boolean.class, + "kvm.incremental.backup", + "false", + "Enable KVM incremental backups for supported backup providers.", + false, + ConfigKey.Scope.Cluster, + null); + + ConfigKey BackupDeltaMax = new ConfigKey<>(Integer.class, + "backup.delta.max", + "Advanced", + "10", + "Max incremental backups between two full backups for KVM backup providers.", + true, + ConfigKey.Scope.Global, + null); + ConfigKey DefaultMaxAccountBackups = new 
ConfigKey("Account Defaults", Long.class, "max.account.backups", "20", @@ -253,13 +268,4 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer void checkAndRemoveBackupOfferingBeforeExpunge(VirtualMachine vm); - static void validateBackupProviderConfig(String value) { - if (value != null && (value.contains(",") || value.trim().contains(" "))) { - throw new IllegalArgumentException("Multiple backup provider plugins are not supported. Please provide a single plugin value."); - } - List validPlugins = List.of("dummy", "veeam", "networker", "nas"); - if (value != null && !validPlugins.contains(value)) { - throw new IllegalArgumentException("Invalid backup provider plugin: " + value + ". Valid plugin values are: " + String.join(", ", validPlugins)); - } - } } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupProviderNameUtils.java b/api/src/main/java/org/apache/cloudstack/backup/BackupProviderNameUtils.java new file mode 100644 index 000000000000..9788398cc3d4 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupProviderNameUtils.java @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import org.apache.commons.lang3.StringUtils; + +public final class BackupProviderNameUtils { + public static final String NAS = "nas"; + public static final String COMMVAULT = "commvault"; + public static final String ABLESTACK_NAS = "ablestack-nas"; + public static final String ABLESTACK_COMMVAULT = "ablestack-commvault"; + + private BackupProviderNameUtils() { + } + + public static String canonicalize(final String providerName) { + if (StringUtils.isBlank(providerName)) { + return providerName; + } + if (NAS.equalsIgnoreCase(providerName) || ABLESTACK_NAS.equalsIgnoreCase(providerName)) { + return ABLESTACK_NAS; + } + if (COMMVAULT.equalsIgnoreCase(providerName) || ABLESTACK_COMMVAULT.equalsIgnoreCase(providerName)) { + return ABLESTACK_COMMVAULT; + } + return providerName; + } + + public static String toDisplayName(final String providerName) { + if (StringUtils.isBlank(providerName)) { + return providerName; + } + if (ABLESTACK_NAS.equalsIgnoreCase(providerName) || NAS.equalsIgnoreCase(providerName)) { + return NAS; + } + if (ABLESTACK_COMMVAULT.equalsIgnoreCase(providerName) || COMMVAULT.equalsIgnoreCase(providerName)) { + return COMMVAULT; + } + return providerName; + } + + public static boolean isNasFamily(final String providerName) { + return ABLESTACK_NAS.equalsIgnoreCase(canonicalize(providerName)); + } + + public static boolean isCommvaultFamily(final String providerName) { + return ABLESTACK_COMMVAULT.equalsIgnoreCase(canonicalize(providerName)); + } +} diff --git a/client/pom.xml b/client/pom.xml index d54cb4493d17..f7600cde2ce6 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -614,7 +614,12 @@ org.apache.cloudstack - cloud-plugin-backup-commvault + cloud-plugin-backup-ablestack-nas + ${project.version} + + + org.apache.cloudstack + cloud-plugin-backup-ablestack-commvault ${project.version} diff --git a/core/src/main/java/org/apache/cloudstack/backup/CommvaultRestoreBackupCommand.java 
b/core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultRestoreBackupCommand.java similarity index 81% rename from core/src/main/java/org/apache/cloudstack/backup/CommvaultRestoreBackupCommand.java rename to core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultRestoreBackupCommand.java index fbcff2070801..1ab9a8d2fe4d 100644 --- a/core/src/main/java/org/apache/cloudstack/backup/CommvaultRestoreBackupCommand.java +++ b/core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultRestoreBackupCommand.java @@ -26,10 +26,12 @@ import java.util.List; -public class CommvaultRestoreBackupCommand extends Command { +public class AblestackCommvaultRestoreBackupCommand extends Command { private String vmName; private String backupPath; private List backupVolumesUUIDs; + private List backupFiles; + private List backupFileChains; private List restoreVolumePools; private List restoreVolumePaths; private String diskType; @@ -39,8 +41,9 @@ public class CommvaultRestoreBackupCommand extends Command { private Integer timeout; private String cacheMode; private String hostName; + private List backupSourceHosts; - protected CommvaultRestoreBackupCommand() { + protected AblestackCommvaultRestoreBackupCommand() { super(); } @@ -124,6 +127,22 @@ public void setBackupVolumesUUIDs(List backupVolumesUUIDs) { this.backupVolumesUUIDs = backupVolumesUUIDs; } + public List getBackupFiles() { + return backupFiles; + } + + public void setBackupFiles(List backupFiles) { + this.backupFiles = backupFiles; + } + + public List getBackupFileChains() { + return backupFileChains; + } + + public void setBackupFileChains(List backupFileChains) { + this.backupFileChains = backupFileChains; + } + public Integer getTimeout() { return this.timeout == null ? 
0 : this.timeout; } @@ -147,4 +166,12 @@ public String getHostName() { public void setHostName(String hostName) { this.hostName = hostName; } + + public List getBackupSourceHosts() { + return backupSourceHosts; + } + + public void setBackupSourceHosts(List backupSourceHosts) { + this.backupSourceHosts = backupSourceHosts; + } } diff --git a/core/src/main/java/org/apache/cloudstack/backup/CommvaultTakeBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultTakeBackupCommand.java similarity index 58% rename from core/src/main/java/org/apache/cloudstack/backup/CommvaultTakeBackupCommand.java rename to core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultTakeBackupCommand.java index f24f41d98675..5c22a8d8cfc7 100644 --- a/core/src/main/java/org/apache/cloudstack/backup/CommvaultTakeBackupCommand.java +++ b/core/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultTakeBackupCommand.java @@ -24,14 +24,20 @@ import java.util.List; -public class CommvaultTakeBackupCommand extends Command { +public class AblestackCommvaultTakeBackupCommand extends Command { private String vmName; private String backupPath; private List volumePools; private List volumePaths; private Boolean quiesce; - - public CommvaultTakeBackupCommand(String vmName, String backupPath) { + private String backupType; + private String checkpointName; + private String parentBackupPath; + private String parentCheckpointName; + private String parentCheckpointPath; + private List backupFiles; + + public AblestackCommvaultTakeBackupCommand(String vmName, String backupPath) { super(); this.vmName = vmName; this.backupPath = backupPath; @@ -77,6 +83,54 @@ public void setQuiesce(Boolean quiesce) { this.quiesce = quiesce; } + public String getBackupType() { + return backupType; + } + + public void setBackupType(String backupType) { + this.backupType = backupType; + } + + public String getCheckpointName() { + return checkpointName; + } + + public void 
setCheckpointName(String checkpointName) { + this.checkpointName = checkpointName; + } + + public String getParentBackupPath() { + return parentBackupPath; + } + + public void setParentBackupPath(String parentBackupPath) { + this.parentBackupPath = parentBackupPath; + } + + public String getParentCheckpointName() { + return parentCheckpointName; + } + + public void setParentCheckpointName(String parentCheckpointName) { + this.parentCheckpointName = parentCheckpointName; + } + + public String getParentCheckpointPath() { + return parentCheckpointPath; + } + + public void setParentCheckpointPath(String parentCheckpointPath) { + this.parentCheckpointPath = parentCheckpointPath; + } + + public List getBackupFiles() { + return backupFiles; + } + + public void setBackupFiles(List backupFiles) { + this.backupFiles = backupFiles; + } + @Override public boolean executeInSequence() { return true; diff --git a/core/src/main/java/org/apache/cloudstack/backup/AblestackDeleteBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/AblestackDeleteBackupCommand.java new file mode 100644 index 000000000000..7d892b22a12c --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/AblestackDeleteBackupCommand.java @@ -0,0 +1,113 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; + +public class AblestackDeleteBackupCommand extends Command { + private String backupPath; + private String backupRepoType; + private String backupRepoAddress; + private String backupProvider; + private String checkpointName; + private String diskPaths; + private boolean forced; + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + + public AblestackDeleteBackupCommand(String backupPath, String backupRepoType, String backupRepoAddress, String mountOptions, boolean forced) { + super(); + this.backupPath = backupPath; + this.backupRepoType = backupRepoType; + this.backupRepoAddress = backupRepoAddress; + this.mountOptions = mountOptions; + this.forced = forced; + } + + public String getBackupPath() { + return backupPath; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public String getBackupProvider() { + return backupProvider; + } + + public void setBackupProvider(String backupProvider) { + this.backupProvider = backupProvider; + } + + public String getCheckpointName() { + return checkpointName; + } + + public void setCheckpointName(String checkpointName) { + this.checkpointName = checkpointName; + } + + public String getDiskPaths() { + return diskPaths; + } + + public void setDiskPaths(String diskPaths) { + this.diskPaths = diskPaths; + } + + public String getMountOptions() { + return mountOptions == null ? 
"" : mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public boolean isForced() { + return forced; + } + + public void setForced(boolean forced) { + this.forced = forced; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/AblestackNasRestoreBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/AblestackNasRestoreBackupCommand.java new file mode 100644 index 000000000000..ca2c07a0a01d --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/AblestackNasRestoreBackupCommand.java @@ -0,0 +1,185 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; + +import java.util.List; + +public class AblestackNasRestoreBackupCommand extends Command { + private String vmName; + private String backupPath; + private String backupRepoType; + private String backupRepoAddress; + private List backupVolumesUUIDs; + private List restoreVolumePools; + private List restoreVolumePaths; + private List volumePaths; + private List backupFiles; + private List backupFileChains; + private String diskType; + private Boolean vmExists; + private VirtualMachine.State vmState; + private Integer mountTimeout; + private String cacheMode; + + protected AblestackNasRestoreBackupCommand() { + super(); + } + + public String getVmName() { + return vmName; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } + + public String getBackupPath() { + return backupPath; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public List getRestoreVolumePools() { + return restoreVolumePools; + } + + public void setRestoreVolumePools(List restoreVolumePools) { + this.restoreVolumePools = restoreVolumePools; + } + + public List getRestoreVolumePaths() { + return restoreVolumePaths; + } + + public void setRestoreVolumePaths(List restoreVolumePaths) { + this.restoreVolumePaths = restoreVolumePaths; + } + + public List getVolumePaths() { + return volumePaths; + } + + public void setVolumePaths(List volumePaths) { + this.volumePaths 
= volumePaths; + } + + public List getBackupFiles() { + return backupFiles; + } + + public void setBackupFiles(List backupFiles) { + this.backupFiles = backupFiles; + } + + public List getBackupFileChains() { + return backupFileChains; + } + + public void setBackupFileChains(List backupFileChains) { + this.backupFileChains = backupFileChains; + } + + public Boolean isVmExists() { + return vmExists; + } + + public void setVmExists(Boolean vmExists) { + this.vmExists = vmExists; + } + + public String getDiskType() { + return diskType; + } + + public void setDiskType(String diskType) { + this.diskType = diskType; + } + + public String getMountOptions() { + return mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public VirtualMachine.State getVmState() { + return vmState; + } + + public void setVmState(VirtualMachine.State vmState) { + this.vmState = vmState; + } + + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + @Override + + public boolean executeInSequence() { + return true; + } + + public List getBackupVolumesUUIDs() { + return backupVolumesUUIDs; + } + + public void setBackupVolumesUUIDs(List backupVolumesUUIDs) { + this.backupVolumesUUIDs = backupVolumesUUIDs; + } + + public Integer getMountTimeout() { + return this.mountTimeout == null ? 
0 : this.mountTimeout; + } + + public void setMountTimeout(Integer mountTimeout) { + this.mountTimeout = mountTimeout; + } + + public String getCacheMode() { + return cacheMode; + } + + public void setCacheMode(String cacheMode) { + this.cacheMode = cacheMode; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/AblestackNasTakeBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/AblestackNasTakeBackupCommand.java new file mode 100644 index 000000000000..56b132176913 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/AblestackNasTakeBackupCommand.java @@ -0,0 +1,167 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; + +import java.util.List; + +public class AblestackNasTakeBackupCommand extends Command { + private String vmName; + private String backupPath; + private String backupType; + private String checkpointName; + private String parentBackupPath; + private String parentCheckpointName; + private String parentCheckpointPath; + private String backupRepoType; + private String backupRepoAddress; + private List volumePools; + private List volumePaths; + private List backupFiles; + private Boolean quiesce; + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + + public AblestackNasTakeBackupCommand(String vmName, String backupPath) { + super(); + this.vmName = vmName; + this.backupPath = backupPath; + } + + public String getVmName() { + return vmName; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } + + public String getBackupPath() { + return backupPath; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + public String getBackupType() { + return backupType; + } + + public void setBackupType(String backupType) { + this.backupType = backupType; + } + + public String getCheckpointName() { + return checkpointName; + } + + public void setCheckpointName(String checkpointName) { + this.checkpointName = checkpointName; + } + + public String getParentBackupPath() { + return parentBackupPath; + } + + public void setParentBackupPath(String parentBackupPath) { + this.parentBackupPath = parentBackupPath; + } + + public String getParentCheckpointName() { + return parentCheckpointName; + } + + public void setParentCheckpointName(String parentCheckpointName) { + this.parentCheckpointName = parentCheckpointName; + } + + public String getParentCheckpointPath() { + return parentCheckpointPath; + } + + public void 
setParentCheckpointPath(String parentCheckpointPath) { + this.parentCheckpointPath = parentCheckpointPath; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public String getMountOptions() { + return mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public List getVolumePools() { + return volumePools; + } + + public void setVolumePools(List volumePools) { + this.volumePools = volumePools; + } + + public List getVolumePaths() { + return volumePaths; + } + + public void setVolumePaths(List volumePaths) { + this.volumePaths = volumePaths; + } + + public List getBackupFiles() { + return backupFiles; + } + + public void setBackupFiles(List backupFiles) { + this.backupFiles = backupFiles; + } + + public Boolean getQuiesce() { + return quiesce; + } + + public void setQuiesce(Boolean quiesce) { + this.quiesce = quiesce; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java index de79e8c18e1f..4ba41f9e3569 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java @@ -24,6 +24,7 @@ import com.cloud.domain.dao.DomainDao; import org.apache.cloudstack.api.response.BackupOfferingResponse; import org.apache.cloudstack.backup.BackupOffering; +import org.apache.cloudstack.backup.BackupProviderNameUtils; import org.apache.cloudstack.backup.BackupOfferingVO; 
import com.cloud.dc.DataCenterVO; @@ -67,7 +68,7 @@ public BackupOfferingResponse newBackupOfferingResponse(BackupOffering offering, response.setName(offering.getName()); response.setDescription(offering.getDescription()); response.setExternalId(offering.getExternalId()); - response.setProvider(offering.getProvider()); + response.setProvider(BackupProviderNameUtils.toDisplayName(offering.getProvider())); response.setUserDrivenBackups(offering.isUserDrivenBackupAllowed()); if (zone != null) { response.setZoneId(zone.getUuid()); @@ -91,7 +92,7 @@ public BackupOfferingResponse newBackupOfferingResponse(BackupOffering offering, if (offering.getRetentionPeriod() != null) { response.setRetentionPeriod(offering.getRetentionPeriod()); } - response.setProvider(offering.getProvider()); + response.setProvider(BackupProviderNameUtils.toDisplayName(offering.getProvider())); response.setCreated(offering.getCreated()); response.setObjectName("backupoffering"); return response; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql index 97c223dda2d7..a3209a605e81 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql @@ -41,6 +41,13 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.ldap_configuration', 'uuid', 'VARCHA -- Populate uuid for existing rows where uuid is NULL or empty UPDATE `cloud`.`ldap_configuration` SET uuid = UUID() WHERE uuid IS NULL OR uuid = ''; +-- Add vm_id column to usage_event table for volume usage events +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.usage_event','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with volume usage events"'); +CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_event','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with volume usage events"'); + +-- Add vm_id column to cloud_usage.usage_volume table +CALL 
`cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_volume','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with the volume usage"'); + -- Add the column cross_zone_instance_creation to cloud.backup_repository. if enabled it means that new Instance can be created on all Zones from Backups on this Repository. CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backup_repository', 'cross_zone_instance_creation', 'TINYINT(1) DEFAULT NULL COMMENT ''Backup Repository can be used for disaster recovery on another zone'''); diff --git a/plugins/backup/ablestack-commvault/pom.xml b/plugins/backup/ablestack-commvault/pom.xml new file mode 100644 index 000000000000..6b622e7a5a6e --- /dev/null +++ b/plugins/backup/ablestack-commvault/pom.xml @@ -0,0 +1,54 @@ + + + 4.0.0 + cloud-plugin-backup-ablestack-commvault + Ablestack Plugin - KVM Commvault Backup and Recovery Plugin + + cloudstack-plugins + org.apache.cloudstack + 4.22.0.0-SNAPSHOT + ../../pom.xml + + + + org.apache.cloudstack + cloud-plugin-hypervisor-kvm + ${project.version} + + + org.apache.commons + commons-lang3 + ${cs.commons-lang3.version} + + + com.fasterxml.jackson.core + jackson-databind + ${cs.jackson.version} + + + com.github.tomakehurst + wiremock-standalone + ${cs.wiremock.version} + test + + + diff --git a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/CommvaultBackupProvider.java b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultBackupProvider.java similarity index 59% rename from plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/CommvaultBackupProvider.java rename to plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultBackupProvider.java index a5e0f1e00a7c..075dc8ce98ca 100644 --- a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/CommvaultBackupProvider.java +++ 
b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/AblestackCommvaultBackupProvider.java @@ -51,23 +51,24 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; -import com.cloud.vm.snapshot.VMSnapshot; import com.cloud.vm.snapshot.dao.VMSnapshotDao; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.backup.commvault.CommvaultClient; +import org.apache.cloudstack.backup.commvault.AblestackCommvaultClient; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.dao.BackupOfferingDao; import org.apache.cloudstack.backup.dao.BackupOfferingDaoImpl; +import org.apache.cloudstack.backup.dao.BackupScheduleDao; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.apache.xml.utils.URI; @@ -78,6 +79,7 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; +import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -93,11 +95,27 @@ import java.util.regex.Pattern; import javax.inject.Inject; +import static org.apache.cloudstack.backup.BackupManager.BackupDeltaMax; import static 
org.apache.cloudstack.backup.BackupManager.BackupFrameworkEnabled; - -public class CommvaultBackupProvider extends AdapterBase implements BackupProvider, Configurable { - - private static final Logger LOG = LogManager.getLogger(CommvaultBackupProvider.class); +import static org.apache.cloudstack.backup.BackupManager.KvmIncrementalBackup; + +public class AblestackCommvaultBackupProvider extends AdapterBase implements BackupProvider, Configurable { + + private static final Logger LOG = LogManager.getLogger(AblestackCommvaultBackupProvider.class); + private static final String BACKUP_TYPE_FULL = "FULL"; + private static final String BACKUP_TYPE_INCREMENTAL = "INCREMENTAL"; + private static final String BACKUP_ENGINE_QCOW2 = "QCOW2"; + private static final String BACKUP_ENGINE_RBD_DIFF = "RBD_DIFF"; + private static final String DETAIL_CHECKPOINT_NAME = "commvault.checkpoint.name"; + private static final String DETAIL_CHECKPOINT_PATH = "commvault.checkpoint.path"; + private static final String DETAIL_PARENT_BACKUP_UUID = "commvault.parent.backup.uuid"; + private static final String DETAIL_PARENT_BACKUP_PATH = "commvault.parent.backup.path"; + private static final String DETAIL_PARENT_CHECKPOINT_NAME = "commvault.parent.checkpoint.name"; + private static final String DETAIL_PARENT_CHECKPOINT_PATH = "commvault.parent.checkpoint.path"; + private static final String DETAIL_BACKUP_ENGINE = "commvault.backup.engine"; + private static final String DETAIL_RBD_DISK_PATHS = "commvault.rbd.disk.paths"; + private static final String MISSING_PARENT_RBD_SNAPSHOT_ERROR = "Parent RBD snapshot"; + private static final String DETAIL_STAGE_HOST = "commvault.stage.host"; private static final String RM_COMMAND = "rm -rf %s"; private static final int BASE_MAJOR = 11; private static final int BASE_FR = 32; @@ -143,8 +161,8 @@ public class CommvaultBackupProvider extends AdapterBase implements BackupProvid private ConfigKey CommvaultBackupRestoreTimeout = new ConfigKey<>("Advanced", 
Integer.class, "commvault.backup.restore.timeout", - "30", - "Timeout in seconds after which qemu-img execute when restoring", + "1800", + "Timeout in seconds after which Commvault backup restore operations fail.", true, BackupFrameworkEnabled.key()); @@ -199,6 +217,9 @@ public class CommvaultBackupProvider extends AdapterBase implements BackupProvid @Inject private DiskOfferingDao diskOfferingDao; + @Inject + private BackupScheduleDao backupScheduleDao; + private Long getClusterIdFromRootVolume(VirtualMachine vm) { VolumeVO rootVolume = volumeDao.getInstanceRootVolume(vm.getId()); StoragePoolVO rootDiskPool = primaryDataStoreDao.findById(rootVolume.getPoolId()); @@ -259,11 +280,7 @@ protected Host getVMHypervisorHostForBackup(VirtualMachine vm) { public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { final Host vmHost = getVMHypervisorHostForBackup(vm); final HostVO vmHostVO = hostDao.findById(vmHost.getId()); - if (CollectionUtils.isNotEmpty(vmSnapshotDao.findByVmAndByType(vm.getId(), VMSnapshot.Type.DiskAndMemory))) { - LOG.debug("Commvault backup provider cannot take backups of a VM [{}] with disk-and-memory VM snapshots. Restoring the backup will corrupt any newer disk-and-memory " + - "VM snapshots.", vm); - throw new CloudRuntimeException(String.format("Cannot take backup of VM [%s] as it has disk-and-memory VM snapshots.", vm.getUuid())); - } + validateNoVmSnapshots(vm); try { String commvaultServer = getUrlDomain(CommvaultUrl.value()); @@ -271,7 +288,7 @@ public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { throw new CloudRuntimeException(String.format("Failed to convert API to HOST : %s", e)); } // 백업 중인 작업 조회 - final CommvaultClient client = getClient(vm.getDataCenterId()); + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); boolean activeJob = client.getActiveJob(vm.getInstanceName()); if (activeJob) { throw new CloudRuntimeException("There are backup jobs running on the virtual machine. 
Please try again later."); @@ -291,20 +308,220 @@ public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { } } - final Date creationDate = new Date(); - final String backupPath = String.format("%s/%s/%s", COMMVAULT_DIRECTORY, vm.getInstanceName(), - new SimpleDateFormat("yyyy.MM.dd.HH.mm.ss").format(creationDate)); + final String backupPath = buildBackupPath(vm); + final String backupContentPath = buildBackupContentPath(vm); + List vmVolumes = volumeDao.findByInstance(vm.getId()); + vmVolumes.sort(Comparator.comparing(Volume::getDeviceId)); + Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(vmVolumes); + validateVolumePoolTypes(volumePoolsAndPaths.first()); + final Backup latestBackup = getLatestBackedUpBackup(vm); + final boolean incrementalBackup = shouldUseIncrementalBackup(vm, latestBackup, vmHost); + BackupExecutionResult result = executeBackup(vm, quiesceVM, vmHost, vmHostVO, client, planId, backupPath, backupContentPath, vmVolumes, volumePoolsAndPaths, + latestBackup, incrementalBackup, incrementalBackup && vmVolumes.size() > 1); + if (!result.success && incrementalBackup && shouldRetryAsFullAfterIncrementalFailure(result, vmVolumes)) { + cleanupFailedBackupForFullRetry(result.backup); + LOG.warn("Incremental backup failed for VM [{}] due to [{}]. 
Retrying as full backup.", vm, result.details); + String fallbackBackupPath = buildBackupPath(vm); + result = executeBackup(vm, quiesceVM, vmHost, vmHostVO, client, planId, fallbackBackupPath, backupContentPath, vmVolumes, volumePoolsAndPaths, + null, false, false); + } + return new Pair<>(result.success, result.backup); + } - BackupVO backupVO = createBackupObject(vm, backupPath); - CommvaultTakeBackupCommand command = new CommvaultTakeBackupCommand(vm.getInstanceName(), backupPath); - command.setQuiesce(quiesceVM); + private Backup getLatestBackedUpBackup(VirtualMachine vm) { + List backups = backupDao.listByVmId(null, vm.getId()); + return backups.stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(b -> Backup.Status.BackedUp.equals(b.getStatus())) + .peek(backupDao::loadDetails) + .max(Comparator.comparing(BackupVO::getDate)) + .orElse(null); + } - if (VirtualMachine.State.Stopped.equals(vm.getState())) { - List vmVolumes = volumeDao.findByInstance(vm.getId()); - vmVolumes.sort(Comparator.comparing(Volume::getDeviceId)); - Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(vmVolumes); - command.setVolumePools(volumePoolsAndPaths.first()); - command.setVolumePaths(volumePoolsAndPaths.second()); + private boolean shouldUseIncrementalBackup(VirtualMachine vm, Backup latestBackup, Host vmHost) { + if (latestBackup == null) { + return false; + } + loadBackupDetailsIfNeeded(latestBackup); + + Long clusterId = getClusterIdFromRootVolume(vm); + if (clusterId == null) { + return false; + } + + if (!Boolean.TRUE.equals(KvmIncrementalBackup.valueIn(clusterId))) { + return false; + } + + return canContinueIncrementalChain(vm, latestBackup, vmHost) && getBackupChainSize(vm, latestBackup) < getEffectiveIncrementalLimit(vm); + } + + private int getEffectiveIncrementalLimit(VirtualMachine vm) { + int effectiveLimit = BackupDeltaMax.value(); + List schedules = backupScheduleDao.listByVM(vm.getId()); + for (BackupScheduleVO schedule : 
schedules) { + if (schedule != null && schedule.getMaxBackups() > 0) { + effectiveLimit = Math.min(effectiveLimit, schedule.getMaxBackups()); + } + } + return effectiveLimit; + } + + private boolean canContinueIncrementalChain(VirtualMachine vm, Backup latestBackup, Host vmHost) { + final String backupEngine = getBackupDetail(latestBackup, DETAIL_BACKUP_ENGINE); + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + LOG.debug("Allowing Commvault incremental backup for VM [{}] on host [{}] using RBD chain from previous stage host [{}]", + vm.getInstanceName(), vmHost.getName(), getBackupDetail(latestBackup, DETAIL_STAGE_HOST)); + return true; + } + + String stageHost = getBackupDetail(latestBackup, DETAIL_STAGE_HOST); + return Objects.equals(stageHost, vmHost.getName()); + } + + private int getBackupChainSize(VirtualMachine vm, Backup latestBackup) { + List backups = backupDao.listByVmId(null, vm.getId()).stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(backup -> Backup.Status.BackedUp.equals(backup.getStatus())) + .peek(backupDao::loadDetails) + .collect(Collectors.toList()); + Map backupsByUuid = backups.stream().collect(Collectors.toMap(BackupVO::getUuid, backup -> backup, (left, right) -> left)); + int chainSize = 1; + Backup current = latestBackup; + while (current != null) { + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + if (parentBackupUuid == null) { + break; + } + current = backupsByUuid.get(parentBackupUuid); + if (current != null) { + chainSize++; + } + } + return chainSize; + } + + private boolean hasDependentBackups(Backup backup) { + List backups = backupDao.listByVmId(null, backup.getVmId()); + return backups.stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(candidate -> !Objects.equals(candidate.getId(), backup.getId())) + .peek(backupDao::loadDetails) + .anyMatch(candidate -> Objects.equals(getBackupDetail(candidate, 
DETAIL_PARENT_BACKUP_UUID), backup.getUuid())); + } + + private BackupVO createBackupObject(VirtualMachine vm, String backupPath, String backupType, Map details) { + BackupVO backup = new BackupVO(); + backup.setVmId(vm.getId()); + backup.setExternalId(backupPath); + backup.setType(backupType); + backup.setDate(new Date()); + long virtualSize = 0L; + for (final Volume volume: volumeDao.findByInstance(vm.getId())) { + if (Volume.State.Ready.equals(volume.getState())) { + virtualSize += volume.getSize(); + } + } + backup.setProtectedSize(virtualSize); + backup.setStatus(Backup.Status.BackingUp); + backup.setBackupOfferingId(vm.getBackupOfferingId()); + backup.setAccountId(vm.getAccountId()); + backup.setDomainId(vm.getDomainId()); + backup.setZoneId(vm.getDataCenterId()); + backup.setName(backupManager.getBackupNameFromVM(vm)); + backup.setDetails(details); + + return backupDao.persist(backup); + } + + private Map getBackupDetails(VirtualMachine vm, String backupPath, String checkpointName, String backupEngine, Backup latestBackup, + boolean incrementalBackup, String stageHost) { + Map details = backupManager.getBackupDetailsFromVM(vm); + details.put(DETAIL_BACKUP_ENGINE, backupEngine); + details.put(DETAIL_STAGE_HOST, stageHost); + details.put(DETAIL_CHECKPOINT_NAME, checkpointName); + details.put(DETAIL_CHECKPOINT_PATH, getCheckpointPath(backupPath, checkpointName, backupEngine)); + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + details.put(DETAIL_RBD_DISK_PATHS, String.join(",", getVolumePoolsAndPaths(volumeDao.findByInstance(vm.getId())).second())); + } + if (!incrementalBackup) { + return details; + } + + details.put(DETAIL_PARENT_BACKUP_UUID, latestBackup.getUuid()); + details.put(DETAIL_PARENT_BACKUP_PATH, latestBackup.getExternalId().substring(0, latestBackup.getExternalId().lastIndexOf(','))); + details.put(DETAIL_PARENT_CHECKPOINT_NAME, getBackupDetail(latestBackup, DETAIL_CHECKPOINT_NAME)); + details.put(DETAIL_PARENT_CHECKPOINT_PATH, 
getBackupDetail(latestBackup, DETAIL_CHECKPOINT_PATH)); + return details; + } + + private String getCheckpointPath(String backupPath, String checkpointName, String backupEngine) { + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + return String.format("%s/checkpoints/%s.meta", backupPath, checkpointName); + } + return String.format("%s/checkpoints/%s.xml", backupPath, checkpointName); + } + + private String getBackupDetail(Backup backup, String key) { + return backup == null ? null : backup.getDetail(key); + } + + private String getBackupDetail(Backup backup, String key, String defaultValue) { + String value = getBackupDetail(backup, key); + return value == null ? defaultValue : value; + } + + private Pair parseExternalId(String externalId) { + if (StringUtils.isBlank(externalId)) { + throw new CloudRuntimeException("Backup externalId is empty"); + } + + final int separatorIndex = externalId.lastIndexOf(','); + if (separatorIndex < 0) { + throw new CloudRuntimeException(String.format("Invalid Commvault backup externalId format: [%s]", externalId)); + } + + final String path = externalId.substring(0, separatorIndex); + final String jobId = externalId.substring(separatorIndex + 1).trim(); + if (StringUtils.isAnyBlank(path, jobId)) { + throw new CloudRuntimeException(String.format("Invalid Commvault backup externalId format: [%s]", externalId)); + } + return new Pair<>(path, jobId); + } + + private void validateNoVmSnapshots(VirtualMachine vm) { + if (CollectionUtils.isNotEmpty(vmSnapshotDao.findByVm(vm.getId()))) { + LOG.debug("Commvault backup provider cannot take backups of a VM [{}] with VM snapshots.", vm); + throw new CloudRuntimeException(String.format("Cannot take backup of VM [%s] as it has VM snapshots.", vm.getUuid())); + } + } + + private BackupExecutionResult executeBackup(VirtualMachine vm, Boolean quiesceVM, Host vmHost, HostVO vmHostVO, AblestackCommvaultClient client, + String planId, String backupPath, String backupContentPath, List vmVolumes, + 
Pair, List> volumePoolsAndPaths, Backup latestBackup, + boolean incrementalBackup, boolean retryAsFullOnFailure) { + final String requestedBackupType = incrementalBackup ? BACKUP_TYPE_INCREMENTAL : BACKUP_TYPE_FULL; + final String checkpointName = backupPath.substring(backupPath.lastIndexOf("/") + 1); + final String backupEngine = areAllVolumesOnRbdPool(volumePoolsAndPaths.first()) ? BACKUP_ENGINE_RBD_DIFF : BACKUP_ENGINE_QCOW2; + final List backupFiles = buildBackupFileNames(vmVolumes, backupEngine, incrementalBackup); + final Map backupDetails = getBackupDetails(vm, backupPath, checkpointName, backupEngine, latestBackup, incrementalBackup, vmHost.getName()); + + BackupVO backupVO = createBackupObject(vm, backupPath, requestedBackupType, backupDetails); + AblestackCommvaultTakeBackupCommand command = new AblestackCommvaultTakeBackupCommand(vm.getInstanceName(), backupPath); + command.setQuiesce(quiesceVM); + command.setVolumePools(volumePoolsAndPaths.first()); + command.setVolumePaths(volumePoolsAndPaths.second()); + command.setBackupType(requestedBackupType); + command.setCheckpointName(checkpointName); + command.setBackupFiles(backupFiles); + if (incrementalBackup && latestBackup != null) { + command.setParentBackupPath(getBackupDetail(latestBackup, DETAIL_PARENT_BACKUP_PATH, + latestBackup.getExternalId().substring(0, latestBackup.getExternalId().lastIndexOf(',')))); + command.setParentCheckpointName(getBackupDetail(latestBackup, DETAIL_CHECKPOINT_NAME)); + command.setParentCheckpointPath(getBackupDetail(latestBackup, DETAIL_CHECKPOINT_PATH)); } BackupAnswer answer; @@ -326,11 +543,10 @@ public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); Ternary credentials = getKVMHyperisorCredentials(vmHostVO); String cmd = String.format(RM_COMMAND, backupPath); - // 생성된 백업 폴더 경로로 해당 백업 세트의 백업 콘텐츠 경로 업데이트 String clientId = client.getClientId(vmHost.getName()); String subClientEntity = 
client.getSubclient(clientId, vm.getInstanceName()); if (subClientEntity == null) { - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to get subclient info commvault api"); + LOG.error("Failed to take backup for VM {} to get subclient info commvault api", vm.getInstanceName()); } else { JSONObject jsonObject = new JSONObject(subClientEntity); String subclientId = String.valueOf(jsonObject.get("subclientId")); @@ -348,15 +564,15 @@ public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { String subclientGUID = String.valueOf(jsonObject.get("subclientGUID")); String subclientName = String.valueOf(jsonObject.get("subclientName")); String csGUID = String.valueOf(jsonObject.get("csGUID")); - boolean upResult = client.updateBackupSet(backupPath, subclientId, clientId, planId, applicationId, backupsetId, instanceId, subclientName, backupsetName); + boolean upResult = client.updateBackupSet(backupContentPath, subclientId, clientId, planId, applicationId, backupsetId, instanceId, subclientName, backupsetName); if (upResult) { String planName = client.getPlanName(planId); String storagePolicyId = client.getStoragePolicyId(planName); if (planName == null || storagePolicyId == null) { - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to get storage policy id commvault api"); + LOG.error("Failed to take backup for VM {} to get storage policy id commvault api", vm.getInstanceName()); } else { - // 백업 실행 - String jobId = client.createBackup(subclientId, storagePolicyId, displayName, commCellName, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, subclientGUID, subclientName, csGUID, backupsetName); + String jobId = client.createBackup(subclientId, storagePolicyId, displayName, commCellName, clientId, companyId, companyName, instanceName, appName, + applicationId, clientName, backupsetId, instanceId, subclientGUID, subclientName, csGUID, backupsetName, requestedBackupType); 
if (jobId != null) { String jobStatus = client.getJobStatus(jobId); String externalId = backupPath + "," + jobId; @@ -382,72 +598,329 @@ public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { } backupVO.setSize(Long.parseLong(size)); backupVO.setStatus(Backup.Status.BackedUp); - List vols = new ArrayList<>(volumeDao.findByInstance(vm.getId())); - backupVO.setBackedUpVolumes(backupManager.createVolumeInfoFromVolumes(vols)); + backupVO.setDetails(backupDetails); + backupVO.setBackedUpVolumes(createVolumeInfoFromVolumes(vmVolumes, backupFiles)); if (backupDao.update(backupVO.getId(), backupVO)) { - executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, cmd); - return new Pair<>(true, backupVO); - } else { - executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, cmd); - throw new CloudRuntimeException("Failed to update backup"); + return BackupExecutionResult.success(backupVO); } - } else { - backupVO.setExternalId(externalId); - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to get details job commvault api"); + throw new CloudRuntimeException("Failed to update backup"); } + backupVO.setExternalId(externalId); + LOG.error("Failed to take backup for VM {} to get details job commvault api", vm.getInstanceName()); } else { backupVO.setExternalId(externalId); - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to create backup job status is " + jobStatus); + LOG.error("Failed to take backup for VM {} to create backup job status is {}", vm.getInstanceName(), jobStatus); } } else { - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to create backup job commvault api"); + LOG.error("Failed to take backup for VM {} to create backup job commvault api", vm.getInstanceName()); } } } else { - LOG.error("Failed to take backup for VM " + vm.getInstanceName() + " to update backupset content path commvault api"); + LOG.error("Failed to take 
backup for VM {} to update backupset content path commvault api", vm.getInstanceName()); } } backupVO.setStatus(Backup.Status.Failed); backupDao.remove(backupVO.getId()); executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, cmd); - return new Pair<>(false, null); + return BackupExecutionResult.failure("Failed to complete Commvault backup job", backupVO); + } + + final String details = answer != null ? answer.getDetails() : "No answer received"; + LOG.error("Failed to take backup for VM {}: {}", vm.getInstanceName(), details); + if (retryAsFullOnFailure) { + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } else if (answer != null && answer.getNeedsCleanup()) { + LOG.error("Backup cleanup failed for VM {}. Leaving the backup in Error state.", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Error); + backupDao.update(backupVO.getId(), backupVO); } else { - LOG.error("Failed to take backup for VM {}: {}", vm.getInstanceName(), answer != null ? answer.getDetails() : "No answer received"); - if (answer.getNeedsCleanup()) { - LOG.error("Backup cleanup failed for VM {}. 
Leaving the backup in Error state.", vm.getInstanceName()); - backupVO.setStatus(Backup.Status.Error); - backupDao.update(backupVO.getId(), backupVO); + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } + return BackupExecutionResult.failure(details, backupVO); + } + + private boolean shouldRetryAsFullAfterIncrementalFailure(BackupExecutionResult result, List vmVolumes) { + if (result == null || result.success) { + return false; + } + if (StringUtils.contains(result.details, MISSING_PARENT_RBD_SNAPSHOT_ERROR)) { + return true; + } + return vmVolumes.size() > 1; + } + + private void cleanupFailedBackupForFullRetry(Backup backup) { + if (backup == null) { + return; + } + backupDao.remove(backup.getId()); + } + + private static final class BackupExecutionResult { + private final boolean success; + private final Backup backup; + private final String details; + + private BackupExecutionResult(boolean success, Backup backup, String details) { + this.success = success; + this.backup = backup; + this.details = details; + } + + private static BackupExecutionResult success(Backup backup) { + return new BackupExecutionResult(true, backup, null); + } + + private static BackupExecutionResult failure(String details, Backup backup) { + return new BackupExecutionResult(false, backup, details); + } + } + + private String buildBackupPath(VirtualMachine vm) { + return String.format("%s/%s/%s", COMMVAULT_DIRECTORY, vm.getInstanceName(), + new SimpleDateFormat("yyyy.MM.dd.HH.mm.ss.SSS").format(new Date())); + } + + private String buildBackupContentPath(VirtualMachine vm) { + return String.format("%s/%s", COMMVAULT_DIRECTORY, vm.getInstanceName()); + } + + private void validateVolumePoolTypes(List volumePools) { + boolean hasRbd = volumePools.stream().anyMatch(pool -> pool.getPoolType() == Storage.StoragePoolType.RBD); + boolean hasNonRbd = volumePools.stream().anyMatch(pool -> pool.getPoolType() != Storage.StoragePoolType.RBD); + if (hasRbd && 
hasNonRbd) { + throw new CloudRuntimeException("Commvault incremental backup does not support VMs with mixed RBD and non-RBD volumes"); + } + } + + private boolean areAllVolumesOnRbdPool(List volumePools) { + return volumePools.stream().allMatch(pool -> pool.getPoolType() == Storage.StoragePoolType.RBD); + } + + private List buildBackupFileNames(List volumes, String backupEngine, boolean incrementalBackup) { + List backupFiles = new ArrayList<>(); + for (VolumeVO volume : volumes) { + String suffix; + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + suffix = incrementalBackup ? ".rbdiff" : ".raw"; } else { - backupVO.setStatus(Backup.Status.Failed); - backupDao.remove(backupVO.getId()); + suffix = ".qcow2"; } - return new Pair<>(false, null); + backupFiles.add(String.format("volume-%s%s", volume.getUuid(), suffix)); } + return backupFiles; } - private BackupVO createBackupObject(VirtualMachine vm, String backupPath) { - BackupVO backup = new BackupVO(); - backup.setVmId(vm.getId()); - backup.setExternalId(backupPath); - backup.setType("FULL"); - backup.setDate(new Date()); - long virtualSize = 0L; - for (final Volume volume: volumeDao.findByInstance(vm.getId())) { - if (Volume.State.Ready.equals(volume.getState())) { - virtualSize += volume.getSize(); + private String createVolumeInfoFromVolumes(List volumes, List backupFiles) { + List infoList = new ArrayList<>(); + for (int i = 0; i < volumes.size(); i++) { + VolumeVO vol = volumes.get(i); + DiskOffering diskOffering = diskOfferingDao.findById(vol.getDiskOfferingId()); + String diskOfferingUuid = diskOffering != null ? 
diskOffering.getUuid() : null; + infoList.add(new Backup.VolumeInfo(vol.getUuid(), backupFiles.get(i), vol.getVolumeType(), vol.getSize(), + vol.getDeviceId(), diskOfferingUuid, vol.getMinIops(), vol.getMaxIops())); + } + return new com.google.gson.Gson().toJson(infoList.toArray(), Backup.VolumeInfo[].class); + } + + private List getBackupFileChains(List backupVolumes, Backup backup) { + return backupVolumes.stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(volume -> getBackupFileChain(volume, backup)) + .collect(Collectors.toList()); + } + + private String getBackupFileChain(Backup.VolumeInfo backupVolume, Backup backup) { + loadBackupDetailsIfNeeded(backup); + List chain = getBackupChain(backupVolume, backup); + return String.join(";", chain); + } + + private List getBackupChain(Backup.VolumeInfo backupVolume, Backup backup) { + loadBackupDetailsIfNeeded(backup); + List chain = new ArrayList<>(); + Backup current = backup; + while (current != null) { + loadBackupDetailsIfNeeded(current); + Backup.VolumeInfo currentVolumeInfo = current.getBackedUpVolumes().stream() + .filter(volume -> Objects.equals(volume.getUuid(), backupVolume.getUuid())) + .findFirst() + .orElse(null); + if (currentVolumeInfo == null) { + break; + } + chain.add(0, getRestoreBackupFilePath(current, currentVolumeInfo)); + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + if (parentBackupUuid == null) { + break; } + current = backupDao.findByUuid(parentBackupUuid); } - backup.setProtectedSize(virtualSize); - backup.setStatus(Backup.Status.BackingUp); - backup.setBackupOfferingId(vm.getBackupOfferingId()); - backup.setAccountId(vm.getAccountId()); - backup.setDomainId(vm.getDomainId()); - backup.setZoneId(vm.getDataCenterId()); - backup.setName(backupManager.getBackupNameFromVM(vm)); - Map details = backupManager.getBackupDetailsFromVM(vm); - backup.setDetails(details); + if (chain.isEmpty()) { + chain.add(backupVolume.getPath()); + 
} + return chain; + } - return backupDao.persist(backup); + private LinkedHashMap getBackupChainStageHosts(Backup backup) { + LinkedHashMap stageHosts = new LinkedHashMap<>(); + Backup current = backup; + while (current != null) { + loadBackupDetailsIfNeeded(current); + String stageHost = getBackupDetail(current, DETAIL_STAGE_HOST); + if (StringUtils.isNotBlank(stageHost)) { + stageHosts.putIfAbsent(stageHost, current); + } + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + if (parentBackupUuid == null) { + break; + } + current = backupDao.findByUuid(parentBackupUuid); + } + return stageHosts; + } + + private List getRestoreSourcePathsForStageHost(Backup backup, String stageHost) { + if (!BACKUP_ENGINE_RBD_DIFF.equals(getBackupDetail(backup, DETAIL_BACKUP_ENGINE))) { + return Collections.singletonList(getRestoreBackupRootPath(backup)); + } + + List restoreSourcePaths = new ArrayList<>(); + Backup current = backup; + while (current != null) { + loadBackupDetailsIfNeeded(current); + String currentStageHost = getBackupDetail(current, DETAIL_STAGE_HOST); + if (Objects.equals(currentStageHost, stageHost)) { + String backupPath = parseExternalId(current.getExternalId()).first(); + if (!restoreSourcePaths.contains(backupPath)) { + restoreSourcePaths.add(0, backupPath); + } + } + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + if (parentBackupUuid == null) { + break; + } + current = backupDao.findByUuid(parentBackupUuid); + } + + if (restoreSourcePaths.isEmpty()) { + restoreSourcePaths.add(getRestoreBackupRootPath(backup)); + } + return restoreSourcePaths; + } + + private void loadBackupDetailsIfNeeded(Backup backup) { + if (backup instanceof BackupVO && backup.getDetails() == null) { + backupDao.loadDetails((BackupVO) backup); + } + } + + private String getRestoreBackupRootPath(Backup backup) { + final String backupPath = parseExternalId(backup.getExternalId()).first(); + if 
(BACKUP_ENGINE_RBD_DIFF.equals(getBackupDetail(backup, DETAIL_BACKUP_ENGINE))) { + return java.nio.file.Path.of(backupPath).getParent().toString(); + } + return backupPath; + } + + private String getRestoreBackupFilePath(Backup backup, Backup.VolumeInfo volumeInfo) { + final String backupPath = parseExternalId(backup.getExternalId()).first(); + final String filePath = volumeInfo.getPath(); + if (BACKUP_ENGINE_RBD_DIFF.equals(getBackupDetail(backup, DETAIL_BACKUP_ENGINE))) { + return java.nio.file.Path.of(backupPath).getFileName().resolve(filePath).toString(); + } + return filePath; + } + + private boolean isLegacyBackup(Backup backup) { + return getBackupDetail(backup, DETAIL_BACKUP_ENGINE) == null; + } + + private List restoreBackupSourcesOnAdditionalHosts(AblestackCommvaultClient client, Backup backup, String executionHostName) { + if (!BACKUP_ENGINE_RBD_DIFF.equals(getBackupDetail(backup, DETAIL_BACKUP_ENGINE))) { + return Collections.emptyList(); + } + + List additionalHosts = new ArrayList<>(); + for (Map.Entry entry : getBackupChainStageHosts(backup).entrySet()) { + String stageHost = entry.getKey(); + if (StringUtils.isBlank(stageHost) || Objects.equals(stageHost, executionHostName)) { + continue; + } + restoreBackupPathsOnStageHost(client, entry.getValue(), getRestoreSourcePathsForStageHost(backup, stageHost)); + additionalHosts.add(stageHost); + } + return additionalHosts; + } + + private void restoreBackupPathsOnStageHost(AblestackCommvaultClient client, Backup backup, List restoreSourcePaths) { + final Pair externalIdParts = parseExternalId(backup.getExternalId()); + final String jobId = externalIdParts.second(); + String jobDetails = client.getJobDetails(jobId); + if (jobDetails == null) { + throw new CloudRuntimeException("Failed to get job details commvault api"); + } + + JSONObject jsonObject = new JSONObject(jobDetails); + String endTime = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("detailInfo").get("endTime")); + String subclientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("subclientId")); + String displayName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("displayName")); + String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); + String companyId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyId")); + String companyName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("company").get("companyName")); + String instanceName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceName")); + String appName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("appName")); + String applicationId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("applicationId")); + String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); + String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); + String instanceId = 
String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("instanceId")); + String backupsetName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetName")); + String commCellId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("commcell").get("commCellId")); + String backupsetGUID = client.getVmBackupSetGuid(clientName, backupsetName); + if (backupsetGUID == null) { + throw new CloudRuntimeException("Failed to get vm backup set guid commvault api"); + } + + String restoreJobId = client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, + appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, restoreSourcePaths); + if (restoreJobId == null) { + throw new CloudRuntimeException("Failed to restore Full VM commvault api"); + } + + String jobStatus = client.getJobStatus(restoreJobId); + if (!jobStatus.equalsIgnoreCase("Completed")) { + throw new CloudRuntimeException("Failed to restore Full VM commvault api resulted in " + jobStatus); + } + } + + private void cleanupBackupPathOnAdditionalHosts(List hostNames, String backupPath) { + if (hostNames == null || hostNames.isEmpty()) { + return; + } + int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); + String command = String.format(RM_COMMAND, backupPath); + for (String hostName : hostNames) { + if (StringUtils.isBlank(hostName)) { + continue; + } + HostVO host = hostDao.findByName(hostName); + if (host == null) { + continue; + } + try { + Ternary credentials = getKVMHyperisorCredentials(host); + executeDeleteBackupPathCommand(host, credentials.first(), credentials.second(), sshPort, command); + } catch (Exception e) { + LOG.warn("Failed to cleanup Commvault restore source 
path [{}] on host [{}]", backupPath, hostName, e); + } + } + } + + private String getLegacyBackupFileName(Backup.VolumeInfo backupVolumeInfo) { + String diskType = Volume.Type.ROOT.equals(backupVolumeInfo.getType()) ? "root" : "datadisk"; + return String.format("%s.%s.qcow2", diskType, backupVolumeInfo.getUuid()); } // 백업에서 새 인스턴스 생성 @@ -463,15 +936,18 @@ public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { } private Pair restoreVMBackup(VirtualMachine vm, Backup backup) { + loadBackupDetailsIfNeeded(backup); try { String commvaultServer = getUrlDomain(CommvaultUrl.value()); } catch (URISyntaxException e) { throw new CloudRuntimeException(String.format("Failed to convert API to HOST : %s", e)); } - final CommvaultClient client = getClient(vm.getDataCenterId()); + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); final String externalId = backup.getExternalId(); - String jobId = externalId.substring(externalId.lastIndexOf(',') + 1).trim(); - final String path = externalId.substring(0, externalId.lastIndexOf(',')); + final Pair externalIdParts = parseExternalId(externalId); + final String path = externalIdParts.first(); + final String restoreSourcePath = getRestoreBackupRootPath(backup); + final String jobId = externalIdParts.second(); String jobDetails = client.getJobDetails(jobId); if (jobDetails == null) { throw new CloudRuntimeException("Failed to get job details commvault api"); @@ -498,12 +974,14 @@ private Pair restoreVMBackup(VirtualMachine vm, Backup backup) // 복원된 호스트 정의 final HostVO restoreHost = hostDao.findByName(clientName); final HostVO restoreHostVO = hostDao.findById(restoreHost.getId()); + final List additionalSourceHosts = restoreBackupSourcesOnAdditionalHosts(client, backup, clientName); + final List restoreSourcePaths = getRestoreSourcePathsForStageHost(backup, clientName); LOG.info(String.format("Restoring vm %s from backup %s on the Commvault Backup Provider", vm, backup)); - // 복원 실행 - String jobId2 = 
client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, path); - if (jobId2 != null) { - String jobStatus = client.getJobStatus(jobId2); - if (jobStatus.equalsIgnoreCase("Completed")) { + try { + String jobId2 = client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, restoreSourcePaths); + if (jobId2 != null) { + String jobStatus = client.getJobStatus(jobId2); + if (jobStatus.equalsIgnoreCase("Completed")) { List backedVolumesUUIDs = backup.getBackedUpVolumes().stream() .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) .map(Backup.VolumeInfo::getUuid) @@ -514,26 +992,35 @@ private Pair restoreVMBackup(VirtualMachine vm, Backup backup) .collect(Collectors.toList()); LOG.debug("Restoring vm {} from backup {} on the Commvault Backup Provider", vm, backup); - // 가상머신이 실행중인 호스트 정의 - final Host vmHost = getVMHypervisorHost(vm); - final HostVO vmHostVO = hostDao.findById(vmHost.getId()); - CommvaultRestoreBackupCommand restoreCommand = new CommvaultRestoreBackupCommand(); - LOG.info(path); - restoreCommand.setBackupPath(path); + AblestackCommvaultRestoreBackupCommand restoreCommand = new AblestackCommvaultRestoreBackupCommand(); + LOG.info(restoreSourcePath); + restoreCommand.setBackupPath(restoreSourcePath); restoreCommand.setVmName(vm.getName()); restoreCommand.setBackupVolumesUUIDs(backedVolumesUUIDs); + if (isLegacyBackup(backup)) { + restoreCommand.setBackupFiles(backup.getBackedUpVolumes().stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(this::getLegacyBackupFileName) + .collect(Collectors.toList())); + } else { + restoreCommand.setBackupFiles(backup.getBackedUpVolumes().stream() + 
.sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(volume -> getRestoreBackupFilePath(backup, volume)) + .collect(Collectors.toList())); + restoreCommand.setBackupFileChains(getBackupFileChains(backup.getBackedUpVolumes(), backup)); + } Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(restoreVolumes); restoreCommand.setRestoreVolumePools(volumePoolsAndPaths.first()); restoreCommand.setRestoreVolumePaths(volumePoolsAndPaths.second()); restoreCommand.setVmExists(vm.getRemoved() == null); restoreCommand.setVmState(vm.getState()); restoreCommand.setTimeout(CommvaultBackupRestoreTimeout.value()); - // 복원된 호스트와 가상머신이 실행중인 호스트가 같은 경우 null, 다른 경우 추가 - restoreCommand.setHostName(restoreHost.getId() == vmHost.getId() ? null : restoreHost.getName()); + restoreCommand.setHostName(null); + restoreCommand.setBackupSourceHosts(additionalSourceHosts); BackupAnswer answer; try { - answer = (BackupAnswer) agentManager.send(vmHost.getId(), restoreCommand); + answer = (BackupAnswer) agentManager.send(restoreHost.getId(), restoreCommand); } catch (AgentUnavailableException e) { throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); } catch (OperationTimedoutException e) { @@ -541,21 +1028,19 @@ private Pair restoreVMBackup(VirtualMachine vm, Backup backup) } if (!answer.getResult()) { int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); - Ternary credentials = getKVMHyperisorCredentials(vmHostVO); - String command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, command); - if (restoreHost.getId() != vmHost.getId()) { - credentials = getKVMHyperisorCredentials(restoreHostVO); - command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); - } + Ternary credentials = getKVMHyperisorCredentials(restoreHostVO); + String command 
= String.format(RM_COMMAND, restoreSourcePath); + executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); } return new Pair<>(answer.getResult(), answer.getDetails()); + } else { + throw new CloudRuntimeException("Failed to restore Full VM commvault api resulted in " + jobStatus); + } } else { - throw new CloudRuntimeException("Failed to restore Full VM commvault api resulted in " + jobStatus); + throw new CloudRuntimeException("Failed to restore Full VM commvault api"); } - } else { - throw new CloudRuntimeException("Failed to restore Full VM commvault api"); + } finally { + cleanupBackupPathOnAdditionalHosts(additionalSourceHosts, restoreSourcePath); } } @@ -593,6 +1078,7 @@ private String getVolumePathPrefix(StoragePoolVO storagePool) { // 백업 볼륨 복원 및 연결 @Override public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + loadBackupDetailsIfNeeded(backup); try { String commvaultServer = getUrlDomain(CommvaultUrl.value()); } catch (URISyntaxException e) { @@ -600,9 +1086,11 @@ public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeI } final String externalId = backup.getExternalId(); final Long zoneId = backup.getZoneId(); - final CommvaultClient client = getClient(zoneId); - String jobId = externalId.substring(externalId.lastIndexOf(',') + 1).trim(); - final String path = externalId.substring(0, externalId.lastIndexOf(',')); + final AblestackCommvaultClient client = getClient(zoneId); + final Pair externalIdParts = parseExternalId(externalId); + final String path = externalIdParts.first(); + final String restoreSourcePath = getRestoreBackupRootPath(backup); + final String jobId = externalIdParts.second(); String jobDetails = client.getJobDetails(jobId); if (jobDetails == null) { throw new CloudRuntimeException("Failed to get job details commvault api"); @@ -626,12 +1114,13 @@ public Pair restoreBackedUpVolume(Backup 
backup, Backup.VolumeI if (backupsetGUID == null) { throw new CloudRuntimeException("Failed to get vm backup set guid commvault api"); } - // 복원 실행 - String jobId2 = client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, path); - if (jobId2 != null) { - String jobStatus = client.getJobStatus(jobId2); - if (jobStatus.equalsIgnoreCase("Completed")) { - final int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); + final List restoreSourcePaths = getRestoreSourcePathsForStageHost(backup, clientName); + final List additionalSourceHosts = restoreBackupSourcesOnAdditionalHosts(client, backup, clientName); + try { + String jobId2 = client.restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, restoreSourcePaths); + if (jobId2 != null) { + String jobStatus = client.getJobStatus(jobId2); + if (jobStatus.equalsIgnoreCase("Completed")) { final VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); String cacheMode = null; @@ -645,9 +1134,6 @@ public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeI } } final StoragePoolVO pool = primaryDataStoreDao.findByUuid(dataStoreUuid); - // 백업 볼륨 복원 및 연결 시 연결할 가상머신이 실행중인 경우 해당 호스트, 정지중인 경우 랜덤 호스트 정의백업 - final HostVO vmHost = hostDao.findByIp(hostIp); - final HostVO vmHostVO = hostDao.findById(vmHost.getId()); // 복원된 호스트 정의 final HostVO restoreHost = hostDao.findByName(clientName); final HostVO restoreHostVO = hostDao.findById(restoreHost.getId()); @@ -676,9 +1162,13 @@ public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeI restoredVolume.setFormat(Storage.ImageFormat.RAW); } - 
CommvaultRestoreBackupCommand restoreCommand = new CommvaultRestoreBackupCommand(); - restoreCommand.setBackupPath(path); + AblestackCommvaultRestoreBackupCommand restoreCommand = new AblestackCommvaultRestoreBackupCommand(); + restoreCommand.setBackupPath(restoreSourcePath); restoreCommand.setVmName(vmNameAndState.first()); + restoreCommand.setBackupFiles(Collections.singletonList(isLegacyBackup(backup) ? getLegacyBackupFileName(backupVolumeInfo) : getRestoreBackupFilePath(backup, backupVolumeInfo))); + if (!isLegacyBackup(backup)) { + restoreCommand.setBackupFileChains(Collections.singletonList(getBackupFileChain(backupVolumeInfo, backup))); + } restoreCommand.setRestoreVolumePaths(Collections.singletonList(String.format("%s/%s", getVolumePathPrefix(pool), volumeUUID))); DataStore dataStore = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); restoreCommand.setRestoreVolumePools(Collections.singletonList(dataStore != null ? (PrimaryDataStoreTO)dataStore.getTO() : null)); @@ -688,12 +1178,12 @@ public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeI restoreCommand.setRestoreVolumeUUID(backupVolumeInfo.getUuid()); restoreCommand.setTimeout(CommvaultBackupRestoreTimeout.value()); restoreCommand.setCacheMode(cacheMode); - // 복원된 호스트와 가상머신이 실행중인 호스트가 같은 경우 null, 다른 경우 추가 - restoreCommand.setHostName(restoreHost.getId() == vmHost.getId() ? 
null : restoreHost.getName()); + restoreCommand.setHostName(null); + restoreCommand.setBackupSourceHosts(additionalSourceHosts); BackupAnswer answer; try { - answer = (BackupAnswer) agentManager.send(vmHost.getId(), restoreCommand); + answer = (BackupAnswer) agentManager.send(restoreHost.getId(), restoreCommand); } catch (AgentUnavailableException e) { throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); } catch (OperationTimedoutException e) { @@ -706,27 +1196,21 @@ public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeI } catch (Exception e) { throw new CloudRuntimeException("Unable to create restored volume due to: " + e); } - if (restoreHost.getId() != vmHost.getId()) { - Ternary credentials = getKVMHyperisorCredentials(restoreHostVO); - String command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); - } return new Pair<>(answer.getResult(), answer.getDetails()); } else { - Ternary credentials = getKVMHyperisorCredentials(vmHostVO); - String command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(vmHostVO, credentials.first(), credentials.second(), sshPort, command); - if (restoreHost.getId() != vmHost.getId()) { - credentials = getKVMHyperisorCredentials(restoreHostVO); - command = String.format(RM_COMMAND, path); - executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); - } + final int sshPort = NumbersUtil.parseInt(configDao.getValue("kvm.ssh.port"), 22); + Ternary credentials = getKVMHyperisorCredentials(restoreHostVO); + String command = String.format(RM_COMMAND, restoreSourcePath); + executeDeleteBackupPathCommand(restoreHostVO, credentials.first(), credentials.second(), sshPort, command); + } + } else { + LOG.error("Failed to restore backup for VM " + vmNameAndState.first() + " to restore backup job status is " + jobStatus); } } else { - 
LOG.error("Failed to restore backup for VM " + vmNameAndState.first() + " to restore backup job status is " + jobStatus); + LOG.error("Failed to restore backup for VM " + vmNameAndState.first() + " to restore backup job commvault api"); } - } else { - LOG.error("Failed to restore backup for VM " + vmNameAndState.first() + " to restore backup job commvault api"); + } finally { + cleanupBackupPathOnAdditionalHosts(additionalSourceHosts, restoreSourcePath); } return new Pair<>(false, null); } @@ -739,11 +1223,16 @@ private Optional getBackedUpVolumeInfo(List externalIdParts = parseExternalId(externalId); + final String path = externalIdParts.first(); + final String jobId = externalIdParts.second(); + final AblestackCommvaultClient client = getClient(zoneId); String jobDetails = client.getJobDetails(jobId); if (jobDetails != null) { JSONObject jsonObject = new JSONObject(jobDetails); @@ -753,7 +1242,11 @@ public boolean deleteBackup(Backup backup, boolean forced) { String clientId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientId")); String clientName = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("clientName")); String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); - return client.deleteBackup(subclientId, applicationId, applicationId, clientId, clientName, backupsetId, path); + boolean result = client.deleteBackup(subclientId, applicationId, applicationId, clientId, clientName, backupsetId, path); + if (result) { + cleanupBackupPathOnStageHost(clientName, path, forced, getBackupDetail(backup, DETAIL_CHECKPOINT_NAME), getBackupDetail(backup, DETAIL_RBD_DISK_PATHS)); + } + return result; } else { throw new CloudRuntimeException("Failed to request backup job detail 
commvault api"); } @@ -774,7 +1267,7 @@ public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoi @Override public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { - final CommvaultClient client = getClient(vm.getDataCenterId()); + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); final Host host = getVMHypervisorHostForBackup(vm); String clientId = client.getClientId(host.getName()); String applicationId = client.getApplicationId(clientId); @@ -783,7 +1276,7 @@ public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backup @Override public boolean removeVMFromBackupOffering(VirtualMachine vm) { - final CommvaultClient client = getClient(vm.getDataCenterId()); + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); List Hosts = hostDao.findByDataCenterId(vm.getDataCenterId()); boolean allDeleted = true; for (final HostVO host : Hosts) { @@ -865,7 +1358,7 @@ public ConfigKey[] getConfigKeys() { @Override public String getName() { - return "commvault"; + return "ablestack-commvault"; } @Override @@ -885,11 +1378,19 @@ public void syncBackups(VirtualMachine vm) { } catch (URISyntaxException e) { return; } - final CommvaultClient client = getClient(vm.getDataCenterId()); + final AblestackCommvaultClient client = getClient(vm.getDataCenterId()); for (final Backup backup: backupDao.listByVmId(vm.getDataCenterId(), vm.getId())) { - String externalId = backup.getExternalId(); - String jobId = externalId.substring(externalId.lastIndexOf(',') + 1).trim(); - String path = externalId.substring(0, externalId.lastIndexOf(',')); + loadBackupDetailsIfNeeded(backup); + final String externalId = backup.getExternalId(); + final Pair externalIdParts; + try { + externalIdParts = parseExternalId(externalId); + } catch (CloudRuntimeException e) { + LOG.warn("Skipping Commvault backup sync for backup [{}] due to invalid externalId [{}]", backup.getUuid(), externalId); + 
continue; + } + final String jobId = externalIdParts.second(); + final String path = externalIdParts.first(); String jobDetails = client.getJobDetails(jobId); if (jobDetails != null) { JSONObject jsonObject = new JSONObject(jobDetails); @@ -911,6 +1412,7 @@ public void syncBackups(VirtualMachine vm) { String backupsetId = String.valueOf(jsonObject.getJSONObject("job").getJSONObject("jobDetail").getJSONObject("generalInfo").getJSONObject("subclient").get("backupsetId")); boolean result = client.deleteBackup(subclientId, applicationId, applicationId, clientId, clientName, backupsetId, path); if (result) { + cleanupBackupPathOnStageHost(clientName, path, false, getBackupDetail(backup, DETAIL_CHECKPOINT_NAME), getBackupDetail(backup, DETAIL_RBD_DISK_PATHS)); backupDao.remove(backup.getId()); } } @@ -922,7 +1424,7 @@ public void syncBackups(VirtualMachine vm) { @Override public boolean checkBackupAgent(final Long zoneId) { Map checkResult = new HashMap<>(); - final CommvaultClient client = getClient(zoneId); + final AblestackCommvaultClient client = getClient(zoneId); String csVersionInfo = client.getCvtVersion(); boolean version = versionCheck(csVersionInfo); if (version) { @@ -952,7 +1454,7 @@ public boolean checkBackupAgent(final Long zoneId) { @Override public boolean installBackupAgent(final Long zoneId) { Map failResult = new HashMap<>(); - final CommvaultClient client = getClient(zoneId); + final AblestackCommvaultClient client = getClient(zoneId); List Hosts = hostDao.findByDataCenterId(zoneId); for (final HostVO host : Hosts) { if (host.getStatus() == Status.Up && host.getHypervisorType() == Hypervisor.HypervisorType.KVM) { @@ -1007,7 +1509,7 @@ public boolean installBackupAgent(final Long zoneId) { @Override public boolean importBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { - final CommvaultClient client = getClient(zoneId); + final AblestackCommvaultClient client = getClient(zoneId); // 선택한 백업 정책의 RPO 편집 Commvault API 
호출 String type = "deleteRpo"; String taskId = client.getScheduleTaskId(type, externalId); @@ -1057,7 +1559,7 @@ public boolean importBackupPlan(final Long zoneId, final String retentionPeriod, @Override public boolean updateBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { - final CommvaultClient client = getClient(zoneId); + final AblestackCommvaultClient client = getClient(zoneId); String type = "updateRpo"; String planEntity = client.getScheduleTaskId(type, externalId); JSONObject jsonObject = new JSONObject(planEntity); @@ -1085,9 +1587,9 @@ private static String getUrlDomain(String url) throws URISyntaxException { return uri.getHost(); } - private CommvaultClient getClient(final Long zoneId) { + private AblestackCommvaultClient getClient(final Long zoneId) { try { - return new CommvaultClient(CommvaultUrl.valueIn(zoneId), CommvaultUsername.valueIn(zoneId), CommvaultPassword.valueIn(zoneId), + return new AblestackCommvaultClient(CommvaultUrl.valueIn(zoneId), CommvaultUsername.valueIn(zoneId), CommvaultPassword.valueIn(zoneId), CommvaultValidateSSLSecurity.valueIn(zoneId), CommvaultApiRequestTimeout.valueIn(zoneId)); } catch (URISyntaxException e) { throw new CloudRuntimeException("Failed to parse Commvault API URL: " + e.getMessage()); @@ -1130,6 +1632,28 @@ private boolean executeDeleteBackupPathCommand(HostVO host, String username, Str return false; } + private void cleanupBackupPathOnStageHost(String clientName, String path, boolean forced, String checkpointName, String diskPaths) { + HostVO stageHost = hostDao.findByName(clientName); + if (stageHost == null) { + throw new CloudRuntimeException(String.format("Unable to find stage host [%s] for backup cleanup", clientName)); + } + AblestackDeleteBackupCommand command = new AblestackDeleteBackupCommand(path, null, null, null, forced); + command.setBackupProvider("ablestack-commvault"); + command.setCheckpointName(checkpointName); + command.setDiskPaths(diskPaths); + try { + 
BackupAnswer answer = (BackupAnswer) agentManager.send(stageHost.getId(), command); + if (answer == null || !answer.getResult()) { + throw new CloudRuntimeException(String.format("Failed to delete Commvault backup path on host %s due to: %s", + stageHost.getName(), answer != null ? answer.getDetails() : "no answer received")); + } + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to delete Commvault backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to delete Commvault backup timed out, please try again"); + } + } + public static boolean isRetentionExpired(String retainedUntil) { if (retainedUntil == null || retainedUntil.trim().isEmpty() || "null".equals(retainedUntil)) { return false; @@ -1171,4 +1695,4 @@ public static boolean versionCheck(String csVersionInfo) { return true; } -} \ No newline at end of file +} diff --git a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultBackupOffering.java b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultBackupOffering.java similarity index 90% rename from plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultBackupOffering.java rename to plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultBackupOffering.java index c72cd6cd2aa3..9565f9961ff6 100644 --- a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultBackupOffering.java +++ b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultBackupOffering.java @@ -19,12 +19,12 @@ import org.apache.cloudstack.backup.BackupOffering; import java.util.Date; -public class CommvaultBackupOffering implements BackupOffering { +public class AblestackCommvaultBackupOffering implements BackupOffering { private String 
name; private String uid; - public CommvaultBackupOffering(String name, String uid) { + public AblestackCommvaultBackupOffering(String name, String uid) { this.name = name; this.uid = uid; } @@ -56,7 +56,7 @@ public boolean isUserDrivenBackupAllowed() { @Override public String getProvider() { - return "commvault"; + return "ablestack-commvault"; } @Override diff --git a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultClient.java b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultClient.java similarity index 97% rename from plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultClient.java rename to plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultClient.java index 9623b2e34049..66c75b17ca4c 100644 --- a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultClient.java +++ b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultClient.java @@ -66,8 +66,8 @@ import java.util.List; import java.util.Set; -public class CommvaultClient { - private static final Logger LOG = LogManager.getLogger(CommvaultClient.class); +public class AblestackCommvaultClient { + private static final Logger LOG = LogManager.getLogger(AblestackCommvaultClient.class); private final URI apiURI; private final String apiName; private final String apiPassword; @@ -78,7 +78,7 @@ public class CommvaultClient { private String cvtServerPassword; private final int cvtServerPort = 22; - public CommvaultClient(final String url, final String username, final String password, final boolean validateCertificate, final int timeout) throws URISyntaxException, NoSuchAlgorithmException, KeyManagementException { + public AblestackCommvaultClient(final String url, final String username, final String password, final boolean validateCertificate, final 
int timeout) throws URISyntaxException, NoSuchAlgorithmException, KeyManagementException { apiName = username; apiPassword = password; @@ -251,7 +251,7 @@ public List listPlans() { if (!planDetails.isMissingNode()) { String planId = planDetails.path("planId").asText(); String planName = planDetails.path("planName").asText(); - offerings.add(new CommvaultBackupOffering(planName, planId)); + offerings.add(new AblestackCommvaultBackupOffering(planName, planId)); } } } @@ -964,8 +964,14 @@ public boolean updateBackupSet(String path, String subclientId, String clientId, // POST https:///commandcenter/api/subclient//action/backup 테스트 시 Incremental 백업으로 반환되어 사용 x // POST https:///commandcenter/api/createtask // 백업 실행 API - public String createBackup(String subclientId, String storagePolicyId, String displayName, String commCellName, String clientId, String companyId, String companyName, String instanceName, String appName, String applicationId, String clientName, String backupsetId, String instanceId, String subclientGUID, String subclientName, String csGUID, String backupsetName) { + public String createBackup(String subclientId, String storagePolicyId, String displayName, String commCellName, String clientId, String companyId, String companyName, String instanceName, + String appName, String applicationId, String clientName, String backupsetId, String instanceId, String subclientGUID, String subclientName, String csGUID, + String backupsetName, String backupType) { HttpURLConnection connection = null; + final boolean incrementalBackup = "INCREMENTAL".equalsIgnoreCase(backupType); + final String backupLevel = incrementalBackup ? "INCREMENTAL" : "FULL"; + final String runIncrementalBackup = incrementalBackup ? "true" : "false"; + final String forceFullBackup = incrementalBackup ? 
"false" : "true"; String postUrl = apiURI.toString() + "/createtask"; try { URL url = new URL(postUrl); @@ -1012,9 +1018,9 @@ public String createBackup(String subclientId, String storagePolicyId, String di "}," + "\"options\":{" + "\"backupOpts\":{" + - "\"backupLevel\":\"FULL\"," + - "\"runIncrementalBackup\":false," + - "\"forceFullBackup\":true" + + "\"backupLevel\":\"%s\"," + + "\"runIncrementalBackup\":%s," + + "\"forceFullBackup\":%s" + "}," + "\"commonOpts\":{" + "\"overrideStoragePolicySettings\":true," + @@ -1027,7 +1033,8 @@ public String createBackup(String subclientId, String storagePolicyId, String di Integer.parseInt(subclientId), Integer.parseInt(storagePolicyId), displayName, commCellName, Integer.parseInt(clientId), Integer.parseInt(companyId), companyName, instanceName, appName, Integer.parseInt(applicationId), clientName, Integer.parseInt(backupsetId), - Integer.parseInt(instanceId), subclientGUID, subclientName, csGUID, backupsetName + Integer.parseInt(instanceId), subclientGUID, subclientName, csGUID, backupsetName, + backupLevel, runIncrementalBackup, forceFullBackup ); try (OutputStream os = connection.getOutputStream()) { byte[] input = jsonBody.getBytes(StandardCharsets.UTF_8); @@ -1379,9 +1386,9 @@ public String restoreFullVM(String subclientId, String displayName, String backu + "}," + "\"commonOptions\":{" + "\"overwriteFiles\":true," - + "\"unconditionalOverwrite\":false," + + "\"unconditionalOverwrite\":true," + "\"stripLevelType\":\"PRESERVE_LEVEL\"," - + "\"preserveLevel\":1," + + "\"preserveLevel\":0," + "\"isFromBrowseBackup\":true" + "}" + "}" @@ -1423,6 +1430,13 @@ public String restoreFullVM(String subclientId, String displayName, String backu return null; } + public String restoreFullVM(String subclientId, String displayName, String backupsetGUID, String clientId, String companyId, String companyName, String instanceName, + String appName, String applicationId, String clientName, String backupsetId, String instanceId, String 
backupsetName, + String commCellId, String endTime, List paths) { + return restoreFullVM(subclientId, displayName, backupsetGUID, clientId, companyId, companyName, instanceName, appName, + applicationId, clientName, backupsetId, instanceId, backupsetName, commCellId, endTime, String.join(",", paths)); + } + // GET https:///commandcenter/api/commcell/properties // 에이전트 설치를 위한 commcell 정보 조회 API public String getCommcell() { @@ -1727,4 +1741,4 @@ private String convertPathToJsonArray(String path) { jsonArray.append("]"); return jsonArray.toString(); } -} \ No newline at end of file +} diff --git a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultObject.java b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultObject.java similarity index 91% rename from plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultObject.java rename to plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultObject.java index a0fe576786d3..def6234db775 100644 --- a/plugins/backup/commvault/src/main/java/org/apache/cloudstack/backup/commvault/CommvaultObject.java +++ b/plugins/backup/ablestack-commvault/src/main/java/org/apache/cloudstack/backup/commvault/AblestackCommvaultObject.java @@ -18,10 +18,10 @@ import java.util.List; -public interface CommvaultObject { +public interface AblestackCommvaultObject { String getUuid(); String getName(); String getHref(); String getType(); - List getLinks(); + List getLinks(); } diff --git a/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/module.properties b/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/module.properties new file mode 100644 index 000000000000..de14814cfa68 --- /dev/null +++ 
b/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=ablestack-commvault +parent=backup diff --git a/plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/spring-backup-commvault-context.xml b/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/spring-backup-commvault-context.xml similarity index 85% rename from plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/spring-backup-commvault-context.xml rename to plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/spring-backup-commvault-context.xml index 11b0848c8577..ca2b2cd38cbe 100644 --- a/plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/spring-backup-commvault-context.xml +++ b/plugins/backup/ablestack-commvault/src/main/resources/META-INF/cloudstack/ablestack-commvault/spring-backup-commvault-context.xml @@ -20,7 +20,7 @@ http://www.springframework.org/schema/beans/spring-beans-3.0.xsd" > - - + + diff --git a/plugins/backup/commvault/pom.xml b/plugins/backup/ablestack-nas/pom.xml similarity index 93% 
rename from plugins/backup/commvault/pom.xml rename to plugins/backup/ablestack-nas/pom.xml index f824d49eb423..97ed28f479aa 100644 --- a/plugins/backup/commvault/pom.xml +++ b/plugins/backup/ablestack-nas/pom.xml @@ -20,8 +20,8 @@ 4.0.0 - cloud-plugin-backup-commvault - Apache CloudStack Plugin - KVM Commvault Backup and Recovery Plugin + cloud-plugin-backup-ablestack-nas + Ablestack Plugin - KVM NAS Backup and Recovery Plugin cloudstack-plugins org.apache.cloudstack diff --git a/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupOffering.java b/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupOffering.java new file mode 100644 index 000000000000..b7b6beab6ce9 --- /dev/null +++ b/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupOffering.java @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import java.util.Date; + +public class AblestackNasBackupOffering implements BackupOffering { + + private String name; + private String uid; + + public AblestackNasBackupOffering(String name, String uid) { + this.name = name; + this.uid = uid; + } + + @Override + public String getExternalId() { + return uid; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getDescription() { + return "NAS Backup Offering (Repository)"; + } + + @Override + public long getZoneId() { + return -1; + } + + @Override + public boolean isUserDrivenBackupAllowed() { + return true; + } + + @Override + public String getProvider() { + return "ablestack-nas"; + } + + @Override + public Date getCreated() { + return null; + } + + @Override + public String getUuid() { + return uid; + } + + @Override + public long getId() { + return -1; + } + + @Override + public String getRetentionPeriod() { + return null; + } +} diff --git a/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupProvider.java b/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupProvider.java new file mode 100644 index 000000000000..e6efc16d3435 --- /dev/null +++ b/plugins/backup/ablestack-nas/src/main/java/org/apache/cloudstack/backup/AblestackNasBackupProvider.java @@ -0,0 +1,1052 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import com.cloud.agent.AgentManager; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.offering.DiskOffering; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; +import com.cloud.storage.Volume; +import com.cloud.storage.Volume.Type; +import com.cloud.storage.VolumeApiServiceImpl; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.Pair; +import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; + + +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupRepositoryDao; +import org.apache.cloudstack.backup.dao.BackupScheduleDao; +import 
org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import javax.inject.Inject; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.UUID; +import java.util.stream.Collectors; + +import static org.apache.cloudstack.backup.BackupManager.BackupDeltaMax; +import static org.apache.cloudstack.backup.BackupManager.BackupFrameworkEnabled; +import static org.apache.cloudstack.backup.BackupManager.KvmIncrementalBackup; + +public class AblestackNasBackupProvider extends AdapterBase implements BackupProvider, Configurable { + private static final Logger LOG = LogManager.getLogger(AblestackNasBackupProvider.class); + private static final String BACKUP_TYPE_FULL = "FULL"; + private static final String BACKUP_TYPE_INCREMENTAL = "INCREMENTAL"; + private static final String BACKUP_ENGINE_QCOW2 = "QCOW2"; + private static final String BACKUP_ENGINE_RBD_DIFF = "RBD_DIFF"; + private static final String DETAIL_CHECKPOINT_NAME = "nas.checkpoint.name"; + private static final String DETAIL_CHECKPOINT_PATH = "nas.checkpoint.path"; + private static final String DETAIL_PARENT_BACKUP_UUID = "nas.parent.backup.uuid"; + private static final String 
DETAIL_PARENT_BACKUP_PATH = "nas.parent.backup.path"; + private static final String DETAIL_PARENT_CHECKPOINT_NAME = "nas.parent.checkpoint.name"; + private static final String DETAIL_PARENT_CHECKPOINT_PATH = "nas.parent.checkpoint.path"; + private static final String DETAIL_BACKUP_ENGINE = "nas.backup.engine"; + private static final String DETAIL_RBD_DISK_PATHS = "nas.rbd.disk.paths"; + private static final String MISSING_PARENT_RBD_SNAPSHOT_ERROR = "Parent RBD snapshot"; + + ConfigKey NASBackupRestoreMountTimeout = new ConfigKey<>("Advanced", Integer.class, + "nas.backup.restore.mount.timeout", + "30", + "Timeout in seconds after which backup repository mount for restore fails.", + true, + BackupFrameworkEnabled.key()); + + ConfigKey NASBackupRestoreTimeout = new ConfigKey<>("Advanced", Integer.class, + "nas.backup.restore.timeout", + "1800", + "Timeout in seconds after which NAS backup restore operations fail.", + true, + BackupFrameworkEnabled.key()); + + @Inject + private BackupDao backupDao; + + @Inject + private BackupRepositoryDao backupRepositoryDao; + + @Inject + private BackupRepositoryService backupRepositoryService; + + @Inject + private HostDao hostDao; + + @Inject + private VolumeDao volumeDao; + + @Inject + private StoragePoolHostDao storagePoolHostDao; + + @Inject + private VMInstanceDao vmInstanceDao; + + @Inject + private PrimaryDataStoreDao primaryDataStoreDao; + + @Inject + DataStoreManager dataStoreMgr; + + @Inject + private AgentManager agentManager; + + @Inject + private VMSnapshotDao vmSnapshotDao; + + @Inject + private VMSnapshotDetailsDao vmSnapshotDetailsDao; + + @Inject + BackupManager backupManager; + + @Inject + ResourceManager resourceManager; + + @Inject + private DiskOfferingDao diskOfferingDao; + + @Inject + private BackupScheduleDao backupScheduleDao; + + private Long getClusterIdFromRootVolume(VirtualMachine vm) { + VolumeVO rootVolume = volumeDao.getInstanceRootVolume(vm.getId()); + StoragePoolVO rootDiskPool = 
primaryDataStoreDao.findById(rootVolume.getPoolId()); + if (rootDiskPool == null) { + return null; + } + return rootDiskPool.getClusterId(); + } + + protected Host getVMHypervisorHost(VirtualMachine vm) { + Long hostId = vm.getLastHostId(); + Long clusterId = null; + + if (hostId != null) { + Host host = hostDao.findById(hostId); + if (host.getStatus() == Status.Up) { + return host; + } + // Try to find any Up host in the same cluster + clusterId = host.getClusterId(); + } else { + // Try to find any Up host in the same cluster as the root volume + clusterId = getClusterIdFromRootVolume(vm); + } + + if (clusterId != null) { + for (final Host hostInCluster : hostDao.findHypervisorHostInCluster(clusterId)) { + if (hostInCluster.getStatus() == Status.Up) { + LOG.debug("Found Host {} in cluster {}", hostInCluster, clusterId); + return hostInCluster; + } + } + } + + // Try to find any Host in the zone + return resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, vm.getDataCenterId()); + } + + protected Host getVMHypervisorHostForBackup(VirtualMachine vm) { + Long hostId = vm.getHostId(); + if (hostId == null && VirtualMachine.State.Running.equals(vm.getState())) { + throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for %s. 
Make sure the virtual machine is running", vm.getName())); + } + if (VirtualMachine.State.Stopped.equals(vm.getState())) { + hostId = vm.getLastHostId(); + } + if (hostId == null) { + throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for stopped VM: %s", vm)); + } + final Host host = hostDao.findById(hostId); + if (host == null || !Status.Up.equals(host.getStatus()) || !Hypervisor.HypervisorType.KVM.equals(host.getHypervisorType())) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } + return host; + } + + @Override + public Pair takeBackup(final VirtualMachine vm, Boolean quiesceVM) { + final Host host = getVMHypervisorHostForBackup(vm); + + final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(vm.getBackupOfferingId()); + if (backupRepository == null) { + throw new CloudRuntimeException("No valid backup repository found for the VM, please check the attached backup offering"); + } + + validateNoVmSnapshots(vm); + List vmVolumes = volumeDao.findByInstance(vm.getId()); + vmVolumes.sort(Comparator.comparing(Volume::getDeviceId)); + Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(vmVolumes); + validateVolumePoolTypes(volumePoolsAndPaths.first()); + final BackupVO latestBackup = getLatestBackedUpBackup(vm); + final boolean incrementalBackup = shouldUseIncrementalBackup(vm, latestBackup); + BackupExecutionResult result = executeBackup(vm, quiesceVM, host, backupRepository, vmVolumes, volumePoolsAndPaths, latestBackup, incrementalBackup, + incrementalBackup && vmVolumes.size() > 1); + if (!result.success && incrementalBackup && shouldRetryAsFullAfterIncrementalFailure(result, vmVolumes)) { + cleanupFailedBackupForFullRetry(result.backup); + LOG.warn("Incremental backup failed for VM [{}] due to [{}]. 
Retrying as full backup.", vm, result.details); + result = executeBackup(vm, quiesceVM, host, backupRepository, vmVolumes, volumePoolsAndPaths, null, false, false); + } + return new Pair<>(result.success, result.backup); + } + + private BackupExecutionResult executeBackup(VirtualMachine vm, Boolean quiesceVM, Host host, BackupRepository backupRepository, + List vmVolumes, Pair, List> volumePoolsAndPaths, + Backup parentBackup, boolean incrementalBackup, boolean retryAsFullOnFailure) { + final String backupPath = buildBackupPath(vm); + final String checkpointName = backupPath.substring(backupPath.lastIndexOf("/") + 1); + final String backupEngine = areAllVolumesOnRbdPool(volumePoolsAndPaths.first()) ? BACKUP_ENGINE_RBD_DIFF : BACKUP_ENGINE_QCOW2; + final List backupFiles = buildBackupFileNames(vmVolumes, backupEngine, incrementalBackup); + + BackupVO backupVO = createBackupObject(vm, backupPath, incrementalBackup ? BACKUP_TYPE_INCREMENTAL : BACKUP_TYPE_FULL, + checkpointName, backupEngine, incrementalBackup ? 
parentBackup : null, volumePoolsAndPaths.second()); + AblestackNasTakeBackupCommand command = new AblestackNasTakeBackupCommand(vm.getInstanceName(), backupPath); + command.setBackupType(backupVO.getType()); + command.setCheckpointName(checkpointName); + command.setBackupFiles(backupFiles); + command.setVolumePools(volumePoolsAndPaths.first()); + command.setVolumePaths(volumePoolsAndPaths.second()); + if (incrementalBackup && parentBackup != null) { + command.setParentBackupPath(parentBackup.getExternalId()); + command.setParentCheckpointName(getBackupDetail(parentBackup, DETAIL_CHECKPOINT_NAME)); + command.setParentCheckpointPath(getBackupDetail(parentBackup, DETAIL_CHECKPOINT_PATH)); + } + command.setBackupRepoType(backupRepository.getType()); + command.setBackupRepoAddress(backupRepository.getAddress()); + command.setMountOptions(backupRepository.getMountOptions()); + command.setQuiesce(quiesceVM); + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(host.getId(), command); + } catch (AgentUnavailableException e) { + logger.error("Unable to contact backend control plane to initiate backup for VM {}", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + logger.error("Operation to initiate backup timed out for VM {}", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); + } + + if (answer != null && answer.getResult()) { + backupVO.setDate(new Date()); + backupVO.setSize(answer.getSize()); + backupVO.setStatus(Backup.Status.BackedUp); + backupVO.setBackedUpVolumes(createVolumeInfoFromVolumes(vmVolumes, backupFiles)); + if (backupDao.update(backupVO.getId(), backupVO)) { + return 
BackupExecutionResult.success(backupVO); + } + throw new CloudRuntimeException("Failed to update backup"); + } + + final String details = answer != null ? answer.getDetails() : "No answer received"; + logger.error("Failed to take backup for VM {}: {}", vm.getInstanceName(), details); + if (retryAsFullOnFailure) { + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } else if (answer != null && answer.getNeedsCleanup()) { + logger.error("Backup cleanup failed for VM {}. Leaving the backup in Error state.", vm.getInstanceName()); + backupVO.setStatus(Backup.Status.Error); + backupDao.update(backupVO.getId(), backupVO); + } else { + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } + return BackupExecutionResult.failure(details, backupVO); + } + + private boolean shouldRetryAsFullAfterIncrementalFailure(BackupExecutionResult result, List vmVolumes) { + if (result == null || result.success) { + return false; + } + if (StringUtils.contains(result.details, MISSING_PARENT_RBD_SNAPSHOT_ERROR)) { + return true; + } + return vmVolumes.size() > 1; + } + + private void cleanupFailedBackupForFullRetry(Backup backup) { + if (backup == null) { + return; + } + backupDao.remove(backup.getId()); + } + + private static final class BackupExecutionResult { + private final boolean success; + private final Backup backup; + private final String details; + + private BackupExecutionResult(boolean success, Backup backup, String details) { + this.success = success; + this.backup = backup; + this.details = details; + } + + private static BackupExecutionResult success(Backup backup) { + return new BackupExecutionResult(true, backup, null); + } + + private static BackupExecutionResult failure(String details, Backup backup) { + return new BackupExecutionResult(false, backup, details); + } + } + + private String buildBackupPath(VirtualMachine vm) { + return String.format("%s/%s", vm.getInstanceName(), + new 
SimpleDateFormat("yyyy.MM.dd.HH.mm.ss.SSS").format(new Date())); + } + + private void validateNoVmSnapshots(VirtualMachine vm) { + if (CollectionUtils.isNotEmpty(vmSnapshotDao.findByVm(vm.getId()))) { + logger.debug("NAS backup provider cannot take backups of a VM [{}] with VM snapshots.", vm); + throw new CloudRuntimeException(String.format("Cannot take backup of VM [%s] as it has VM snapshots.", vm.getUuid())); + } + } + + private BackupVO createBackupObject(VirtualMachine vm, String backupPath, String backupType, String checkpointName, String backupEngine, Backup parentBackup, + List diskPaths) { + BackupVO backup = new BackupVO(); + backup.setVmId(vm.getId()); + backup.setExternalId(backupPath); + backup.setType(backupType); + backup.setDate(new Date()); + long virtualSize = 0L; + for (final Volume volume: volumeDao.findByInstance(vm.getId())) { + if (Volume.State.Ready.equals(volume.getState())) { + virtualSize += volume.getSize(); + } + } + backup.setProtectedSize(virtualSize); + backup.setStatus(Backup.Status.BackingUp); + backup.setBackupOfferingId(vm.getBackupOfferingId()); + backup.setAccountId(vm.getAccountId()); + backup.setDomainId(vm.getDomainId()); + backup.setZoneId(vm.getDataCenterId()); + backup.setName(backupManager.getBackupNameFromVM(vm)); + Map details = new HashMap<>(); + Map backupDetails = backupManager.getBackupDetailsFromVM(vm); + if (backupDetails != null) { + details.putAll(backupDetails); + } + details.put(DETAIL_CHECKPOINT_NAME, checkpointName); + details.put(DETAIL_CHECKPOINT_PATH, getCheckpointPath(backupPath, checkpointName, backupEngine)); + details.put(DETAIL_BACKUP_ENGINE, backupEngine); + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine) && CollectionUtils.isNotEmpty(diskPaths)) { + details.put(DETAIL_RBD_DISK_PATHS, String.join(",", diskPaths)); + } + if (parentBackup != null) { + details.put(DETAIL_PARENT_BACKUP_UUID, parentBackup.getUuid()); + details.put(DETAIL_PARENT_BACKUP_PATH, parentBackup.getExternalId()); + 
details.put(DETAIL_PARENT_CHECKPOINT_NAME, getBackupDetail(parentBackup, DETAIL_CHECKPOINT_NAME)); + details.put(DETAIL_PARENT_CHECKPOINT_PATH, getBackupDetail(parentBackup, DETAIL_CHECKPOINT_PATH)); + } + backup.setDetails(details); + + return backupDao.persist(backup); + } + + private String getCheckpointPath(String backupPath, String checkpointName, String backupEngine) { + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + return String.format("%s/checkpoints/%s.meta", backupPath, checkpointName); + } + return String.format("%s/checkpoints/%s.xml", backupPath, checkpointName); + } + + private BackupVO getLatestBackedUpBackup(VirtualMachine vm) { + List backups = backupDao.listByVmIdAndOffering(vm.getDataCenterId(), vm.getId(), vm.getBackupOfferingId()); + return backups.stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(backup -> Backup.Status.BackedUp.equals(backup.getStatus())) + .peek(backupDao::loadDetails) + .filter(backup -> getBackupDetail(backup, DETAIL_CHECKPOINT_NAME) != null) + .max(Comparator.comparing(BackupVO::getDate)) + .orElse(null); + } + + private boolean shouldUseIncrementalBackup(VirtualMachine vm, Backup latestBackup) { + if (latestBackup == null) { + return false; + } + + final Long clusterId = getClusterIdFromRootVolume(vm); + if (clusterId == null) { + LOG.debug("Unable to resolve cluster for VM [{}], fallback to full backup.", vm); + return false; + } + + if (!KvmIncrementalBackup.valueIn(clusterId)) { + return false; + } + + return getBackupChainSize(vm, latestBackup) < getEffectiveIncrementalLimit(vm); + } + + private int getEffectiveIncrementalLimit(VirtualMachine vm) { + int effectiveLimit = BackupDeltaMax.value(); + List schedules = backupScheduleDao.listByVM(vm.getId()); + for (BackupScheduleVO schedule : schedules) { + if (schedule != null && schedule.getMaxBackups() > 0) { + effectiveLimit = Math.min(effectiveLimit, schedule.getMaxBackups()); + } + } + return effectiveLimit; + } + + private 
int getBackupChainSize(VirtualMachine vm, Backup latestBackup) { + List backups = backupDao.listByVmIdAndOffering(vm.getDataCenterId(), vm.getId(), vm.getBackupOfferingId()).stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(backup -> Backup.Status.BackedUp.equals(backup.getStatus())) + .peek(backupDao::loadDetails) + .collect(Collectors.toList()); + Map backupsByUuid = backups.stream().collect(Collectors.toMap(BackupVO::getUuid, backup -> backup, (left, right) -> left)); + int chainSize = 1; + Backup current = latestBackup; + while (current != null) { + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + if (parentBackupUuid == null) { + break; + } + current = backupsByUuid.get(parentBackupUuid); + if (current != null) { + chainSize++; + } + } + return chainSize; + } + + private boolean hasDependentBackups(Backup backup) { + List backups = backupDao.listByVmIdAndOffering(backup.getZoneId(), backup.getVmId(), backup.getBackupOfferingId()); + return backups.stream() + .filter(BackupVO.class::isInstance) + .map(BackupVO.class::cast) + .filter(candidate -> !Objects.equals(candidate.getId(), backup.getId())) + .peek(backupDao::loadDetails) + .anyMatch(candidate -> Objects.equals(getBackupDetail(candidate, DETAIL_PARENT_BACKUP_UUID), backup.getUuid())); + } + + private String getBackupDetail(Backup backup, String key) { + Map details = backup.getDetails(); + return details != null ? 
details.get(key) : null; + } + + private void validateVolumePoolTypes(List volumePools) { + boolean hasRbd = volumePools.stream().anyMatch(pool -> pool != null && Storage.StoragePoolType.RBD.equals(pool.getPoolType())); + boolean hasNonRbd = volumePools.stream().anyMatch(pool -> pool != null && !Storage.StoragePoolType.RBD.equals(pool.getPoolType())); + if (hasRbd && hasNonRbd) { + throw new CloudRuntimeException("NAS incremental backup does not support VMs with mixed RBD and non-RBD volumes"); + } + } + + private boolean areAllVolumesOnRbdPool(List volumePools) { + return CollectionUtils.isNotEmpty(volumePools) && + volumePools.stream().allMatch(pool -> pool != null && Storage.StoragePoolType.RBD.equals(pool.getPoolType())); + } + + private List buildBackupFileNames(List volumes, String backupEngine, boolean incrementalBackup) { + List backupFiles = new ArrayList<>(); + for (VolumeVO volume : volumes) { + String suffix; + if (BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + suffix = incrementalBackup ? ".rbdiff" : ".raw"; + } else { + suffix = ".qcow2"; + } + backupFiles.add(String.format("volume-%s%s", volume.getUuid(), suffix)); + } + return backupFiles; + } + + private String createVolumeInfoFromVolumes(List volumes, List backupFiles) { + List infoList = new ArrayList<>(); + for (int i = 0; i < volumes.size(); i++) { + VolumeVO vol = volumes.get(i); + DiskOffering diskOffering = diskOfferingDao.findById(vol.getDiskOfferingId()); + String diskOfferingUuid = diskOffering != null ? 
diskOffering.getUuid() : null; + infoList.add(new Backup.VolumeInfo(vol.getUuid(), backupFiles.get(i), vol.getVolumeType(), vol.getSize(), + vol.getDeviceId(), diskOfferingUuid, vol.getMinIops(), vol.getMaxIops())); + } + return new com.google.gson.Gson().toJson(infoList.toArray(), Backup.VolumeInfo[].class); + } + + @Override + public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + return restoreVMBackup(vm, backup); + } + + @Override + public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { + return restoreVMBackup(vm, backup).first(); + } + + private Pair restoreVMBackup(VirtualMachine vm, Backup backup) { + List backupVolumes = backup.getBackedUpVolumes(); + List backedVolumesUUIDs = backupVolumes.stream() + .sorted(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)) + .map(Backup.VolumeInfo::getUuid) + .collect(Collectors.toList()); + + List restoreVolumes = volumeDao.findByInstance(vm.getId()).stream() + .sorted(Comparator.comparingLong(VolumeVO::getDeviceId)) + .collect(Collectors.toList()); + + LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm, backup); + BackupRepository backupRepository = getBackupRepository(backup); + + final Host host = getVMHypervisorHost(vm); + AblestackNasRestoreBackupCommand restoreCommand = new AblestackNasRestoreBackupCommand(); + restoreCommand.setBackupPath(backup.getExternalId()); + restoreCommand.setBackupRepoType(backupRepository.getType()); + restoreCommand.setBackupRepoAddress(backupRepository.getAddress()); + restoreCommand.setMountOptions(backupRepository.getMountOptions()); + restoreCommand.setVmName(vm.getName()); + restoreCommand.setBackupVolumesUUIDs(backedVolumesUUIDs); + Pair, List> volumePoolsAndPaths = getVolumePoolsAndPaths(restoreVolumes); + restoreCommand.setRestoreVolumePools(volumePoolsAndPaths.first()); + restoreCommand.setRestoreVolumePaths(volumePoolsAndPaths.second()); + 
restoreCommand.setVolumePaths(getVolumePaths(restoreVolumes)); + restoreCommand.setBackupFiles(getBackupFiles(backupVolumes, backup)); + restoreCommand.setBackupFileChains(getBackupFileChains(backupVolumes, backup)); + restoreCommand.setVmExists(vm.getRemoved() == null); + restoreCommand.setVmState(vm.getState()); + restoreCommand.setMountTimeout(NASBackupRestoreMountTimeout.value()); + restoreCommand.setWait(NASBackupRestoreTimeout.value()); + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(host.getId(), restoreCommand); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to restore backup timed out, please try again"); + } + return new Pair<>(answer.getResult(), answer.getDetails()); + } + + private List getBackupFiles(List backedVolumes, Backup backup) { + List backupFiles = new ArrayList<>(); + List sortedVolumes = new ArrayList<>(backedVolumes); + sortedVolumes.sort(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)); + for (Backup.VolumeInfo backedVolume : sortedVolumes) { + if (isLegacyBackup(backup)) { + backupFiles.add(getLegacyBackupFileName(backedVolume)); + } else { + backupFiles.add(backedVolume.getPath()); + } + } + return backupFiles; + } + + private List getBackupFileChains(List backedVolumes, Backup backup) { + List backupFileChains = new ArrayList<>(); + List sortedVolumes = new ArrayList<>(backedVolumes); + sortedVolumes.sort(Comparator.comparingLong(Backup.VolumeInfo::getDeviceId)); + for (Backup.VolumeInfo backedVolume : sortedVolumes) { + backupFileChains.add(String.join(";", getBackupFileChain(backedVolume.getUuid(), backup))); + } + return backupFileChains; + } + + private List getBackupFileChain(String volumeUuid, Backup backup) { + loadBackupDetailsIfNeeded(backup); + if (isLegacyBackup(backup)) { + Backup.VolumeInfo volumeInfo = 
getBackedUpVolumeInfo(backup.getBackedUpVolumes(), volumeUuid); + return volumeInfo != null ? getLegacyBackupFileCandidates(volumeInfo) : List.of(); + } + + String backupEngine = getBackupDetail(backup, DETAIL_BACKUP_ENGINE); + if (!BACKUP_ENGINE_RBD_DIFF.equals(backupEngine)) { + Backup.VolumeInfo volumeInfo = getBackedUpVolumeInfo(backup.getBackedUpVolumes(), volumeUuid); + return volumeInfo != null ? List.of(volumeInfo.getPath()) : List.of(); + } + + List chain = getBackupChain(backup); + List files = new ArrayList<>(); + for (Backup chainBackup : chain) { + Backup.VolumeInfo volumeInfo = getBackedUpVolumeInfo(chainBackup.getBackedUpVolumes(), volumeUuid); + if (volumeInfo != null) { + files.add(String.format("%s/%s", chainBackup.getExternalId(), volumeInfo.getPath())); + } + } + return files; + } + + private List getBackupChain(Backup backup) { + loadBackupDetailsIfNeeded(backup); + List backups = backupDao.listByVmIdAndOffering(backup.getZoneId(), backup.getVmId(), backup.getBackupOfferingId()); + Map backupsByUuid = new HashMap<>(); + for (Backup candidate : backups) { + if (candidate instanceof BackupVO) { + backupDao.loadDetails((BackupVO) candidate); + } + backupsByUuid.put(candidate.getUuid(), candidate); + } + + List chain = new ArrayList<>(); + Backup current = backup; + while (current != null) { + chain.add(current); + String parentBackupUuid = getBackupDetail(current, DETAIL_PARENT_BACKUP_UUID); + current = parentBackupUuid != null ? 
backupsByUuid.get(parentBackupUuid) : null; + } + Collections.reverse(chain); + return chain; + } + + private void loadBackupDetailsIfNeeded(Backup backup) { + if (backup instanceof BackupVO && backup.getDetails() == null) { + backupDao.loadDetails((BackupVO) backup); + } + } + + private boolean isLegacyBackup(Backup backup) { + return getBackupDetail(backup, DETAIL_BACKUP_ENGINE) == null; + } + + private String getLegacyBackupFileName(Backup.VolumeInfo volumeInfo) { + String volumePath = volumeInfo.getPath(); + if (StringUtils.isNotBlank(volumePath) && + (volumePath.endsWith(".qcow2") || volumePath.endsWith(".raw") || volumePath.endsWith(".rbdiff"))) { + return volumePath; + } + String diskPrefix = Volume.Type.ROOT.equals(volumeInfo.getType()) ? "root" : "datadisk"; + return String.format("%s.%s.qcow2", diskPrefix, volumeInfo.getPath()); + } + + private List getLegacyBackupFileCandidates(Backup.VolumeInfo volumeInfo) { + List candidates = new ArrayList<>(); + String volumePath = volumeInfo.getPath(); + if (StringUtils.isNotBlank(volumePath)) { + candidates.add(volumePath); + if (volumePath.contains("/")) { + String baseName = volumePath.substring(volumePath.lastIndexOf('/') + 1); + if (!Objects.equals(volumePath, baseName)) { + candidates.add(baseName); + } + } + } + + String legacyFileName = getLegacyBackupFileName(volumeInfo); + if (!candidates.contains(legacyFileName)) { + candidates.add(legacyFileName); + } + + if (volumePath != null && volumePath.contains("/")) { + String baseName = volumePath.substring(volumePath.lastIndexOf('/') + 1); + String diskPrefix = Volume.Type.ROOT.equals(volumeInfo.getType()) ? 
"root" : "datadisk"; + String baseNameLegacyFile = String.format("%s.%s.qcow2", diskPrefix, baseName); + if (!candidates.contains(baseNameLegacyFile)) { + candidates.add(baseNameLegacyFile); + } + } + + return candidates; + } + + private List getVolumePaths(List volumes) { + List volumePaths = new ArrayList<>(); + for (VolumeVO volume : volumes) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(volume.getPoolId()); + if (Objects.isNull(storagePool)) { + throw new CloudRuntimeException("Unable to find storage pool associated to the volume"); + } + String volumePathPrefix; + if (ScopeType.HOST.equals(storagePool.getScope())) { + volumePathPrefix = storagePool.getPath(); + } else if (Storage.StoragePoolType.SharedMountPoint.equals(storagePool.getPoolType())) { + volumePathPrefix = storagePool.getPath(); + } else { + volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid()); + } + volumePaths.add(String.format("%s/%s", volumePathPrefix, volume.getPath())); + } + return volumePaths; + } + + private Pair, List> getVolumePoolsAndPaths(List volumes) { + List volumePools = new ArrayList<>(); + List volumePaths = new ArrayList<>(); + for (VolumeVO volume : volumes) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(volume.getPoolId()); + if (Objects.isNull(storagePool)) { + throw new CloudRuntimeException("Unable to find storage pool associated to the volume"); + } + + DataStore dataStore = dataStoreMgr.getDataStore(storagePool.getId(), DataStoreRole.Primary); + volumePools.add(dataStore != null ? 
(PrimaryDataStoreTO)dataStore.getTO() : null); + + String volumePathPrefix = getVolumePathPrefix(storagePool); + volumePaths.add(String.format("%s/%s", volumePathPrefix, volume.getPath())); + } + return new Pair<>(volumePools, volumePaths); + } + + private String getVolumePathPrefix(StoragePoolVO storagePool) { + String volumePathPrefix; + if (ScopeType.HOST.equals(storagePool.getScope()) || + Storage.StoragePoolType.SharedMountPoint.equals(storagePool.getPoolType()) || + Storage.StoragePoolType.RBD.equals(storagePool.getPoolType())) { + volumePathPrefix = storagePool.getPath(); + } else { + // Should be Storage.StoragePoolType.NetworkFilesystem + volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid()); + } + return volumePathPrefix; + } + + @Override + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + final VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); + final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); + String cacheMode = null; + final VMInstanceVO vm = vmInstanceDao.findVMByInstanceName(vmNameAndState.first()); + List listVolumes = volumeDao.findByInstanceAndType(vm.getId(), Type.ROOT); + if(CollectionUtils.isNotEmpty(listVolumes)) { + VolumeVO rootDisk = listVolumes.get(0); + DiskOffering baseDiskOffering = diskOfferingDao.findById(rootDisk.getDiskOfferingId()); + if (baseDiskOffering.getCacheMode() != null) { + cacheMode = baseDiskOffering.getCacheMode().toString(); + } + } + StoragePoolVO pool = primaryDataStoreDao.findByUuid(dataStoreUuid); + if (pool == null) { + List pools = primaryDataStoreDao.findPoolByName(dataStoreUuid); + pool = pools.get(0); + } + HostVO hostVO = hostDao.findByIp(hostIp); + if (hostVO == null) { + hostVO = hostDao.findByName(hostIp); + } + + Backup.VolumeInfo matchingVolume = getBackedUpVolumeInfo(backup.getBackedUpVolumes(), volume.getUuid()); + if 
(matchingVolume == null) { + throw new CloudRuntimeException(String.format("Unable to find volume %s in the list of backed up volumes for backup %s, cannot proceed with restore", volume.getUuid(), backup)); + } + Long backedUpVolumeSize = matchingVolume.getSize(); + + LOG.debug("Restoring vm volume {} from backup {} on the NAS Backup Provider", volume, backup); + BackupRepository backupRepository = getBackupRepository(backup); + + VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), + backup.getDomainId(), backup.getAccountId(), 0, null, + backup.getSize(), null, null, null); + String volumeUUID = UUID.randomUUID().toString(); + String volumeName = volume != null ? volume.getName() : backupVolumeInfo.getUuid(); + restoredVolume.setName("RestoredVol-" + volumeName); + restoredVolume.setProvisioningType(diskOffering.getProvisioningType()); + restoredVolume.setUpdated(new Date()); + restoredVolume.setUuid(volumeUUID); + restoredVolume.setRemoved(null); + restoredVolume.setDisplayVolume(true); + restoredVolume.setPoolId(pool.getId()); + restoredVolume.setPoolType(pool.getPoolType()); + restoredVolume.setPath(restoredVolume.getUuid()); + restoredVolume.setState(Volume.State.Copying); + restoredVolume.setSize(backupVolumeInfo.getSize()); + restoredVolume.setDiskOfferingId(diskOffering.getId()); + if (pool.getPoolType() != Storage.StoragePoolType.RBD) { + restoredVolume.setFormat(Storage.ImageFormat.QCOW2); + } else { + restoredVolume.setFormat(Storage.ImageFormat.RAW); + } + + AblestackNasRestoreBackupCommand restoreCommand = new AblestackNasRestoreBackupCommand(); + restoreCommand.setBackupPath(backup.getExternalId()); + restoreCommand.setBackupRepoType(backupRepository.getType()); + restoreCommand.setBackupRepoAddress(backupRepository.getAddress()); + restoreCommand.setVmName(vmNameAndState.first()); + restoreCommand.setRestoreVolumePaths(Collections.singletonList(String.format("%s/%s", getVolumePathPrefix(pool), volumeUUID))); + 
DataStore dataStore = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + restoreCommand.setRestoreVolumePools(Collections.singletonList(dataStore != null ? (PrimaryDataStoreTO)dataStore.getTO() : null)); + restoreCommand.setDiskType(backupVolumeInfo.getType().name().toLowerCase(Locale.ROOT)); + restoreCommand.setMountOptions(backupRepository.getMountOptions()); + restoreCommand.setVmExists(null); + restoreCommand.setVmState(vmNameAndState.second()); + restoreCommand.setMountTimeout(NASBackupRestoreMountTimeout.value()); + restoreCommand.setWait(NASBackupRestoreTimeout.value()); + restoreCommand.setCacheMode(cacheMode); + restoreCommand.setVolumePaths(Collections.singletonList(String.format("%s/%s", pool.getPath(), volumeUUID))); + restoreCommand.setBackupFiles(getBackupFiles(Collections.singletonList(matchingVolume), backup)); + restoreCommand.setBackupFileChains(Collections.singletonList(String.join(";", getBackupFileChain(matchingVolume.getUuid(), backup)))); + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(hostVO.getId(), restoreCommand); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to restore backed up volume timed out, please try again"); + } + + if (answer.getResult()) { + try { + volumeDao.persist(restoredVolume); + } catch (Exception e) { + throw new CloudRuntimeException("Unable to create restored volume due to: " + e); + } + } + + return new Pair<>(answer.getResult(), answer.getDetails()); + } + + private BackupRepository getBackupRepository(Backup backup) { + BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(backup.getBackupOfferingId()); + if (backupRepository == null) { + throw new CloudRuntimeException(String.format("No valid backup repository found for the backup %s, please check the attached backup 
offering", backup.getUuid())); + } + return backupRepository; + } + + private Backup.VolumeInfo getBackedUpVolumeInfo(List backedUpVolumes, String volumeUuid) { + return backedUpVolumes.stream() + .filter(v -> v.getUuid().equals(volumeUuid)) + .findFirst() + .orElse(null); + } + + @Override + public boolean deleteBackup(Backup backup, boolean forced) { + if (backup instanceof BackupVO && backup.getDetails() == null) { + backupDao.loadDetails((BackupVO) backup); + } + if (!forced && hasDependentBackups(backup)) { + throw new CloudRuntimeException(String.format("Backup [%s] cannot be deleted because one or more incremental backups depend on it.", backup.getUuid())); + } + + final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(backup.getBackupOfferingId()); + if (backupRepository == null) { + throw new CloudRuntimeException("No valid backup repository found for the VM, please check the attached backup offering"); + } + + final Host host; + final VirtualMachine vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); + if (vm != null) { + host = getVMHypervisorHost(vm); + } else { + host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, backup.getZoneId()); + } + + AblestackDeleteBackupCommand command = new AblestackDeleteBackupCommand(backup.getExternalId(), backupRepository.getType(), + backupRepository.getAddress(), backupRepository.getMountOptions(), forced); + command.setBackupProvider("ablestack-nas"); + command.setCheckpointName(getBackupDetail(backup, DETAIL_CHECKPOINT_NAME)); + command.setDiskPaths(getBackupDetail(backup, DETAIL_RBD_DISK_PATHS)); + + BackupAnswer answer; + try { + answer = (BackupAnswer) agentManager.send(host.getId(), command); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to delete backup 
timed out, please try again"); + } + + if (answer != null && answer.getResult()) { + return true; + } + + logger.debug("There was an error removing the backup with id {}", backup.getId()); + return false; + } + + public void syncBackupMetrics(Long zoneId) { + } + + @Override + public List listRestorePoints(VirtualMachine vm) { + return null; + } + + @Override + public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint restorePoint, VirtualMachine vm) { + return null; + } + + @Override + public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { + for (VMSnapshotVO vmSnapshotVO : vmSnapshotDao.findByVmAndByType(vm.getId(), VMSnapshot.Type.Disk)) { + List vmSnapshotDetails = vmSnapshotDetailsDao.listDetails(vmSnapshotVO.getId()); + if (vmSnapshotDetails.stream().anyMatch(vmSnapshotDetailsVO -> VolumeApiServiceImpl.KVM_FILE_BASED_STORAGE_SNAPSHOT.equals(vmSnapshotDetailsVO.getName()))) { + logger.warn("VM [{}] has VM snapshots using the KvmFileBasedStorageVmSnapshot Strategy; this provider does not support backups on VMs with these snapshots!"); + return false; + } + } + + return Hypervisor.HypervisorType.KVM.equals(vm.getHypervisorType()); + } + + @Override + public boolean removeVMFromBackupOffering(VirtualMachine vm) { + return true; + } + + @Override + public boolean willDeleteBackupsOnOfferingRemoval() { + return false; + } + + @Override + public boolean supportsInstanceFromBackup() { + return true; + } + + @Override + public boolean supportsMemoryVmSnapshot() { + return false; + } + + @Override + public Pair getBackupStorageStats(Long zoneId) { + final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, getName()); + Long totalSize = 0L; + Long usedSize = 0L; + for (final BackupRepository repository : repositories) { + if (repository.getCapacityBytes() != null) { + totalSize += repository.getCapacityBytes(); + } + if (repository.getUsedBytes() != null) { + usedSize += repository.getUsedBytes(); + } + 
} + return new Pair<>(usedSize, totalSize); + } + + @Override + public void syncBackupStorageStats(Long zoneId) { + final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, getName()); + final Host host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId); + for (final BackupRepository repository : repositories) { + GetBackupStorageStatsCommand command = new GetBackupStorageStatsCommand(repository.getType(), repository.getAddress(), repository.getMountOptions()); + BackupStorageStatsAnswer answer; + try { + answer = (BackupStorageStatsAnswer) agentManager.send(host.getId(), command); + backupRepositoryDao.updateCapacity(repository, answer.getTotalSize(), answer.getUsedSize()); + } catch (AgentUnavailableException e) { + logger.warn("Unable to contact backend control plane to get backup stats for repository: {}", repository.getName()); + } catch (OperationTimedoutException e) { + logger.warn("Operation to get backup stats timed out for the repository: " + repository.getName()); + } + } + } + + @Override + public List listBackupOfferings(Long zoneId) { + final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, getName()); + final List offerings = new ArrayList<>(); + for (final BackupRepository repository : repositories) { + offerings.add(new AblestackNasBackupOffering(repository.getName(), repository.getUuid())); + } + return offerings; + } + + @Override + public boolean isValidProviderOffering(Long zoneId, String uuid) { + return true; + } + + @Override + public Boolean crossZoneInstanceCreationEnabled(BackupOffering backupOffering) { + final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(backupOffering.getId()); + if (backupRepository == null) { + throw new CloudRuntimeException("Backup repository not found for the backup offering" + backupOffering.getName()); + } + return Boolean.TRUE.equals(backupRepository.crossZoneInstanceCreationEnabled()); + } + + 
@Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ + NASBackupRestoreMountTimeout, + NASBackupRestoreTimeout + }; + } + + @Override + public String getName() { + return "ablestack-nas"; + } + + @Override + public String getDescription() { + return "NAS Backup Plugin"; + } + + @Override + public String getConfigComponentName() { + return BackupService.class.getSimpleName(); + } + + @Override + public void syncBackups(VirtualMachine vm) { + } + + @Override + public boolean checkBackupAgent(final Long zoneId) { return true; } + + @Override + public boolean installBackupAgent(final Long zoneId) { return true; } + + @Override + public boolean importBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { return true; } + + @Override + public boolean updateBackupPlan(final Long zoneId, final String retentionPeriod, final String externalId) { return true; } + +} diff --git a/plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/module.properties b/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/module.properties similarity index 90% rename from plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/module.properties rename to plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/module.properties index 1db48b423de7..5a64e71d2dfa 100644 --- a/plugins/backup/commvault/src/main/resources/META-INF/cloudstack/commvault/module.properties +++ b/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/module.properties @@ -1,7 +1,7 @@ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file +# regarding copyright ownership. 
The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at @@ -14,5 +14,5 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -name=commvault +name=ablestack-nas parent=backup diff --git a/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/spring-backup-nas-context.xml b/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/spring-backup-nas-context.xml new file mode 100644 index 000000000000..ecb71f96d19b --- /dev/null +++ b/plugins/backup/ablestack-nas/src/main/resources/META-INF/cloudstack/ablestack-nas/spring-backup-nas-context.xml @@ -0,0 +1,26 @@ + + + + + + + diff --git a/plugins/backup/ablestack-nas/src/test/java/org/apache/cloudstack/backup/AblestackNasBackupProviderTest.java b/plugins/backup/ablestack-nas/src/test/java/org/apache/cloudstack/backup/AblestackNasBackupProviderTest.java new file mode 100644 index 000000000000..2d6575209897 --- /dev/null +++ b/plugins/backup/ablestack-nas/src/test/java/org/apache/cloudstack/backup/AblestackNasBackupProviderTest.java @@ -0,0 +1,357 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.agent.AgentManager; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.Pair; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; + +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupRepositoryDao; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + +@RunWith(MockitoJUnitRunner.class) +public class AblestackNasBackupProviderTest { + @Spy + @InjectMocks + private AblestackNasBackupProvider ablestackNasBackupProvider; + + @Mock + private BackupDao backupDao; + + @Mock + private BackupRepositoryDao backupRepositoryDao; + + @Mock + private BackupOfferingDao backupOfferingDao; + + @Mock + private VMInstanceDao vmInstanceDao; + + @Mock + private 
AgentManager agentManager; + + @Mock + private VolumeDao volumeDao; + + @Mock + private HostDao hostDao; + + @Mock + private BackupManager backupManager; + + @Mock + private ResourceManager resourceManager; + + @Mock + private PrimaryDataStoreDao storagePoolDao; + + @Mock + private VMSnapshotDao vmSnapshotDaoMock; + + @Test + public void testDeleteBackup() throws OperationTimedoutException, AgentUnavailableException { + Long hostId = 1L; + BackupVO backup = new BackupVO(); + backup.setBackupOfferingId(1L); + backup.setVmId(1L); + backup.setExternalId("externalId"); + ReflectionTestUtils.setField(backup, "id", 1L); + + BackupRepositoryVO backupRepository = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1024L, null); + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(hostId); + HostVO host = mock(HostVO.class); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.when(backupRepositoryDao.findByBackupOfferingId(1L)).thenReturn(backupRepository); + Mockito.when(vmInstanceDao.findByIdIncludingRemoved(1L)).thenReturn(vm); + Mockito.when(agentManager.send(anyLong(), Mockito.any(AblestackDeleteBackupCommand.class))).thenReturn(new BackupAnswer(new AblestackDeleteBackupCommand(null, null, null, null, true), true, "details")); + Mockito.when(backupDao.remove(1L)).thenReturn(true); + + boolean result = ablestackNasBackupProvider.deleteBackup(backup, true); + Assert.assertTrue(result); + Mockito.verify(agentManager).send(anyLong(), Mockito.argThat(AblestackDeleteBackupCommand::isForced)); + } + + @Test + public void testSyncBackupStorageStats() throws AgentUnavailableException, OperationTimedoutException { + BackupRepositoryVO backupRepository = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1024L, null); + + HostVO host = mock(HostVO.class); + 
Mockito.when(resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, 1L)).thenReturn(host); + + Mockito.when(backupRepositoryDao.listByZoneAndProvider(1L, "nas")).thenReturn(Collections.singletonList(backupRepository)); + GetBackupStorageStatsCommand command = new GetBackupStorageStatsCommand("nfs", "address", "sync"); + BackupStorageStatsAnswer answer = new BackupStorageStatsAnswer(command, true, null); + answer.setTotalSize(100L); + answer.setUsedSize(50L); + Mockito.when(agentManager.send(anyLong(), Mockito.any(GetBackupStorageStatsCommand.class))).thenReturn(answer); + + ablestackNasBackupProvider.syncBackupStorageStats(1L); + Mockito.verify(backupRepositoryDao, Mockito.times(1)).updateCapacity(backupRepository, 100L, 50L); + } + + @Test + public void testListBackupOfferings() { + BackupRepositoryVO backupRepository = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1024L, null); + ReflectionTestUtils.setField(backupRepository, "uuid", "uuid"); + + Mockito.when(backupRepositoryDao.listByZoneAndProvider(1L, "nas")).thenReturn(Collections.singletonList(backupRepository)); + + List result = ablestackNasBackupProvider.listBackupOfferings(1L); + Assert.assertEquals(1, result.size()); + Assert.assertEquals("test-repo", result.get(0).getName()); + Assert.assertEquals("uuid", result.get(0).getUuid()); + } + + @Test + public void testGetBackupStorageStats() { + BackupRepositoryVO backupRepository1 = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 1000L, null); + backupRepository1.setUsedBytes(500L); + + BackupRepositoryVO backupRepository2 = new BackupRepositoryVO(1L, "nas", "test-repo", + "nfs", "address", "sync", 2000L, null); + backupRepository2.setUsedBytes(600L); + + Mockito.when(backupRepositoryDao.listByZoneAndProvider(1L, "nas")) + .thenReturn(List.of(backupRepository1, backupRepository2)); + + Pair result = ablestackNasBackupProvider.getBackupStorageStats(1L); + 
Assert.assertEquals(Long.valueOf(1100L), result.first()); + Assert.assertEquals(Long.valueOf(3000L), result.second()); + } + + @Test + public void takeBackupSuccessfully() throws AgentUnavailableException, OperationTimedoutException { + Long vmId = 1L; + Long hostId = 2L; + Long backupOfferingId = 3L; + Long accountId = 4L; + Long domainId = 5L; + Long zoneId = 6L; + Long backupId = 7L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getId()).thenReturn(vmId); + Mockito.when(vm.getHostId()).thenReturn(hostId); + Mockito.when(vm.getInstanceName()).thenReturn("test-vm"); + Mockito.when(vm.getBackupOfferingId()).thenReturn(backupOfferingId); + Mockito.when(vm.getAccountId()).thenReturn(accountId); + Mockito.when(vm.getDomainId()).thenReturn(domainId); + Mockito.when(vm.getDataCenterId()).thenReturn(zoneId); + Mockito.when(vm.getState()).thenReturn(VMInstanceVO.State.Running); + + BackupRepository backupRepository = mock(BackupRepository.class); + Mockito.when(backupRepository.getType()).thenReturn("nfs"); + Mockito.when(backupRepository.getAddress()).thenReturn("address"); + Mockito.when(backupRepository.getMountOptions()).thenReturn("sync"); + Mockito.when(backupRepositoryDao.findByBackupOfferingId(backupOfferingId)).thenReturn(backupRepository); + + HostVO host = mock(HostVO.class); + Mockito.when(host.getId()).thenReturn(hostId); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + + VolumeVO volume1 = mock(VolumeVO.class); + Mockito.when(volume1.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume1.getSize()).thenReturn(100L); + VolumeVO volume2 = mock(VolumeVO.class); + Mockito.when(volume2.getState()).thenReturn(Volume.State.Ready); + Mockito.when(volume2.getSize()).thenReturn(200L); + Mockito.when(volumeDao.findByInstance(vmId)).thenReturn(List.of(volume1, volume2)); + + BackupAnswer 
answer = mock(BackupAnswer.class); + Mockito.when(answer.getResult()).thenReturn(true); + Mockito.when(answer.getSize()).thenReturn(100L); + Mockito.when(agentManager.send(anyLong(), Mockito.any(AblestackNasTakeBackupCommand.class))).thenReturn(answer); + + Mockito.when(backupDao.persist(Mockito.any(BackupVO.class))).thenAnswer(invocation -> invocation.getArgument(0)); + Mockito.when(backupDao.update(Mockito.anyLong(), Mockito.any(BackupVO.class))).thenReturn(true); + + Pair result = ablestackNasBackupProvider.takeBackup(vm, false); + + Assert.assertTrue(result.first()); + Assert.assertNotNull(result.second()); + BackupVO backup = (BackupVO) result.second(); + Assert.assertEquals(Optional.ofNullable(100L), Optional.ofNullable(backup.getSize())); + Assert.assertEquals(Backup.Status.BackedUp, backup.getStatus()); + Assert.assertEquals("FULL", backup.getType()); + Assert.assertEquals(Optional.of(300L), Optional.of(backup.getProtectedSize())); + Assert.assertEquals(Optional.of(backupOfferingId), Optional.of(backup.getBackupOfferingId())); + Assert.assertEquals(Optional.of(accountId), Optional.of(backup.getAccountId())); + Assert.assertEquals(Optional.of(domainId), Optional.of(backup.getDomainId())); + Assert.assertEquals(Optional.of(zoneId), Optional.of(backup.getZoneId())); + + Mockito.verify(backupDao).persist(Mockito.any(BackupVO.class)); + Mockito.verify(backupDao).update(Mockito.anyLong(), Mockito.any(BackupVO.class)); + Mockito.verify(agentManager).send(anyLong(), Mockito.any(AblestackNasTakeBackupCommand.class)); + } + + @Test + public void testGetVMHypervisorHost() { + Long hostId = 1L; + Long vmId = 1L; + Long zoneId = 1L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(hostId); + + HostVO host = mock(HostVO.class); + Mockito.when(host.getId()).thenReturn(hostId); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + + Host result = 
ablestackNasBackupProvider.getVMHypervisorHost(vm); + + Assert.assertNotNull(result); + Assert.assertTrue(Objects.equals(hostId, result.getId())); + Mockito.verify(hostDao).findById(hostId); + } + + @Test + public void testGetVMHypervisorHostWithHostDown() { + Long hostId = 1L; + Long clusterId = 2L; + Long vmId = 1L; + Long zoneId = 1L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(hostId); + + HostVO downHost = mock(HostVO.class); + Mockito.when(downHost.getStatus()).thenReturn(Status.Down); + Mockito.when(downHost.getClusterId()).thenReturn(clusterId); + Mockito.when(hostDao.findById(hostId)).thenReturn(downHost); + + HostVO upHostInCluster = mock(HostVO.class); + Mockito.when(upHostInCluster.getId()).thenReturn(3L); + Mockito.when(upHostInCluster.getStatus()).thenReturn(Status.Up); + Mockito.when(hostDao.findHypervisorHostInCluster(clusterId)).thenReturn(List.of(upHostInCluster)); + + Host result = ablestackNasBackupProvider.getVMHypervisorHost(vm); + + Assert.assertNotNull(result); + Assert.assertTrue(Objects.equals(Long.valueOf(3L), result.getId())); + Mockito.verify(hostDao).findById(hostId); + Mockito.verify(hostDao).findHypervisorHostInCluster(clusterId); + } + + @Test + public void testGetVMHypervisorHostWithUpHostViaRootVolumeCluster() { + Long vmId = 1L; + Long zoneId = 1L; + Long clusterId = 2L; + Long poolId = 3L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(null); + Mockito.when(vm.getId()).thenReturn(vmId); + + VolumeVO rootVolume = mock(VolumeVO.class); + Mockito.when(rootVolume.getPoolId()).thenReturn(poolId); + Mockito.when(volumeDao.getInstanceRootVolume(vmId)).thenReturn(rootVolume); + + StoragePoolVO storagePool = mock(StoragePoolVO.class); + Mockito.when(storagePool.getClusterId()).thenReturn(clusterId); + Mockito.when(storagePoolDao.findById(poolId)).thenReturn(storagePool); + + HostVO upHostInCluster = mock(HostVO.class); + 
Mockito.when(upHostInCluster.getId()).thenReturn(4L); + Mockito.when(upHostInCluster.getStatus()).thenReturn(Status.Up); + Mockito.when(hostDao.findHypervisorHostInCluster(clusterId)).thenReturn(List.of(upHostInCluster)); + + Host result = ablestackNasBackupProvider.getVMHypervisorHost(vm); + + Assert.assertNotNull(result); + Assert.assertTrue(Objects.equals(Long.valueOf(4L), result.getId())); + Mockito.verify(volumeDao).getInstanceRootVolume(vmId); + Mockito.verify(storagePoolDao).findById(poolId); + Mockito.verify(hostDao).findHypervisorHostInCluster(clusterId); + } + + @Test + public void testGetVMHypervisorHostFallbackToZoneWideKVMHost() { + Long hostId = 1L; + Long clusterId = 2L; + Long vmId = 1L; + Long zoneId = 1L; + + VMInstanceVO vm = mock(VMInstanceVO.class); + Mockito.when(vm.getLastHostId()).thenReturn(hostId); + Mockito.when(vm.getDataCenterId()).thenReturn(zoneId); + + HostVO downHost = mock(HostVO.class); + Mockito.when(downHost.getStatus()).thenReturn(Status.Down); + Mockito.when(downHost.getClusterId()).thenReturn(clusterId); + Mockito.when(hostDao.findById(hostId)).thenReturn(downHost); + + Mockito.when(hostDao.findHypervisorHostInCluster(clusterId)).thenReturn(Collections.emptyList()); + + HostVO fallbackHost = mock(HostVO.class); + Mockito.when(fallbackHost.getId()).thenReturn(5L); + Mockito.when(resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId)) + .thenReturn(fallbackHost); + + Host result = ablestackNasBackupProvider.getVMHypervisorHost(vm); + + Assert.assertNotNull(result); + Assert.assertTrue(Objects.equals(Long.valueOf(5L), result.getId())); + Mockito.verify(hostDao).findById(hostId); + Mockito.verify(hostDao).findHypervisorHostInCluster(clusterId); + Mockito.verify(resourceManager).findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 6350e5d28550..be862a947232 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -426,7 +426,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv private String vmActivityCheckPathRbd; private String vmActivityCheckPathClvm; private String nasBackupPath; - private String cvtBackupPath; + private String ableNasBackupPath; + private String ableCvtBackupPath; private String securityGroupPath; private String ovsPvlanDhcpHostPath; private String ovsPvlanVmPath; @@ -857,8 +858,12 @@ public String getNasBackupPath() { return nasBackupPath; } - public String getCvtBackupPath() { - return cvtBackupPath; + public String getAbleNasBackupPath() { + return ableNasBackupPath; + } + + public String getAbleCvtBackupPath() { + return ableCvtBackupPath; } public String getOvsPvlanDhcpHostPath() { @@ -1198,9 +1203,14 @@ public boolean configure(final String name, final Map params) th throw new ConfigurationException("Unable to find nasbackup.sh"); } - cvtBackupPath = Script.findScript(kvmScriptsDir, "cvtbackup.sh"); - if (cvtBackupPath == null) { - throw new ConfigurationException("Unable to find cvtbackup.sh"); + ableNasBackupPath = Script.findScript(kvmScriptsDir, "ablestack_nasbackup.sh"); + if (ableNasBackupPath == null) { + throw new ConfigurationException("Unable to find ablestack_nasbackup.sh"); + } + + ableCvtBackupPath = Script.findScript(kvmScriptsDir, "ablestack_cvtbackup.sh"); + if (ableCvtBackupPath == null) { + throw new ConfigurationException("Unable to find ablestack_cvtbackup.sh"); } createTmplPath = Script.findScript(storageScriptsDir, "createtmplt.sh"); diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultBackupHelper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultBackupHelper.java new file mode 100644 index 000000000000..df663addd2ce --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultBackupHelper.java @@ -0,0 +1,361 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.hypervisor.kvm.resource.LibvirtConnection; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.storage.Storage; +import com.cloud.utils.Pair; +import com.cloud.utils.script.Script; +import org.apache.cloudstack.backup.AblestackCommvaultTakeBackupCommand; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.libvirt.Connect; +import org.libvirt.Domain; +import org.libvirt.DomainInfo.DomainState; +import org.libvirt.LibvirtException; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +class LibvirtAblestackCommvaultBackupHelper { + protected Logger LOGGER = LogManager.getLogger(LibvirtAblestackCommvaultBackupHelper.class); + static final Integer EXIT_CLEANUP_FAILED = 20; + private static final int BACKUP_JOB_POLL_INTERVAL_MS = 10000; + + enum BackupExecutionMode { + RUNNING("backup-running"), + STOPPED("backup-stopped"), + RBD("backup-rbd"); + + private final String scriptOperation; + + BackupExecutionMode(String scriptOperation) { + this.scriptOperation = scriptOperation; + } + + String getScriptOperation() { + return scriptOperation; + } + } + + private final LibvirtComputingResource resource; + + LibvirtAblestackCommvaultBackupHelper(LibvirtComputingResource resource) { + this.resource = resource; + } + + Pair executeBackup(AblestackCommvaultTakeBackupCommand command) { + List diskPaths = resolveDiskPaths(command.getVolumePools(), command.getVolumePaths()); + BackupExecutionMode 
executionMode = determineExecutionMode(command.getVmName(), command.getVolumePools()); + LOGGER.debug("Commvault backup execution mode=[{}], vm=[{}], backupType=[{}], diskPaths=[{}]", + executionMode, command.getVmName(), command.getBackupType(), diskPaths); + if (BackupExecutionMode.STOPPED.equals(executionMode)) { + return executeStoppedVmBackup(command, diskPaths); + } + + List commands = new ArrayList<>(); + String[] scriptCommand = buildBackupScriptCommand(command, diskPaths, executionMode); + LOGGER.debug("Executing Commvault backup script command=[{}]", String.join(" ", scriptCommand)); + commands.add(scriptCommand); + return Script.executePipedCommands(commands, resource.getCmdsTimeout()); + } + + List resolveDiskPaths(List volumePools, List volumePaths) { + List diskPaths = new ArrayList<>(); + if (volumePaths == null) { + return diskPaths; + } + + KVMStoragePoolManager storagePoolMgr = resource.getStoragePoolMgr(); + for (int idx = 0; idx < volumePaths.size(); idx++) { + PrimaryDataStoreTO volumePool = volumePools.get(idx); + String volumePath = volumePaths.get(idx); + if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + diskPaths.add(volumePath); + continue; + } + + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + diskPaths.add(KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, volumePath)); + } + return diskPaths; + } + + private String[] buildBackupScriptCommand(AblestackCommvaultTakeBackupCommand command, List diskPaths, BackupExecutionMode executionMode) { + return new String[] { + resource.getAbleCvtBackupPath(), + "-o", executionMode.getScriptOperation(), + "-v", command.getVmName(), + "-p", command.getBackupPath(), + "-b", Objects.nonNull(command.getBackupType()) ? command.getBackupType() : "", + "-c", Objects.nonNull(command.getCheckpointName()) ? command.getCheckpointName() : "", + "-r", Objects.nonNull(command.getParentBackupPath()) ? 
command.getParentBackupPath() : "", + "-i", Objects.nonNull(command.getParentCheckpointName()) ? command.getParentCheckpointName() : "", + "-j", Objects.nonNull(command.getParentCheckpointPath()) ? command.getParentCheckpointPath() : "", + "-f", command.getBackupFiles() == null || command.getBackupFiles().isEmpty() ? "" : String.join(",", command.getBackupFiles()), + "-q", command.getQuiesce() != null && command.getQuiesce() ? "true" : "false", + "-d", diskPaths.isEmpty() ? "" : String.join(",", diskPaths) + }; + } + + private BackupExecutionMode determineExecutionMode(String vmName, List volumePools) { + if (volumePools != null && volumePools.stream().anyMatch(pool -> pool != null && pool.getPoolType() == Storage.StoragePoolType.RBD)) { + return BackupExecutionMode.RBD; + } + return isVmRunning(vmName) ? BackupExecutionMode.RUNNING : BackupExecutionMode.STOPPED; + } + + private boolean isVmRunning(String vmName) { + try { + Connect conn = LibvirtConnection.getConnectionByVmName(vmName); + Domain domain = resource.getDomain(conn, vmName); + return domain != null && DomainState.VIR_DOMAIN_RUNNING.equals(domain.getInfo().state); + } catch (LibvirtException e) { + return false; + } + } + + private Pair executeStoppedVmBackup(AblestackCommvaultTakeBackupCommand command, List diskPaths) { + String dummyVmName = String.format("DUMMY-VM-%s", command.getCheckpointName().replace('.', '-')); + Path dest = Path.of(command.getBackupPath()); + Connect conn = null; + try { + LOGGER.info("Starting stopped VM Commvault backup for vm=[{}], dummyVm=[{}], backupType=[{}]", + command.getVmName(), dummyVmName, command.getBackupType()); + validateStoppedBackupDiskPaths(diskPaths); + if (isIncremental(command)) { + resource.validateLibvirtAndQemuVersionForIncrementalSnapshots(); + } + Files.createDirectories(dest.resolve("checkpoints")); + + conn = LibvirtConnection.getConnection(); + String dummyVmXml = buildDummyVmXml(dummyVmName, diskPaths); + resource.startVM(conn, dummyVmName, 
dummyVmXml, Domain.CreateFlags.PAUSED); + + if (isIncremental(command) && command.getParentCheckpointPath() != null && !command.getParentCheckpointPath().isEmpty()) { + redefineCheckpointIfNeeded(dummyVmName, Path.of(command.getParentCheckpointPath())); + } + + List diskLabels = getDiskLabels(conn, dummyVmName); + Path backupXml = writeBackupXml(dest, command, diskLabels); + Path checkpointXml = writeCheckpointXml(dest, command, diskLabels); + + String backupBeginCommand = String.format("virsh -c qemu:///system backup-begin --domain %s --backupxml %s --checkpointxml %s", + shellQuote(dummyVmName), shellQuote(backupXml.toString()), shellQuote(checkpointXml.toString())); + LOGGER.debug("Starting stopped VM Commvault backup-begin command=[{}]", backupBeginCommand); + if (Script.runSimpleBashScriptForExitValue(backupBeginCommand, resource.getCmdsTimeout(), false) != 0) { + LOGGER.error("Failed to start backup for stopped VM Commvault dummy domain [{}]", dummyVmName); + return new Pair<>(1, "Failed to start backup for dummy VM " + dummyVmName); + } + + try { + waitForBackup(dummyVmName); + } catch (IOException e) { + cancelBackupJob(dummyVmName); + throw e; + } + + if (isIncremental(command) && command.getParentBackupPath() != null && !command.getParentBackupPath().isEmpty()) { + rebaseIncrementalChain(dest, command, diskPaths); + } + + dumpCheckpointXml(dummyVmName, command.getCheckpointName(), dest); + Files.deleteIfExists(backupXml); + Files.deleteIfExists(checkpointXml); + Script.runSimpleBashScriptForExitValue("sync", resource.getCmdsTimeout(), false); + LOGGER.info("Completed stopped VM Commvault backup for vm=[{}], dummyVm=[{}]", command.getVmName(), dummyVmName); + return new Pair<>(0, "success"); + } catch (Exception e) { + LOGGER.error("Stopped VM Commvault backup failed for vm=[{}], dummyVm=[{}] due to: {}", + command.getVmName(), dummyVmName, e.getMessage(), e); + return new Pair<>(1, e.getMessage()); + } finally { + cleanupDummyVm(dummyVmName); + } + } + + 
private String buildDummyVmXml(String vmName, List diskPaths) { + String arch = resource.getGuestCpuArch() != null ? resource.getGuestCpuArch() : "x86_64"; + String machine = resource.isGuestAarch64() ? LibvirtComputingResource.VIRT : LibvirtComputingResource.PC; + String emulator = resource.getHypervisorPath(); + StringBuilder xml = new StringBuilder(); + xml.append("") + .append("").append(vmName).append("") + .append("256") + .append("256") + .append("1") + .append("hvm") + .append("").append(emulator).append(""); + for (int i = 0; i < diskPaths.size(); i++) { + char letter = (char) ('a' + i); + String diskPath = diskPaths.get(i); + xml.append("") + .append("") + .append("") + .append(""); + } + xml.append(""); + return xml.toString(); + } + + private void validateStoppedBackupDiskPaths(List diskPaths) { + if (diskPaths.stream().anyMatch(path -> path != null && path.startsWith("rbd:"))) { + throw new IllegalArgumentException("Stopped VM dummy backup flow supports only file-backed disks. 
RBD backups must use the dedicated RBD backup path."); + } + } + + private void redefineCheckpointIfNeeded(String vmName, Path checkpointPath) throws IOException { + if (!Files.exists(checkpointPath)) { + return; + } + String checkpointName = checkpointPath.getFileName().toString().replace(".xml", ""); + int infoExit = Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system checkpoint-info --domain %s --checkpointname %s > /dev/null 2>&1", + shellQuote(vmName), shellQuote(checkpointName))); + if (infoExit == 0) { + return; + } + int redefineExit = Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system checkpoint-create --domain %s --xmlfile %s --redefine > /dev/null 2>&1", + shellQuote(vmName), shellQuote(checkpointPath.toString()))); + if (redefineExit != 0) { + throw new IOException("Failed to redefine checkpoint " + checkpointName + " on domain " + vmName); + } + } + + private List getDiskLabels(Connect conn, String vmName) { + return resource.getDisks(conn, vmName).stream() + .map(d -> d.getDiskLabel()) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + } + + private Path writeBackupXml(Path dest, AblestackCommvaultTakeBackupCommand command, List diskLabels) throws IOException { + StringBuilder xml = new StringBuilder(""); + for (int i = 0; i < diskLabels.size(); i++) { + String backupFile = getBackupFileByIndex(command, i, String.format("volume-%d.qcow2", i)); + xml.append("") + .append(""); + if (isIncremental(command) && command.getParentCheckpointName() != null && !command.getParentCheckpointName().isEmpty()) { + xml.append("").append(command.getParentCheckpointName()).append(""); + } + xml.append(""); + } + xml.append(""); + Path backupXml = dest.resolve("backup.xml"); + Files.writeString(backupXml, xml.toString(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING); + return backupXml; + } + + private Path writeCheckpointXml(Path dest, AblestackCommvaultTakeBackupCommand command, 
List diskLabels) throws IOException { + StringBuilder xml = new StringBuilder("").append(command.getCheckpointName()).append(""); + for (String diskLabel : diskLabels) { + xml.append(""); + } + xml.append(""); + Path checkpointXml = dest.resolve("checkpoint.xml"); + Files.writeString(checkpointXml, xml.toString(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING); + return checkpointXml; + } + + private void waitForBackup(String vmName) throws IOException { + int timeout = resource.getCmdsTimeout(); + while (timeout > 0) { + String result = checkBackupJob(vmName); + if (result != null && result.contains("Completed") && result.contains("Backup")) { + return; + } + if (result != null && result.contains("Failed")) { + throw new IOException("Virsh backup job failed for dummy VM " + vmName); + } + timeout -= BACKUP_JOB_POLL_INTERVAL_MS; + try { + Thread.sleep(BACKUP_JOB_POLL_INTERVAL_MS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException(e); + } + } + throw new IOException("Timed out waiting for backup job of dummy VM " + vmName); + } + + private void cancelBackupJob(String vmName) { + Script.runSimpleBashScriptForExitValue(String.format("virsh -c qemu:///system domjobabort --domain %s > /dev/null 2>&1", shellQuote(vmName))); + } + + private String checkBackupJob(String vmName) { + return Script.runSimpleBashScriptWithFullResult( + String.format("virsh -c qemu:///system domjobinfo %s --completed --keep-completed", shellQuote(vmName)), 10); + } + + private void rebaseIncrementalChain(Path dest, AblestackCommvaultTakeBackupCommand command, List diskPaths) throws IOException { + for (int i = 0; i < diskPaths.size(); i++) { + String backupFile = getBackupFileByIndex(command, i, String.format("volume-%d.qcow2", i)); + int exit = Script.runSimpleBashScriptForExitValue(String.format( + "qemu-img rebase -u -F qcow2 -b %s %s", + shellQuote(Path.of(command.getParentBackupPath(), backupFile).toString()), + 
shellQuote(dest.resolve(backupFile).toString())), resource.getCmdsTimeout(), false); + if (exit != 0) { + throw new IOException("qemu-img rebase failed for " + backupFile); + } + } + } + + private void dumpCheckpointXml(String vmName, String checkpointName, Path dest) { + Path checkpointDest = dest.resolve("checkpoints").resolve(checkpointName + ".xml"); + Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system checkpoint-dumpxml --domain %s --checkpointname %s --no-domain > %s 2>/dev/null", + shellQuote(vmName), shellQuote(checkpointName), shellQuote(checkpointDest.toString()))); + } + + private void cleanupDummyVm(String dummyVmName) { + Script.runSimpleBashScriptForExitValue(String.format("virsh -c qemu:///system destroy %s > /dev/null 2>&1 || true", shellQuote(dummyVmName))); + Script.runSimpleBashScriptForExitValue(String.format( + "virsh -c qemu:///system undefine %s --nvram > /dev/null 2>&1 || virsh -c qemu:///system undefine %s > /dev/null 2>&1 || true", + shellQuote(dummyVmName), shellQuote(dummyVmName))); + } + + private boolean isIncremental(AblestackCommvaultTakeBackupCommand command) { + return "INCREMENTAL".equalsIgnoreCase(command.getBackupType()); + } + + private String getBackupFileByIndex(AblestackCommvaultTakeBackupCommand command, int index, String fallback) { + List backupFiles = command.getBackupFiles(); + if (backupFiles == null || index >= backupFiles.size()) { + return fallback; + } + return backupFiles.get(index); + } + + private String shellQuote(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultRestoreBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultRestoreBackupCommandWrapper.java new file mode 100644 index 000000000000..a407cec9916b --- /dev/null +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultRestoreBackupCommandWrapper.java @@ -0,0 +1,828 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.storage.Storage; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.AblestackCommvaultRestoreBackupCommand; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.commons.io.FileUtils; 
+import org.apache.commons.lang3.StringUtils; +import org.libvirt.LibvirtException; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +@ResourceWrapper(handles = AblestackCommvaultRestoreBackupCommand.class) +public class LibvirtAblestackCommvaultRestoreBackupCommandWrapper extends CommandWrapper { + private static final String FILE_PATH_PLACEHOLDER = "%s/%s"; + private static final String ATTACH_QCOW2_DISK_COMMAND = " virsh attach-disk %s %s %s --driver qemu --subdriver qcow2 --cache none"; + private static final String ATTACH_RBD_DISK_XML_COMMAND = " virsh attach-device %s /dev/stdin < backedVolumeUUIDs = command.getBackupVolumesUUIDs(); + List backupFiles = command.getBackupFiles(); + List backupFileChains = command.getBackupFileChains(); + List restoreVolumePools = command.getRestoreVolumePools(); + List restoreVolumePaths = command.getRestoreVolumePaths(); + String restoreVolumeUuid = command.getRestoreVolumeUUID(); + int timeout = command.getTimeout(); + String cacheMode = command.getCacheMode(); + String hostName = command.getHostName(); + List backupSourceHosts = command.getBackupSourceHosts(); + KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr(); + + String newVolumeId = null; + try { + if (hostName != null) { + fetchBackupFile(hostName, backupPath); + } + if (backupSourceHosts != null && !backupSourceHosts.isEmpty()) { + LinkedHashSet sourceHosts = new LinkedHashSet<>(backupSourceHosts); + for (String sourceHost : sourceHosts) { + if (StringUtils.isBlank(sourceHost) || Objects.equals(sourceHost, hostName)) { + continue; + } + fetchBackupFile(sourceHost, backupPath); + } + } + if (Objects.isNull(vmExists)) { + PrimaryDataStoreTO volumePool = restoreVolumePools.get(0); + String volumePath 
= restoreVolumePaths.get(0); + int lastIndex = volumePath.lastIndexOf("/"); + newVolumeId = volumePath.substring(lastIndex + 1); + restoreVolume(storagePoolMgr, backupPath, volumePool, volumePath, diskType, restoreVolumeUuid, backupFiles, backupFileChains, + new Pair<>(vmName, command.getVmState()), timeout, cacheMode); + } else if (Boolean.TRUE.equals(vmExists)) { + restoreVolumesOfExistingVM(storagePoolMgr, restoreVolumePools, restoreVolumePaths, backedVolumeUUIDs, backupPath, backupFiles, backupFileChains, timeout); + } else { + restoreVolumesOfDestroyedVMs(storagePoolMgr, restoreVolumePools, restoreVolumePaths, vmName, backupPath, backupFiles, backupFileChains, timeout); + } + } catch (CloudRuntimeException e) { + String errorMessage = e.getMessage() != null ? e.getMessage() : ""; + return new BackupAnswer(command, false, errorMessage); + } + + return new BackupAnswer(command, true, newVolumeId); + } + + private void restoreVolumesOfExistingVM(KVMStoragePoolManager storagePoolMgr, List restoreVolumePools, List restoreVolumePaths, List backedVolumesUUIDs, + String backupPath, List backupFiles, List backupFileChains, int timeout) { + String diskType = "root"; + try { + for (int idx = 0; idx < restoreVolumePaths.size(); idx++) { + PrimaryDataStoreTO restoreVolumePool = restoreVolumePools.get(idx); + String restoreVolumePath = restoreVolumePaths.get(idx); + String backupVolumeUuid = backedVolumesUUIDs.get(idx); + List localBackupPaths = getLocalBackupPaths(backupPath, backupFiles, backupFileChains, idx, getLegacyBackupFileName(diskType, backupVolumeUuid)); + diskType = "datadisk"; + if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, restoreVolumePath, localBackupPaths, timeout, backupPath, idx)) { + throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", backupVolumeUuid)); + } + } + } finally { + deleteBackupDirectory(backupPath); + } + } + + private void 
restoreVolumesOfDestroyedVMs(KVMStoragePoolManager storagePoolMgr, List volumePools, List volumePaths, String vmName, String backupPath, + List backupFiles, List backupFileChains, int timeout) { + String diskType = "root"; + try { + for (int i = 0; i < volumePaths.size(); i++) { + PrimaryDataStoreTO volumePool = volumePools.get(i); + String volumePath = volumePaths.get(i); + String volumeUuid = volumePath.substring(volumePath.lastIndexOf(File.separator) + 1); + List localBackupPaths = getLocalBackupPaths(backupPath, backupFiles, backupFileChains, i, getLegacyBackupFileName(diskType, volumeUuid)); + diskType = "datadisk"; + if (!replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, localBackupPaths, timeout, backupPath, i)) { + throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", volumeUuid)); + } + } + } finally { + deleteBackupDirectory(backupPath); + } + } + + private void restoreVolume(KVMStoragePoolManager storagePoolMgr, String backupPath, PrimaryDataStoreTO volumePool, String volumePath, String diskType, String volumeUUID, + List backupFiles, List backupFileChains, + Pair vmNameAndState, int timeout, String cacheMode) { + try { + List localBackupPaths = getLocalBackupPaths(backupPath, backupFiles, backupFileChains, 0, getLegacyBackupFileName(diskType, volumeUUID)); + if (!replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, localBackupPaths, timeout, backupPath, 0, true)) { + throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", volumeUUID)); + } + } finally { + deleteBackupDirectory(backupPath); + } + } + + private void deleteBackupDirectory(String backupDirectory) { + try { + FileUtils.deleteDirectory(new File(backupDirectory)); + } catch (IOException e) { + logger.error(String.format("Failed to delete backup directory: %s", backupDirectory), e); + throw new CloudRuntimeException("Failed to delete the backup directory"); + } + } + + 
private List getLocalBackupPaths(String backupPath, List backupFiles, List backupFileChains, int index, String legacyBackupFileName) { + List localPaths = new ArrayList<>(); + if (backupFileChains != null && backupFileChains.size() > index && StringUtils.isNotBlank(backupFileChains.get(index))) { + for (String chainPath : backupFileChains.get(index).split(";")) { + if (StringUtils.isBlank(chainPath)) { + continue; + } + localPaths.add(resolveBackupPath(backupPath, chainPath)); + } + } + if (localPaths.isEmpty() && backupFiles != null && backupFiles.size() > index && StringUtils.isNotBlank(backupFiles.get(index))) { + localPaths.add(resolveBackupPath(backupPath, backupFiles.get(index))); + } + if (localPaths.isEmpty()) { + localPaths.add(String.format(FILE_PATH_PLACEHOLDER, backupPath, legacyBackupFileName)); + } + return localPaths; + } + + private String resolveBackupPath(String backupPath, String chainPath) { + if (chainPath.startsWith("/")) { + return chainPath; + } + if (chainPath.contains("/")) { + return String.format(FILE_PATH_PLACEHOLDER, backupPath, chainPath); + } + return String.format(FILE_PATH_PLACEHOLDER, backupPath, chainPath); + } + + private String getLegacyBackupFileName(String diskType, String volumeUuid) { + return String.format("%s.%s.qcow2", diskType.toLowerCase(Locale.ROOT), volumeUuid); + } + + private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, + String backupRootPath, int backupIndex) { + return replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, backupRootPath, backupIndex, false); + } + + private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, + String backupRootPath, int backupIndex, boolean createTargetVolume) { + if (backupPaths == null || backupPaths.isEmpty()) { + return false; + } + if 
(volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + if (backupPaths.stream().anyMatch(path -> path.endsWith(".rbdiff"))) { + return restoreIncrementalRbdBackupChainToFileVolume(volumePath, backupPaths, timeout, backupRootPath, backupIndex); + } + return replaceFileVolumeWithBackup(volumePath, getLastExistingBackupPath(backupPaths), timeout); + } + + return replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, createTargetVolume); + } + + private boolean restoreIncrementalRbdBackupChainToFileVolume(String volumePath, List backupPaths, int timeout, String backupRootPath, int backupIndex) { + if (StringUtils.isBlank(backupRootPath)) { + throw new CloudRuntimeException("Unable to locate backup root path for incremental RBD restore"); + } + RbdImageSpec sourceImage = getRbdImageSpecFromMetadata(backupRootPath, backupIndex); + String tempImage = sourceImage.buildTempImageSpec(); + try { + if (!importBackupChainToTemporaryRbd(backupPaths, timeout, sourceImage, tempImage)) { + return false; + } + return convertTemporaryRbdToFileVolume(volumePath, timeout, sourceImage, tempImage); + } finally { + removeTemporaryRbdImage(sourceImage, tempImage, timeout); + } + } + + private String getFirstExistingBackupPath(List backupPaths) { + for (String backupPath : backupPaths) { + if (StringUtils.isNotBlank(backupPath) && Files.exists(Paths.get(backupPath))) { + return backupPath; + } + } + return backupPaths.get(0); + } + + private String getLastExistingBackupPath(List backupPaths) { + for (int i = backupPaths.size() - 1; i >= 0; i--) { + String backupPath = backupPaths.get(i); + if (StringUtils.isNotBlank(backupPath) && Files.exists(Paths.get(backupPath))) { + return backupPath; + } + } + return backupPaths.get(backupPaths.size() - 1); + } + + private boolean replaceFileVolumeWithBackup(String volumePath, String backupPath, int timeout) { + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + QemuImg qemu = new 
QemuImg(timeout * 1000, true, false); + srcBackupFile = new QemuImgFile(backupPath, getBackupFileFormat(backupPath)); + destVolumeFile = new QemuImgFile(volumePath, getFileVolumeFormat(volumePath)); + qemu.convert(srcBackupFile, destVolumeFile); + return true; + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : null; + String destFilename = destVolumeFile != null ? destVolumeFile.getFileName() : null; + logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + } + + private boolean convertTemporaryRbdToFileVolume(String volumePath, int timeout, RbdImageSpec sourceImage, String tempImage) { + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + QemuImg qemu = new QemuImg(timeout * 1000, true, false); + srcBackupFile = new QemuImgFile(sourceImage.buildQemuUri(tempImage), QemuImg.PhysicalDiskFormat.RAW); + destVolumeFile = new QemuImgFile(volumePath, getFileVolumeFormat(volumePath)); + qemu.convert(srcBackupFile, destVolumeFile); + return true; + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : tempImage; + String destFilename = destVolumeFile != null ? 
destVolumeFile.getFileName() : volumePath; + logger.error("Failed to convert temporary RBD {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + } + + private QemuImg.PhysicalDiskFormat getBackupFileFormat(String backupPath) { + if (backupPath.endsWith(".raw")) { + return QemuImg.PhysicalDiskFormat.RAW; + } + return QemuImg.PhysicalDiskFormat.QCOW2; + } + + private QemuImg.PhysicalDiskFormat getFileVolumeFormat(String volumePath) { + if (!Files.exists(Paths.get(volumePath))) { + return QemuImg.PhysicalDiskFormat.QCOW2; + } + try { + QemuImg qemu = new QemuImg(0); + java.util.Map info = qemu.info(new QemuImgFile(volumePath)); + String format = info.get("file_format"); + if (StringUtils.isNotBlank(format)) { + return QemuImg.PhysicalDiskFormat.valueOf(format.toUpperCase(Locale.ROOT)); + } + } catch (QemuImgException | LibvirtException | IllegalArgumentException e) { + logger.warn("Failed to detect file volume format for path {}. Falling back to qcow2.", volumePath, e); + } + return QemuImg.PhysicalDiskFormat.QCOW2; + } + + private boolean replaceRbdVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, boolean createTargetVolume) { + if (backupPaths.stream().anyMatch(path -> path.endsWith(".rbdiff"))) { + return restoreIncrementalRbdBackupChain(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, createTargetVolume); + } + + String backupPath = getFirstExistingBackupPath(backupPaths); + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + String normalizedVolumePath = normalizeRbdVolumePath(volumePath, volumeStoragePool); + if (getBackupFileFormat(backupPath) == QemuImg.PhysicalDiskFormat.RAW) { + return importRawBackupToRbd(volumeStoragePool, normalizedVolumePath, backupPath, timeout, createTargetVolume); + } + + QemuImg qemu; + try { + qemu = new QemuImg(timeout * 
1000, true, false); + if (!createTargetVolume) { + KVMPhysicalDisk rdbDisk = volumeStoragePool.getPhysicalDisk(normalizedVolumePath); + logger.debug("Restoring RBD volume: {}", rdbDisk.toString()); + qemu.setSkipTargetVolumeCreation(true); + } + } catch (LibvirtException ex) { + throw new CloudRuntimeException("Failed to create qemu-img command to restore RBD volume with backup", ex); + } + + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + srcBackupFile = new QemuImgFile(backupPath, getBackupFileFormat(backupPath)); + String rbdDestVolumeFile = KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, normalizedVolumePath); + destVolumeFile = new QemuImgFile(rbdDestVolumeFile, QemuImg.PhysicalDiskFormat.RAW); + + logger.debug("Starting convert backup {} to RBD volume {}", backupPath, normalizedVolumePath); + qemu.convert(srcBackupFile, destVolumeFile); + logger.debug("Successfully converted backup {} to RBD volume {}", backupPath, normalizedVolumePath); + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : null; + String destFilename = destVolumeFile != null ? 
destVolumeFile.getFileName() : null; + logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + + return true; + } + + private boolean importRawBackupToRbd(KVMStoragePool volumeStoragePool, String volumePath, String backupPath, int timeout, boolean createTargetVolume) { + if (!createTargetVolume && !volumeStoragePool.deletePhysicalDisk(volumePath, Storage.ImageFormat.RAW)) { + logger.error("Failed to delete existing RBD volume {} before raw import", volumePath); + return false; + } + + String importCommand = buildRbdImportCommand(volumeStoragePool, backupPath, volumePath); + if (Script.runSimpleBashScriptForExitValue(importCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import raw backup {} into volume {}", backupPath, volumePath); + return false; + } + return true; + } + + private boolean restoreIncrementalRbdBackupChain(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, + int timeout, boolean createTargetVolume) { + if (backupPaths.isEmpty() || !backupPaths.get(0).endsWith(".raw")) { + throw new CloudRuntimeException("Incremental RBD backup chain is missing the base full backup"); + } + + String normalizedVolumePath = normalizeRbdVolumePath(volumePath, storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid())); + if (!replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, normalizedVolumePath, List.of(backupPaths.get(0)), timeout, createTargetVolume)) { + return false; + } + + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + List restoreSnapshots = new ArrayList<>(); + try { + Map baseMetadata = readRbdBackupMetadata(backupPaths.get(0)); + String baseCheckpoint = baseMetadata.get("checkpoint_name"); + if (StringUtils.isNotBlank(baseCheckpoint)) { + if (!ensureRbdSnapshotExists(volumeStoragePool, normalizedVolumePath, 
baseCheckpoint, timeout)) { + return false; + } + restoreSnapshots.add(baseCheckpoint); + } + + for (int index = 1; index < backupPaths.size(); index++) { + String backupPath = backupPaths.get(index); + if (!backupPath.endsWith(".rbdiff")) { + continue; + } + Map metadata = readRbdBackupMetadata(backupPath); + String parentCheckpoint = metadata.get("parent_checkpoint_name"); + String checkpoint = metadata.get("checkpoint_name"); + if (StringUtils.isBlank(parentCheckpoint) || StringUtils.isBlank(checkpoint)) { + throw new CloudRuntimeException(String.format("RBD incremental backup metadata is incomplete for %s", backupPath)); + } + if (!rbdSnapshotExists(volumeStoragePool, normalizedVolumePath, parentCheckpoint, timeout)) { + throw new CloudRuntimeException(String.format("Required parent snapshot %s is missing on volume %s", parentCheckpoint, normalizedVolumePath)); + } + String importDiffCommand = buildRbdImportDiffCommand(volumeStoragePool, backupPath, normalizedVolumePath); + if (Script.runSimpleBashScriptForExitValue(importDiffCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import RBD diff {} into volume {}", backupPath, normalizedVolumePath); + return false; + } + if (!ensureRbdSnapshotExists(volumeStoragePool, normalizedVolumePath, checkpoint, timeout)) { + return false; + } + restoreSnapshots.add(checkpoint); + } + return true; + } finally { + cleanupRbdRestoreSnapshots(volumeStoragePool, normalizedVolumePath, restoreSnapshots, timeout); + } + } + + private String normalizeRbdVolumePath(String volumePath, KVMStoragePool storagePool) { + if (StringUtils.isBlank(volumePath)) { + return volumePath; + } + String normalized = volumePath; + String poolPath = storagePool.getSourceDir(); + if (StringUtils.isNotBlank(poolPath)) { + String poolPrefix = poolPath + "/"; + if (normalized.startsWith(poolPrefix)) { + normalized = normalized.substring(poolPrefix.length()); + } + } + if (normalized.startsWith("/")) { + normalized = 
normalized.substring(normalized.lastIndexOf('/') + 1); + } + return normalized; + } + + private String buildRbdImportDiffCommand(KVMStoragePool storagePool, String backupPath, String volumePath) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" import-diff ").append(backupPath).append(" ").append(volumePath); + return command.toString(); + } + + private String buildRbdImportCommand(KVMStoragePool storagePool, String backupPath, String volumePath) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" import ").append(backupPath).append(" ").append(volumePath); + return command.toString(); + } + + private String formatRbdMonHosts(String hosts, int port) { + String[] hostValues = hosts.split(","); + List formattedHosts = new ArrayList<>(); + for (String host : hostValues) { + String normalizedHost = host.replace("[", "").replace("]", "").trim(); + if (StringUtils.isBlank(normalizedHost)) { + continue; + } + formattedHosts.add(port > 0 ? 
normalizedHost + ":" + port : normalizedHost); + } + return String.join(",", formattedHosts); + } + + private boolean importBackupChainToTemporaryRbd(List backupPaths, int timeout, RbdImageSpec sourceImage, String tempImage) { + if (backupPaths.isEmpty() || !backupPaths.get(0).endsWith(".raw")) { + throw new CloudRuntimeException("Incremental RBD backup chain is missing the base full backup"); + } + String importCommand = sourceImage.buildRbdCommand("import", quote(backupPaths.get(0)), quote(tempImage)); + if (Script.runSimpleBashScriptForExitValue(importCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import base RBD backup {} into temporary image {}", backupPaths.get(0), tempImage); + return false; + } + List restoreSnapshots = new ArrayList<>(); + try { + Map baseMetadata = readRbdBackupMetadata(backupPaths.get(0)); + String baseCheckpoint = baseMetadata.get("checkpoint_name"); + if (StringUtils.isNotBlank(baseCheckpoint)) { + if (!ensureRbdSnapshotExists(sourceImage, tempImage, baseCheckpoint, timeout)) { + return false; + } + restoreSnapshots.add(baseCheckpoint); + } + for (int index = 1; index < backupPaths.size(); index++) { + String backupPath = backupPaths.get(index); + if (!backupPath.endsWith(".rbdiff")) { + continue; + } + Map metadata = readRbdBackupMetadata(backupPath); + String parentCheckpoint = metadata.get("parent_checkpoint_name"); + String checkpoint = metadata.get("checkpoint_name"); + if (StringUtils.isBlank(parentCheckpoint) || StringUtils.isBlank(checkpoint)) { + throw new CloudRuntimeException(String.format("RBD incremental backup metadata is incomplete for %s", backupPath)); + } + if (!rbdSnapshotExists(sourceImage, tempImage, parentCheckpoint, timeout)) { + throw new CloudRuntimeException(String.format("Required parent snapshot %s is missing on temporary image %s", parentCheckpoint, tempImage)); + } + String importDiffCommand = sourceImage.buildRbdCommand("import-diff", quote(backupPath), quote(tempImage)); + if 
(Script.runSimpleBashScriptForExitValue(importDiffCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import RBD diff {} into temporary image {}", backupPath, tempImage); + return false; + } + if (!ensureRbdSnapshotExists(sourceImage, tempImage, checkpoint, timeout)) { + return false; + } + restoreSnapshots.add(checkpoint); + } + return true; + } finally { + cleanupRbdRestoreSnapshots(sourceImage, tempImage, restoreSnapshots, timeout); + } + } + + private Map readRbdBackupMetadata(String backupPath) { + java.nio.file.Path metadataPath = Paths.get(backupPath).getParent().resolve("rbd-backup.meta"); + if (!Files.exists(metadataPath)) { + throw new CloudRuntimeException(String.format("RBD backup metadata file not found: %s", metadataPath)); + } + try { + return Files.readAllLines(metadataPath).stream() + .map(String::trim) + .filter(line -> !line.isEmpty() && line.contains("=")) + .map(line -> line.split("=", 2)) + .collect(java.util.stream.Collectors.toMap(parts -> parts[0], parts -> parts[1], (left, right) -> right)); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Failed to read RBD backup metadata: %s", metadataPath), e); + } + } + + private boolean ensureRbdSnapshotExists(KVMStoragePool storagePool, String volumePath, String snapshotName, int timeout) { + if (rbdSnapshotExists(storagePool, volumePath, snapshotName, timeout)) { + return true; + } + String createSnapshotCommand = buildRbdSnapshotCommand(storagePool, "snap create", volumePath + "@" + snapshotName); + if (Script.runSimpleBashScriptForExitValue(createSnapshotCommand, timeout * 1000, false) != 0) { + logger.error("Failed to create RBD snapshot {} on volume {}", snapshotName, volumePath); + return false; + } + return true; + } + + private boolean ensureRbdSnapshotExists(RbdImageSpec imageSpec, String image, String snapshotName, int timeout) { + if (rbdSnapshotExists(imageSpec, image, snapshotName, timeout)) { + return true; + } + String createSnapshotCommand = 
imageSpec.buildRbdCommand("snap", "create", quote(image + "@" + snapshotName)); + if (Script.runSimpleBashScriptForExitValue(createSnapshotCommand, timeout * 1000, false) != 0) { + logger.error("Failed to create RBD snapshot {} on image {}", snapshotName, image); + return false; + } + return true; + } + + private boolean rbdSnapshotExists(KVMStoragePool storagePool, String volumePath, String snapshotName, int timeout) { + String existsCommand = buildRbdSnapshotCommand(storagePool, "snap ls", volumePath) + " | awk 'NR>1 {print $2}' | grep -Fx " + quote(snapshotName); + return Script.runSimpleBashScriptForExitValue(existsCommand, timeout * 1000, false) == 0; + } + + private boolean rbdSnapshotExists(RbdImageSpec imageSpec, String image, String snapshotName, int timeout) { + String existsCommand = imageSpec.buildRbdCommand("snap", "ls", quote(image)) + " | awk 'NR>1 {print $2}' | grep -Fx " + quote(snapshotName); + return Script.runSimpleBashScriptForExitValue(existsCommand, timeout * 1000, false) == 0; + } + + private void cleanupRbdRestoreSnapshots(KVMStoragePool storagePool, String volumePath, List snapshotNames, int timeout) { + for (int index = snapshotNames.size() - 1; index >= 0; index--) { + String snapshotName = snapshotNames.get(index); + String removeSnapshotCommand = buildRbdSnapshotCommand(storagePool, "snap rm", volumePath + "@" + snapshotName); + Script.runSimpleBashScriptForExitValue(removeSnapshotCommand, timeout * 1000, false); + } + } + + private void cleanupRbdRestoreSnapshots(RbdImageSpec imageSpec, String image, List snapshotNames, int timeout) { + for (int index = snapshotNames.size() - 1; index >= 0; index--) { + String snapshotName = snapshotNames.get(index); + String removeSnapshotCommand = imageSpec.buildRbdCommand("snap", "rm", quote(image + "@" + snapshotName)); + Script.runSimpleBashScriptForExitValue(removeSnapshotCommand, timeout * 1000, false); + } + } + + private String buildRbdSnapshotCommand(KVMStoragePool storagePool, String 
action, String target) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" ").append(action).append(" ").append(target); + return command.toString(); + } + + private void removeTemporaryRbdImage(RbdImageSpec sourceImage, String tempImage, int timeout) { + String removeCommand = sourceImage.buildRbdCommand("rm", quote(tempImage)); + Script.runSimpleBashScriptForExitValue(removeCommand, timeout * 1000, false); + } + + private RbdImageSpec getRbdImageSpecFromMetadata(String backupRootPath, int backupIndex) { + java.nio.file.Path metadataPath = Paths.get(backupRootPath, "rbd-backup.meta"); + if (!Files.exists(metadataPath)) { + throw new CloudRuntimeException(String.format("RBD backup metadata file not found: %s", metadataPath)); + } + try { + java.util.Map metadata = Files.readAllLines(metadataPath).stream() + .map(String::trim) + .filter(line -> !line.isEmpty() && line.contains("=")) + .map(line -> line.split("=", 2)) + .collect(java.util.stream.Collectors.toMap(parts -> parts[0], parts -> parts[1], (left, right) -> right)); + String diskPaths = metadata.get("disk_paths"); + if (StringUtils.isBlank(diskPaths)) { + throw new CloudRuntimeException("RBD backup metadata does not contain disk_paths"); + } + List values = Arrays.asList(diskPaths.split(",")); + if (backupIndex >= values.size()) { + throw new CloudRuntimeException(String.format("RBD backup metadata does not contain disk path for index %d", backupIndex)); + } + return RbdImageSpec.fromUri(values.get(backupIndex)); + } catch (IOException e) { + throw new 
CloudRuntimeException(String.format("Failed to read RBD backup metadata: %s", metadataPath), e); + } + } + + private String quote(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } + + private boolean attachVolumeToVm(KVMStoragePoolManager storagePoolMgr, String vmName, PrimaryDataStoreTO volumePool, String volumePath, String cacheMode) { + String deviceToAttachDiskTo = getDeviceToAttachDisk(vmName); + int exitValue; + if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_QCOW2_DISK_COMMAND, vmName, volumePath, deviceToAttachDiskTo)); + } else { + String xmlForRbdDisk = getXmlForRbdDisk(storagePoolMgr, volumePool, volumePath, deviceToAttachDiskTo, cacheMode); + logger.debug("RBD disk xml to attach: {}", xmlForRbdDisk); + exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_RBD_DISK_XML_COMMAND, vmName, xmlForRbdDisk)); + } + return exitValue == 0; + } + + private String getDeviceToAttachDisk(String vmName) { + String currentDevice = Script.runSimpleBashScript(String.format(CURRRENT_DEVICE, vmName)); + char lastChar = currentDevice.charAt(currentDevice.length() - 1); + char incrementedChar = (char) (lastChar + 1); + return currentDevice.substring(0, currentDevice.length() - 1) + incrementedChar; + } + + private String getXmlForRbdDisk(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String deviceToAttachDiskTo, String cacheMode) { + StringBuilder diskBuilder = new StringBuilder(); + diskBuilder.append("\n\n"); + + diskBuilder.append(" \n"); + + diskBuilder.append("\n"); + for (String sourceHost : volumePool.getHost().split(",")) { + diskBuilder.append("\n"); + } + diskBuilder.append("\n"); + String authUserName = null; + final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + if (primaryPool != null) { + authUserName = 
primaryPool.getAuthUserName(); + } + if (StringUtils.isNotBlank(authUserName)) { + diskBuilder.append("\n"); + diskBuilder.append("\n"); + diskBuilder.append("\n"); + } + diskBuilder.append("\n"); + diskBuilder.append("\n"); + return diskBuilder.toString(); + } + + private void fetchBackupFile(String hostName, String backupPath) { + int mkdirExit = Script.runSimpleBashScriptForExitValue(String.format(MKDIR_P, backupPath)); + if (mkdirExit != 0) { + throw new CloudRuntimeException(String.format("Failed to create local backup directory: %s", backupPath)); + } + + String cmd = String.format(RSYNC_DIR_FROM_REMOTE, hostName, backupPath, backupPath); + logger.debug("Fetching commvault backup directory from remote host. cmd={}", cmd); + + int exit = Script.runSimpleBashScriptForExitValue(cmd); + if (exit != 0) { + throw new CloudRuntimeException(String.format( + "Failed to fetch backup directory from remote host [%s]. remotePath=[%s], localPath=[%s]", + hostName, backupPath, backupPath)); + } + } + + private static final class RbdImageSpec { + private final String image; + private final String monHost; + private final String user; + private final String key; + + private RbdImageSpec(String image, String monHost, String user, String key) { + this.image = image; + this.monHost = monHost; + this.user = user; + this.key = key; + } + + private static RbdImageSpec fromUri(String uri) { + String image = null; + String monHost = null; + String user = null; + String key = null; + if (uri.startsWith("rbd:")) { + String payload = uri.substring("rbd:".length()); + image = payload.contains(":") ? 
payload.substring(0, payload.indexOf(':')) : payload; + monHost = extract(uri, ":mon_host=([^:]*)"); + if (monHost != null) { + monHost = monHost.replace("\\;", ",").replace("\\:", ":"); + } + user = extract(uri, ":id=([^:]*)"); + key = extract(uri, ":key=([^:]*)"); + } else if (uri.startsWith("rbd/")) { + image = uri; + } + if (StringUtils.isBlank(image)) { + throw new CloudRuntimeException(String.format("Unable to parse RBD disk path: %s", uri)); + } + return new RbdImageSpec(image, monHost, user, key); + } + + private static String extract(String value, String regex) { + java.util.regex.Matcher matcher = java.util.regex.Pattern.compile(regex).matcher(value); + return matcher.find() ? matcher.group(1) : null; + } + + private String buildTempImageSpec() { + return String.format("%s-csrestore-%s", image, org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric(8).toLowerCase(Locale.ROOT)); + } + + private String buildRbdCommand(String action, String source, String target) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(monHost)) { + command.append(" -m ").append(quoteArg(monHost)); + } + if (StringUtils.isNotBlank(user)) { + command.append(" --id ").append(quoteArg(user)); + } + if (StringUtils.isNotBlank(key)) { + command.append(" --key ").append(quoteArg(key)); + } + command.append(" ").append(action); + if (StringUtils.isNotBlank(source)) { + command.append(" ").append(source); + } + if (StringUtils.isNotBlank(target)) { + command.append(" ").append(target); + } + return command.toString(); + } + + private String buildRbdCommand(String action, String target) { + return buildRbdCommand(action, null, target); + } + + private String buildQemuUri(String imageSpec) { + StringBuilder uri = new StringBuilder("rbd:").append(imageSpec); + if (StringUtils.isNotBlank(monHost)) { + uri.append(":mon_host=").append(monHost.replace(",", "\\;")); + } + if (StringUtils.isNotBlank(user)) { + uri.append(":id=").append(user); + } + if 
(StringUtils.isNotBlank(key)) { + uri.append(":key=").append(key); + } + return uri.toString(); + } + + private String quoteArg(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultTakeBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultTakeBackupCommandWrapper.java new file mode 100644 index 000000000000..1509cae11cc9 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackCommvaultTakeBackupCommandWrapper.java @@ -0,0 +1,50 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.AblestackCommvaultTakeBackupCommand; + +@ResourceWrapper(handles = AblestackCommvaultTakeBackupCommand.class) +public class LibvirtAblestackCommvaultTakeBackupCommandWrapper extends CommandWrapper { + @Override + public Answer execute(AblestackCommvaultTakeBackupCommand command, LibvirtComputingResource libvirtComputingResource) { + LibvirtAblestackCommvaultBackupHelper backupHelper = new LibvirtAblestackCommvaultBackupHelper(libvirtComputingResource); + Pair result = backupHelper.executeBackup(command); + + if (result.first() != 0) { + logger.debug("Failed to take VM backup"); + BackupAnswer answer = new BackupAnswer(command, false, null); + if (result.first() == LibvirtAblestackCommvaultBackupHelper.EXIT_CLEANUP_FAILED) { + logger.debug("Backup cleanup failed"); + answer.setNeedsCleanup(true); + } + return answer; + } + + BackupAnswer answer = new BackupAnswer(command, true, "success"); + return answer; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackDeleteBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackDeleteBackupCommandWrapper.java new file mode 100644 index 000000000000..7355a4bf56e7 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackDeleteBackupCommandWrapper.java @@ -0,0 +1,103 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.agent.api.Answer;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.Pair;
import com.cloud.utils.script.Script;
import org.apache.cloudstack.backup.BackupAnswer;
import org.apache.cloudstack.backup.AblestackDeleteBackupCommand;
import org.apache.commons.lang3.StringUtils;

import java.util.ArrayList;
import java.util.List;

/**
 * KVM command wrapper that deletes a VM backup by invoking the provider-specific
 * delete script on the host and reporting the outcome as a {@link BackupAnswer}.
 */
@ResourceWrapper(handles = AblestackDeleteBackupCommand.class)
public class LibvirtAblestackDeleteBackupCommandWrapper
        extends CommandWrapper<AblestackDeleteBackupCommand, Answer, LibvirtComputingResource> {

    @Override
    public Answer execute(final AblestackDeleteBackupCommand command, final LibvirtComputingResource libvirtComputingResource) {
        final List<String[]> commands = new ArrayList<>();
        commands.add(buildDeleteCommand(command, libvirtComputingResource));

        final Pair<Integer, String> result = Script.executePipedCommands(commands, libvirtComputingResource.getCmdsTimeout());
        logger.debug(String.format("Backup delete result: %s , exit code: %s", result.second(), result.first()));

        if (result.first() != 0) {
            logger.debug(String.format("Failed to delete VM backup: %s", result.second()));
            return new BackupAnswer(command, false, result.second());
        }
        return new BackupAnswer(command, true, null);
    }

    /**
     * Builds the delete-script argument vector.
     *
     * The Commvault provider ("ablestack-commvault") uses the Commvault script and
     * needs no repository options; every other provider uses the NAS script and
     * must additionally identify the repository (-t type, -s address, -m mount
     * options). All remaining options are shared between the two providers.
     */
    private String[] buildDeleteCommand(final AblestackDeleteBackupCommand command, final LibvirtComputingResource resource) {
        final boolean isCommvault = "ablestack-commvault".equalsIgnoreCase(command.getBackupProvider());

        final List<String> args = new ArrayList<>();
        args.add(isCommvault ? resource.getAbleCvtBackupPath() : resource.getAbleNasBackupPath());
        args.add("-o");
        args.add("delete");
        if (!isCommvault) {
            args.add("-t");
            args.add(command.getBackupRepoType());
            args.add("-s");
            args.add(command.getBackupRepoAddress());
            args.add("-m");
            args.add(command.getMountOptions());
        }
        args.add("-p");
        args.add(command.getBackupPath());
        args.add("-x");
        args.add(Boolean.toString(command.isForced()));
        if (StringUtils.isNotBlank(command.getCheckpointName())) {
            args.add("-c");
            args.add(command.getCheckpointName());
        }
        if (StringUtils.isNotBlank(command.getDiskPaths())) {
            args.add("-d");
            args.add(command.getDiskPaths());
        }
        return args.toArray(new String[0]);
    }
}
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasBackupHelper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasBackupHelper.java new file mode 100644 index 000000000000..391eb067717f --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasBackupHelper.java @@ -0,0 +1,466 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package com.cloud.hypervisor.kvm.resource.wrapper;

import com.amazonaws.util.CollectionUtils;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.storage.Storage;
import com.cloud.utils.Pair;
import com.cloud.utils.script.Script;
import org.apache.cloudstack.backup.AblestackNasTakeBackupCommand;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainInfo.DomainState;
import org.libvirt.LibvirtException;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

/**
 * Helper that performs NAS-repository backups of KVM guests for the AbleStack
 * backup provider. Three execution paths exist:
 * <ul>
 *   <li>RUNNING: the guest is live; the backup script is invoked directly.</li>
 *   <li>RBD: at least one volume is Ceph/RBD backed; the script's RBD operation is used.</li>
 *   <li>STOPPED: the guest is not running; a paused "dummy" libvirt domain is defined
 *       around the guest's disks so {@code virsh backup-begin} and checkpoints can be
 *       used even while the real domain is undefined/stopped.</li>
 * </ul>
 */
class LibvirtAblestackNasBackupHelper {
    protected Logger LOGGER = LogManager.getLogger(LibvirtAblestackNasBackupHelper.class);
    /** Exit code the backup script uses to signal that post-failure cleanup also failed. */
    static final Integer EXIT_CLEANUP_FAILED = 20;
    /** Poll interval while waiting for a virsh backup job to complete. */
    private static final int BACKUP_JOB_POLL_INTERVAL_MS = 10000;

    /** Maps each execution path to the operation name understood by the backup script. */
    enum BackupExecutionMode {
        RUNNING("backup-running"),
        STOPPED("backup-stopped"),
        RBD("backup-rbd");

        private final String scriptOperation;

        BackupExecutionMode(String scriptOperation) {
            this.scriptOperation = scriptOperation;
        }

        String getScriptOperation() {
            return scriptOperation;
        }
    }

    private final LibvirtComputingResource resource;

    LibvirtAblestackNasBackupHelper(LibvirtComputingResource resource) {
        this.resource = resource;
    }

    /**
     * Entry point: takes a backup of the VM described by {@code command}.
     *
     * @return pair of (exit code, script output); exit code 0 means success.
     */
    Pair<Integer, String> executeBackup(AblestackNasTakeBackupCommand command) {
        LOGGER.info("LibvirtNasBackupHelper executeBackup entered for vm=[{}], backupPath=[{}], backupType=[{}]",
                command.getVmName(), command.getBackupPath(), command.getBackupType());
        List<String> diskPaths = resolveDiskPaths(command.getVolumePools(), command.getVolumePaths());
        BackupExecutionMode executionMode = determineExecutionMode(command.getVmName(), command.getVolumePools());
        LOGGER.debug("NAS backup execution mode=[{}], vm=[{}], backupType=[{}], diskPaths=[{}]",
                executionMode, command.getVmName(), command.getBackupType(), diskPaths);
        if (BackupExecutionMode.STOPPED.equals(executionMode)) {
            // Stopped guests cannot run virsh backup-begin directly; use the dummy-VM flow.
            return executeStoppedVmBackup(command, diskPaths);
        }
        List<String[]> commands = new ArrayList<>();
        String[] scriptCommand = buildBackupScriptCommand(command, diskPaths, executionMode);
        LOGGER.debug("Executing NAS backup script command=[{}]", String.join(" ", scriptCommand));
        commands.add(scriptCommand);
        return Script.executePipedCommands(commands, resource.getCmdsTimeout());
    }

    /**
     * Converts volume paths to concrete disk paths. RBD volumes are rewritten to
     * the qemu "rbd:" URI form via {@link KVMPhysicalDisk#RBDStringBuilder}; file
     * volumes are passed through unchanged.
     */
    List<String> resolveDiskPaths(List<PrimaryDataStoreTO> volumePools, List<String> volumePaths) {
        List<String> diskPaths = new ArrayList<>();
        if (Objects.isNull(volumePaths)) {
            return diskPaths;
        }

        KVMStoragePoolManager storagePoolMgr = resource.getStoragePoolMgr();
        for (int idx = 0; idx < volumePaths.size(); idx++) {
            PrimaryDataStoreTO volumePool = volumePools.get(idx);
            String volumePath = volumePaths.get(idx);
            if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) {
                diskPaths.add(volumePath);
                continue;
            }

            KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid());
            diskPaths.add(KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, volumePath));
        }

        return diskPaths;
    }

    /**
     * Parses the script output into a total backup size in bytes. With no disk
     * paths the last output line holds the size; otherwise every line starts with
     * a per-disk size which is summed.
     */
    long parseBackupSize(String output, List<String> diskPaths) {
        if (CollectionUtils.isNullOrEmpty(diskPaths)) {
            List<String> outputLines = Arrays.asList(output.trim().split("\n"));
            if (!outputLines.isEmpty()) {
                return Long.parseLong(outputLines.get(outputLines.size() - 1).trim());
            }
            return 0L;
        }

        long backupSize = 0L;
        String[] outputLines = output.trim().split("\n");
        for (String line : outputLines) {
            backupSize += Long.parseLong(line.split(" ")[0].trim());
        }
        return backupSize;
    }

    /** Builds the full argument vector for the NAS backup script (running/RBD paths). */
    private String[] buildBackupScriptCommand(AblestackNasTakeBackupCommand command, List<String> diskPaths, BackupExecutionMode executionMode) {
        return new String[] {
                resource.getAbleNasBackupPath(),
                "-o", executionMode.getScriptOperation(),
                "-v", command.getVmName(),
                "-t", command.getBackupRepoType(),
                "-s", command.getBackupRepoAddress(),
                "-m", Objects.nonNull(command.getMountOptions()) ? command.getMountOptions() : "",
                "-p", command.getBackupPath(),
                "-b", Objects.nonNull(command.getBackupType()) ? command.getBackupType() : "",
                "-c", Objects.nonNull(command.getCheckpointName()) ? command.getCheckpointName() : "",
                "-r", Objects.nonNull(command.getParentBackupPath()) ? command.getParentBackupPath() : "",
                "-i", Objects.nonNull(command.getParentCheckpointName()) ? command.getParentCheckpointName() : "",
                "-j", Objects.nonNull(command.getParentCheckpointPath()) ? command.getParentCheckpointPath() : "",
                "-q", command.getQuiesce() != null && command.getQuiesce() ? "true" : "false",
                "-f", CollectionUtils.isNullOrEmpty(command.getBackupFiles()) ? "" : String.join(",", command.getBackupFiles()),
                "-d", diskPaths.isEmpty() ? "" : String.join(",", diskPaths)
        };
    }

    /** RBD volumes always take the RBD path; otherwise choose RUNNING vs STOPPED by domain state. */
    private BackupExecutionMode determineExecutionMode(String vmName, List<PrimaryDataStoreTO> volumePools) {
        if (hasRbdVolumes(volumePools)) {
            return BackupExecutionMode.RBD;
        }
        return isVmRunning(vmName) ? BackupExecutionMode.RUNNING : BackupExecutionMode.STOPPED;
    }

    private boolean hasRbdVolumes(List<PrimaryDataStoreTO> volumePools) {
        if (CollectionUtils.isNullOrEmpty(volumePools)) {
            return false;
        }
        return volumePools.stream().anyMatch(pool -> pool != null && pool.getPoolType() == Storage.StoragePoolType.RBD);
    }

    /** Treats any libvirt lookup failure as "not running" so the stopped flow is chosen. */
    private boolean isVmRunning(String vmName) {
        try {
            Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
            Domain domain = resource.getDomain(conn, vmName);
            return domain != null && DomainState.VIR_DOMAIN_RUNNING.equals(domain.getInfo().state);
        } catch (LibvirtException e) {
            return false;
        }
    }

    /**
     * Stopped-VM flow: mounts the repository, defines a paused dummy domain around
     * the guest disks, runs virsh backup-begin against it, then tears everything
     * down. Returns (0, per-file size listing) on success, (1, message) on failure.
     */
    private Pair<Integer, String> executeStoppedVmBackup(AblestackNasTakeBackupCommand command, List<String> diskPaths) {
        Path mountPoint = null;
        String dummyVmName = String.format("DUMMY-VM-%s", command.getCheckpointName().replace('.', '-'));
        Connect conn = null;
        try {
            LOGGER.info("Starting stopped VM NAS backup for vm=[{}], dummyVm=[{}], backupType=[{}]",
                    command.getVmName(), dummyVmName, command.getBackupType());
            validateStoppedBackupDiskPaths(diskPaths);
            if (isIncremental(command)) {
                resource.validateLibvirtAndQemuVersionForIncrementalSnapshots();
            }
            mountPoint = mountRepository(command);
            Path dest = mountPoint.resolve(command.getBackupPath());
            Files.createDirectories(dest.resolve("checkpoints"));

            conn = LibvirtConnection.getConnection();
            String dummyVmXml = buildDummyVmXml(dummyVmName, diskPaths, conn);
            // Start PAUSED: the dummy domain must never actually execute the guest.
            resource.startVM(conn, dummyVmName, dummyVmXml, Domain.CreateFlags.PAUSED);

            if (isIncremental(command) && command.getParentCheckpointPath() != null && !command.getParentCheckpointPath().isEmpty()) {
                redefineCheckpointIfNeeded(dummyVmName, mountPoint.resolve(command.getParentCheckpointPath()));
            }

            List<String> diskLabels = getDiskLabels(conn, dummyVmName);
            Path backupXml = writeBackupXml(dest, command, diskLabels);
            Path checkpointXml = writeCheckpointXml(dest, command, diskLabels);

            String backupBeginCommand = String.format("virsh -c qemu:///system backup-begin --domain %s --backupxml %s --checkpointxml %s",
                    shellQuote(dummyVmName), shellQuote(backupXml.toString()), shellQuote(checkpointXml.toString()));
            LOGGER.debug("Starting stopped VM NAS backup-begin command=[{}]", backupBeginCommand);
            if (Script.runSimpleBashScriptForExitValue(backupBeginCommand, resource.getCmdsTimeout(), false) != 0) {
                LOGGER.error("Failed to start backup for stopped VM dummy domain [{}]", dummyVmName);
                return new Pair<>(1, "Failed to start backup for dummy VM " + dummyVmName);
            }

            try {
                waitForBackup(dummyVmName);
            } catch (IOException e) {
                // Abort the in-flight job before propagating, so the dummy domain can be destroyed.
                cancelBackupJob(dummyVmName);
                throw e;
            }

            if (isIncremental(command) && command.getParentBackupPath() != null && !command.getParentBackupPath().isEmpty()) {
                rebaseIncrementalChain(dest, command, diskPaths);
            }

            dumpCheckpointXml(dummyVmName, command.getCheckpointName(), dest);
            backupDomainInformation(dummyVmName, dest, command, diskPaths);

            Files.deleteIfExists(backupXml);
            Files.deleteIfExists(checkpointXml);
            // Flush page cache to the NAS mount before measuring file sizes.
            runCommand("sync");
            String output = listTopLevelFileSizes(dest);
            LOGGER.info("Completed stopped VM NAS backup for vm=[{}], dummyVm=[{}]", command.getVmName(), dummyVmName);
            return new Pair<>(0, output);
        } catch (Exception e) {
            LOGGER.error("Stopped VM NAS backup failed for vm=[{}], dummyVm=[{}] due to: {}",
                    command.getVmName(), dummyVmName, e.getMessage(), e);
            return new Pair<>(1, e.getMessage());
        } finally {
            cleanupDummyVm(dummyVmName);
            unmountRepository(mountPoint);
        }
    }

    /** Mounts the backup repository at a fresh temp directory; throws on failure. */
    private Path mountRepository(AblestackNasTakeBackupCommand command) throws IOException {
        Path mountPoint = Files.createTempDirectory("csbackup.");
        StringBuilder mount = new StringBuilder()
                .append("mount -t ").append(shellQuote(command.getBackupRepoType()))
                .append(" ").append(shellQuote(command.getBackupRepoAddress()))
                .append(" ").append(shellQuote(mountPoint.toString()));
        if (command.getMountOptions() != null && !command.getMountOptions().isEmpty()) {
            mount.append(" -o ").append(shellQuote(command.getMountOptions()));
        }
        if (Script.runSimpleBashScriptForExitValue(mount.toString(), resource.getCmdsTimeout(), false) != 0) {
            throw new IOException("Failed to mount backup repository");
        }
        return mountPoint;
    }

    /** Best-effort unmount and removal of the temp mount point; never throws. */
    private void unmountRepository(Path mountPoint) {
        if (mountPoint == null) {
            return;
        }
        Script.runSimpleBashScriptForExitValue(String.format("umount %s", shellQuote(mountPoint.toString())));
        try {
            Files.deleteIfExists(mountPoint);
        } catch (IOException ignored) {
            // Leftover empty temp dir is harmless; unmount already succeeded or failed above.
        }
    }

    /**
     * Builds a minimal transient domain XML wrapping the guest's qcow2 disks so
     * virsh backup-begin can run against a paused dummy domain.
     * NOTE(review): the XML element literals were reconstructed; the original text
     * was garbled in extraction — verify element layout against the upstream file.
     */
    private String buildDummyVmXml(String vmName, List<String> diskPaths, Connect conn) throws LibvirtException {
        String arch = resource.getGuestCpuArch() != null ? resource.getGuestCpuArch() : "x86_64";
        String machine = resource.isGuestAarch64() ? LibvirtComputingResource.VIRT : LibvirtComputingResource.PC;
        String emulator = resource.getHypervisorPath();
        StringBuilder xml = new StringBuilder();
        xml.append("<domain type='kvm'>")
                .append("<name>").append(vmName).append("</name>")
                .append("<memory unit='MiB'>128</memory>")
                .append("<vcpu>1</vcpu>")
                .append("<os><type arch='").append(arch).append("' machine='").append(machine).append("'>hvm</type></os>")
                .append("<devices>")
                .append("<emulator>").append(emulator).append("</emulator>");
        for (int i = 0; i < diskPaths.size(); i++) {
            char letter = (char) ('a' + i);
            xml.append("<disk type='file' device='disk'>")
                    .append("<driver name='qemu' type='qcow2'/>")
                    .append("<source file='").append(diskPaths.get(i)).append("'/>")
                    .append("<target dev='vd").append(letter).append("' bus='virtio'/>")
                    .append("</disk>");
        }
        xml.append("</devices></domain>");
        return xml.toString();
    }

    /** The dummy-domain flow only supports file-backed disks; RBD has its own path. */
    private void validateStoppedBackupDiskPaths(List<String> diskPaths) {
        if (diskPaths.stream().anyMatch(path -> path != null && path.startsWith("rbd:"))) {
            throw new IllegalArgumentException("Stopped VM dummy backup flow supports only file-backed disks. RBD backups must use the dedicated RBD backup path.");
        }
    }

    /**
     * Redefines the parent checkpoint on the dummy domain from its saved XML when
     * libvirt does not already know it (required for incremental backup chains).
     */
    private void redefineCheckpointIfNeeded(String vmName, Path checkpointPath) throws IOException {
        if (!Files.exists(checkpointPath)) {
            return;
        }
        String checkpointName = checkpointPath.getFileName().toString().replace(".xml", "");
        int infoExit = Script.runSimpleBashScriptForExitValue(String.format(
                "virsh -c qemu:///system checkpoint-info --domain %s --checkpointname %s > /dev/null 2>&1",
                shellQuote(vmName), shellQuote(checkpointName)));
        if (infoExit == 0) {
            // Checkpoint already known to libvirt; nothing to do.
            return;
        }
        int redefineExit = Script.runSimpleBashScriptForExitValue(String.format(
                "virsh -c qemu:///system checkpoint-create --domain %s --xmlfile %s --redefine > /dev/null 2>&1",
                shellQuote(vmName), shellQuote(checkpointPath.toString())));
        if (redefineExit != 0) {
            throw new IOException("Failed to redefine checkpoint " + checkpointName + " on domain " + vmName);
        }
    }

    private List<String> getDiskLabels(Connect conn, String vmName) {
        return resource.getDisks(conn, vmName).stream()
                .map(disk -> disk.getDiskLabel())
                .filter(Objects::nonNull)
                .collect(Collectors.toList());
    }

    /**
     * Writes the domainbackup XML describing one target file per disk.
     * NOTE(review): XML literals reconstructed from garbled source — confirm.
     */
    private Path writeBackupXml(Path dest, AblestackNasTakeBackupCommand command, List<String> diskLabels) throws IOException {
        StringBuilder xml = new StringBuilder("<domainbackup>");
        if (isIncremental(command) && command.getParentCheckpointName() != null && !command.getParentCheckpointName().isEmpty()) {
            xml.append("<incremental>").append(command.getParentCheckpointName()).append("</incremental>");
        }
        xml.append("<disks>");
        for (int i = 0; i < diskLabels.size(); i++) {
            String backupFile = getBackupFileByIndex(command, i, String.format("disk-%d.qcow2", i));
            xml.append("<disk name='").append(diskLabels.get(i)).append("' backup='yes' type='file'>")
                    .append("<target file='").append(dest.resolve(backupFile)).append("'/>")
                    .append("</disk>");
        }
        xml.append("</disks></domainbackup>");
        Path backupXml = dest.resolve("backup.xml");
        Files.writeString(backupXml, xml.toString(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
        return backupXml;
    }

    /**
     * Writes the domaincheckpoint XML naming this backup's checkpoint on every disk.
     * NOTE(review): XML literals reconstructed from garbled source — confirm.
     */
    private Path writeCheckpointXml(Path dest, AblestackNasTakeBackupCommand command, List<String> diskLabels) throws IOException {
        StringBuilder xml = new StringBuilder("<domaincheckpoint>")
                .append("<name>").append(command.getCheckpointName()).append("</name>")
                .append("<disks>");
        for (String diskLabel : diskLabels) {
            xml.append("<disk name='").append(diskLabel).append("' checkpoint='bitmap'/>");
        }
        xml.append("</disks></domaincheckpoint>");
        Path checkpointXml = dest.resolve("checkpoint.xml");
        Files.writeString(checkpointXml, xml.toString(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
        return checkpointXml;
    }

    /**
     * Polls the virsh job status until the backup job completes, fails, or the
     * command timeout budget is exhausted.
     */
    private void waitForBackup(String vmName) throws IOException {
        int timeout = resource.getCmdsTimeout();
        while (timeout > 0) {
            String result = checkBackupJob(vmName);
            if (result != null && result.contains("Completed") && result.contains("Backup")) {
                return;
            }
            if (result != null && result.contains("Failed")) {
                throw new IOException("Virsh backup job failed for dummy VM " + vmName);
            }
            timeout -= BACKUP_JOB_POLL_INTERVAL_MS;
            try {
                Thread.sleep(BACKUP_JOB_POLL_INTERVAL_MS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
        }
        throw new IOException("Timed out waiting for backup job of dummy VM " + vmName);
    }

    private void cancelBackupJob(String vmName) {
        Script.runSimpleBashScriptForExitValue(String.format("virsh -c qemu:///system domjobabort --domain %s > /dev/null 2>&1", shellQuote(vmName)));
    }

    private String checkBackupJob(String vmName) {
        return Script.runSimpleBashScriptWithFullResult(
                String.format("virsh -c qemu:///system domjobinfo %s --completed --keep-completed", shellQuote(vmName)), 10);
    }

    /**
     * Rebases each incremental image onto its parent in the sibling backup
     * directory using a relative path, so the chain stays valid after the
     * repository is re-mounted elsewhere.
     */
    private void rebaseIncrementalChain(Path dest, AblestackNasTakeBackupCommand command, List<String> diskPaths) throws IOException {
        for (int i = 0; i < diskPaths.size(); i++) {
            String backupFile = getBackupFileByIndex(command, i, String.format("disk-%d.qcow2", i));
            Path output = dest.resolve(backupFile);
            String parent = "../" + Path.of(command.getParentBackupPath()).getFileName() + "/" + backupFile;
            int exit = Script.runSimpleBashScriptForExitValue(String.format(
                    "qemu-img rebase -u -F qcow2 -b %s %s",
                    shellQuote(parent), shellQuote(output.toString())), resource.getCmdsTimeout(), false);
            if (exit != 0) {
                throw new IOException("qemu-img rebase failed for " + output + " with parent " + parent);
            }
        }
    }

    /** Best-effort dump of the new checkpoint's XML into the backup's checkpoints dir. */
    private void dumpCheckpointXml(String vmName, String checkpointName, Path dest) {
        Path checkpointDest = dest.resolve("checkpoints").resolve(checkpointName + ".xml");
        Script.runSimpleBashScriptForExitValue(String.format(
                "virsh -c qemu:///system checkpoint-dumpxml --domain %s --checkpointname %s --no-domain > %s 2>/dev/null",
                shellQuote(vmName), shellQuote(checkpointName), shellQuote(checkpointDest.toString())));
    }

    /** Best-effort capture of domain config/metadata alongside the backup for later restore. */
    private void backupDomainInformation(String vmName, Path dest, AblestackNasTakeBackupCommand command, List<String> diskPaths) {
        runCommand(String.format("virsh -c qemu:///system dumpxml %s > %s 2>/dev/null || true",
                shellQuote(vmName), shellQuote(dest.resolve("domain-config.xml").toString())));
        runCommand(String.format("virsh -c qemu:///system dominfo %s > %s 2>/dev/null || true",
                shellQuote(vmName), shellQuote(dest.resolve("dominfo.xml").toString())));
        runCommand(String.format("virsh -c qemu:///system domiflist %s > %s 2>/dev/null || true",
                shellQuote(vmName), shellQuote(dest.resolve("domiflist.xml").toString())));
        runCommand(String.format("virsh -c qemu:///system domblklist %s > %s 2>/dev/null || true",
                shellQuote(vmName), shellQuote(dest.resolve("domblklist.xml").toString())));
        Path checkpointMeta = dest.resolve("checkpoints").resolve(command.getCheckpointName() + ".meta");
        String metadata = "checkpoint_name=" + command.getCheckpointName() + "\n" +
                "backup_type=" + command.getBackupType() + "\n" +
                "vm_name=" + vmName + "\n" +
                "disk_paths=" + String.join(",", diskPaths) + "\n" +
                "backup_files=" + String.join(",", Objects.requireNonNullElse(command.getBackupFiles(), List.of())) + "\n";
        try {
            Files.writeString(checkpointMeta, metadata, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
        } catch (IOException ignored) {
            // Metadata is auxiliary; the backup itself already succeeded.
        }
    }

    /** Sizes of the regular files directly under dest, sorted by file name, one per line. */
    private String listTopLevelFileSizes(Path dest) throws IOException {
        try (var stream = Files.list(dest)) {
            return stream.filter(Files::isRegularFile)
                    .sorted(Comparator.comparing(path -> path.getFileName().toString()))
                    .map(path -> {
                        try {
                            return String.valueOf(Files.size(path));
                        } catch (IOException e) {
                            return "0";
                        }
                    })
                    .collect(Collectors.joining("\n"));
        }
    }

    /** Best-effort destroy + undefine (with and without --nvram) of the dummy domain. */
    private void cleanupDummyVm(String dummyVmName) {
        runCommand(String.format("virsh -c qemu:///system destroy %s > /dev/null 2>&1 || true", shellQuote(dummyVmName)));
        runCommand(String.format("virsh -c qemu:///system undefine %s --nvram > /dev/null 2>&1 || virsh -c qemu:///system undefine %s > /dev/null 2>&1 || true",
                shellQuote(dummyVmName), shellQuote(dummyVmName)));
    }

    private void runCommand(String command) {
        Script.runSimpleBashScriptForExitValue(command, resource.getCmdsTimeout(), false);
    }

    private boolean isIncremental(AblestackNasTakeBackupCommand command) {
        return "INCREMENTAL".equalsIgnoreCase(command.getBackupType());
    }

    /** Returns the caller-specified backup file name for a disk index, or the fallback. */
    private String getBackupFileByIndex(AblestackNasTakeBackupCommand command, int index, String fallback) {
        List<String> backupFiles = command.getBackupFiles();
        if (CollectionUtils.isNullOrEmpty(backupFiles) || index >= backupFiles.size()) {
            return fallback;
        }
        return backupFiles.get(index);
    }

    /** Single-quotes a value for safe embedding in a bash command line. */
    private String shellQuote(String value) {
        return "'" + value.replace("'", "'\"'\"'") + "'";
    }
}
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasRestoreBackupCommandWrapper.java @@ -0,0 +1,835 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.storage.Storage; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.AblestackNasRestoreBackupCommand; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.commons.lang3.RandomStringUtils; 
+import org.apache.commons.lang3.StringUtils; +import org.libvirt.LibvirtException; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +@ResourceWrapper(handles = AblestackNasRestoreBackupCommand.class) +public class LibvirtAblestackNasRestoreBackupCommandWrapper extends CommandWrapper { + private static final String BACKUP_TEMP_FILE_PREFIX = "csbackup"; + private static final String MOUNT_COMMAND = "sudo mount -t %s %s %s"; + private static final String UMOUNT_COMMAND = "sudo umount %s"; + private static final String FILE_PATH_PLACEHOLDER = "%s/%s"; + private static final String ATTACH_QCOW2_DISK_COMMAND = " virsh attach-disk %s %s %s --driver qemu --subdriver qcow2 --cache none"; + private static final String ATTACH_RBD_DISK_XML_COMMAND = " virsh attach-device %s /dev/stdin < restoreVolumePools = command.getRestoreVolumePools(); + List restoreVolumePaths = command.getRestoreVolumePaths(); + Integer mountTimeout = command.getMountTimeout() * 1000; + int timeout = command.getWait() > 0 ? command.getWait() : command.getMountTimeout(); + String cacheMode = command.getCacheMode(); + KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr(); + List volumePaths = command.getVolumePaths(); + List backupFiles = command.getBackupFiles(); + List backupFileChains = command.getBackupFileChains(); + + String newVolumeId = null; + try { + String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions, mountTimeout); + if (Objects.isNull(vmExists)) { + String volumePath = volumePaths.get(0); + String backupFile = backupFiles.get(0); + String backupFileChain = backupFileChains != null && !backupFileChains.isEmpty() ? 
backupFileChains.get(0) : null; + int lastIndex = volumePath.lastIndexOf("/"); + newVolumeId = volumePath.substring(lastIndex + 1); + restoreVolume(backupPath, backupRepoType, backupRepoAddress, volumePath, backupFile, backupFileChain, + new Pair<>(vmName, command.getVmState()), mountOptions, mountTimeout, timeout, storagePoolMgr, restoreVolumePools.get(0), cacheMode); + } else if (Boolean.TRUE.equals(vmExists)) { + restoreVolumesOfExistingVM(restoreVolumePaths, backupPath, backupFiles, backupFileChains, mountDirectory, timeout, storagePoolMgr, restoreVolumePools); + } else { + restoreVolumesOfDestroyedVMs(restoreVolumePaths, backupPath, backupFiles, backupFileChains, backupRepoAddress, backupRepoType, mountOptions, mountTimeout, storagePoolMgr, restoreVolumePools, timeout); + } + } catch (CloudRuntimeException e) { + String errorMessage = e.getMessage() != null ? e.getMessage() : ""; + return new BackupAnswer(command, false, errorMessage); + } + + return new BackupAnswer(command, true, newVolumeId); + } + + private void restoreVolumesOfExistingVM(List volumePaths, String backupPath, List backupFiles, List backupFileChains, + String mountDirectory, Integer timeout, KVMStoragePoolManager storagePoolMgr, List restoreVolumePools) { + try { + for (int idx = 0; idx < volumePaths.size(); idx++) { + String volumePath = volumePaths.get(idx); + String backupFile = backupFiles.get(idx); + String backupFileChain = backupFileChains != null && backupFileChains.size() > idx ? 
backupFileChains.get(idx) : null; + List mountedBackupPaths = getMountedBackupPaths(mountDirectory, backupPath, backupFile, backupFileChain); + PrimaryDataStoreTO restoreVolumePool = restoreVolumePools.get(idx); + if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, volumePath, mountedBackupPaths, timeout, + String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), idx)) { + throw new CloudRuntimeException(String.format("Unable to restore backup from volume [%s].", volumePath)); + } + } + } finally { + unmountBackupDirectory(mountDirectory); + deleteTemporaryDirectory(mountDirectory); + } + } + + private void restoreVolumesOfDestroyedVMs(List volumePaths, String backupPath, List backupFiles, List backupFileChains, + String backupRepoAddress, String backupRepoType, String mountOptions, Integer mountTimeout, KVMStoragePoolManager storagePoolMgr, + List restoreVolumePools, Integer timeout) { + String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions, mountTimeout); + try { + for (int idx = 0; idx < volumePaths.size(); idx++) { + String volumePath = volumePaths.get(idx); + String backupFile = backupFiles.get(idx); + String backupFileChain = backupFileChains != null && backupFileChains.size() > idx ? 
backupFileChains.get(idx) : null; + List mountedBackupPaths = getMountedBackupPaths(mountDirectory, backupPath, backupFile, backupFileChain); + PrimaryDataStoreTO restoreVolumePool = restoreVolumePools.get(idx); + if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, volumePath, mountedBackupPaths, timeout, + String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), idx)) { + throw new CloudRuntimeException(String.format("Unable to restore backup from volume [%s].", volumePath)); + } + } + } finally { + unmountBackupDirectory(mountDirectory); + deleteTemporaryDirectory(mountDirectory); + } + } + + private void restoreVolume(String backupPath, String backupRepoType, String backupRepoAddress, String volumePath, String backupFile, String backupFileChain, + Pair vmNameAndState, String mountOptions, Integer mountTimeout, Integer timeout, + KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO restoreVolumePool, String cacheMode) { + String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType, mountOptions, mountTimeout); + try { + List mountedBackupPaths = getMountedBackupPaths(mountDirectory, backupPath, backupFile, backupFileChain); + if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, volumePath, mountedBackupPaths, timeout, + String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), 0, true)) { + throw new CloudRuntimeException(String.format("Unable to restore backup from volume [%s].", volumePath)); + } + if (VirtualMachine.State.Running.equals(vmNameAndState.second())) { + if (!attachVolumeToVm(storagePoolMgr, vmNameAndState.first(), restoreVolumePool, volumePath, cacheMode)) { + throw new CloudRuntimeException(String.format("Failed to attach volume to VM: %s", vmNameAndState.first())); + } + } + } finally { + unmountBackupDirectory(mountDirectory); + deleteTemporaryDirectory(mountDirectory); + } + } + + + private String mountBackupDirectory(String backupRepoAddress, String backupRepoType, String 
mountOptions, Integer mountTimeout) { + String randomChars = RandomStringUtils.random(5, true, false); + String mountDirectory = String.format("%s.%s",BACKUP_TEMP_FILE_PREFIX , randomChars); + + try { + mountDirectory = Files.createTempDirectory(mountDirectory).toString(); + } catch (IOException e) { + logger.error(String.format("Failed to create the tmp mount directory {} for restore", mountDirectory), e); + throw new CloudRuntimeException("Failed to create the tmp mount directory for restore on the KVM host"); + } + + String mount = String.format(MOUNT_COMMAND, backupRepoType, backupRepoAddress, mountDirectory); + if ("cifs".equals(backupRepoType)) { + if (Objects.isNull(mountOptions) || mountOptions.trim().isEmpty()) { + mountOptions = "nobrl"; + } else { + mountOptions += ",nobrl"; + } + } + if (Objects.nonNull(mountOptions) && !mountOptions.trim().isEmpty()) { + mount += " -o " + mountOptions; + } + + int exitValue = Script.runSimpleBashScriptForExitValue(mount, mountTimeout, false); + if (exitValue != 0) { + logger.error(String.format("Failed to mount repository {} of type {} to the directory {}", backupRepoAddress, backupRepoType, mountDirectory)); + throw new CloudRuntimeException("Failed to mount the backup repository on the KVM host"); + } + return mountDirectory; + } + + private void unmountBackupDirectory(String backupDirectory) { + String umountCmd = String.format(UMOUNT_COMMAND, backupDirectory); + int exitValue = Script.runSimpleBashScriptForExitValue(umountCmd); + if (exitValue != 0) { + logger.error(String.format("Failed to unmount backup directory {}", backupDirectory)); + throw new CloudRuntimeException("Failed to unmount the backup directory"); + } + } + + private void deleteTemporaryDirectory(String backupDirectory) { + try { + Files.deleteIfExists(Paths.get(backupDirectory)); + } catch (IOException e) { + logger.error(String.format("Failed to delete backup directory: %s", backupDirectory), e); + throw new CloudRuntimeException("Failed to 
delete the backup directory"); + } + } + + private List getMountedBackupPaths(String mountDirectory, String backupPath, String backupFile, String backupFileChain) { + List mountedPaths = new ArrayList<>(); + if (StringUtils.isNotBlank(backupFileChain)) { + for (String chainPath : backupFileChain.split(";")) { + if (StringUtils.isBlank(chainPath)) { + continue; + } + String normalizedPath = chainPath.startsWith("/") ? chainPath.substring(1) : chainPath; + if (!normalizedPath.contains("/") && StringUtils.isNotBlank(backupPath)) { + mountedPaths.add(String.format(FILE_PATH_PLACEHOLDER, String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), normalizedPath)); + } else { + mountedPaths.add(String.format(FILE_PATH_PLACEHOLDER, mountDirectory, normalizedPath)); + } + } + } + if (mountedPaths.isEmpty() && StringUtils.isNotBlank(backupFile)) { + mountedPaths.add(String.format(FILE_PATH_PLACEHOLDER, String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath), backupFile)); + } + return mountedPaths; + } + + private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, + String backupRootPath, int backupIndex) { + return replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, backupRootPath, backupIndex, false); + } + + private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, + String backupRootPath, int backupIndex, boolean createTargetVolume) { + if (backupPaths == null || backupPaths.isEmpty()) { + return false; + } + if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + if (backupPaths.stream().anyMatch(path -> path.endsWith(".rbdiff"))) { + return restoreIncrementalRbdBackupChainToFileVolume(volumePath, backupPaths, timeout, backupRootPath, backupIndex); + } + return replaceFileVolumeWithBackup(volumePath, 
getFirstExistingBackupPath(backupPaths), timeout); + } + + return replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, createTargetVolume); + } + + private boolean restoreIncrementalRbdBackupChainToFileVolume(String volumePath, List backupPaths, int timeout, String backupRootPath, int backupIndex) { + if (StringUtils.isBlank(backupRootPath)) { + throw new CloudRuntimeException("Unable to locate backup root path for incremental RBD restore"); + } + RbdImageSpec sourceImage = getRbdImageSpecFromMetadata(backupRootPath, backupIndex); + String tempImage = sourceImage.buildTempImageSpec(); + try { + if (!importBackupChainToTemporaryRbd(backupPaths, timeout, sourceImage, tempImage)) { + return false; + } + return convertTemporaryRbdToFileVolume(volumePath, timeout, sourceImage, tempImage); + } finally { + removeTemporaryRbdImage(sourceImage, tempImage, timeout); + } + } + + private String getFirstExistingBackupPath(List backupPaths) { + for (String backupPath : backupPaths) { + if (StringUtils.isNotBlank(backupPath) && Files.exists(Paths.get(backupPath))) { + return backupPath; + } + } + return backupPaths.get(0); + } + + private boolean replaceFileVolumeWithBackup(String volumePath, String backupPath, int timeout) { + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + QemuImg qemu = new QemuImg(timeout * 1000, true, false); + srcBackupFile = new QemuImgFile(backupPath, getBackupFileFormat(backupPath)); + destVolumeFile = new QemuImgFile(volumePath, getFileVolumeFormat(volumePath)); + qemu.convert(srcBackupFile, destVolumeFile); + return true; + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : null; + String destFilename = destVolumeFile != null ? 
destVolumeFile.getFileName() : null; + logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + } + + private boolean convertTemporaryRbdToFileVolume(String volumePath, int timeout, RbdImageSpec sourceImage, String tempImage) { + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + QemuImg qemu = new QemuImg(timeout * 1000, true, false); + srcBackupFile = new QemuImgFile(sourceImage.buildQemuUri(tempImage), QemuImg.PhysicalDiskFormat.RAW); + destVolumeFile = new QemuImgFile(volumePath, getFileVolumeFormat(volumePath)); + qemu.convert(srcBackupFile, destVolumeFile); + return true; + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : tempImage; + String destFilename = destVolumeFile != null ? destVolumeFile.getFileName() : volumePath; + logger.error("Failed to convert temporary RBD {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + } + + private QemuImg.PhysicalDiskFormat getBackupFileFormat(String backupPath) { + if (backupPath.endsWith(".raw")) { + return QemuImg.PhysicalDiskFormat.RAW; + } + return QemuImg.PhysicalDiskFormat.QCOW2; + } + + private QemuImg.PhysicalDiskFormat getFileVolumeFormat(String volumePath) { + if (!Files.exists(Paths.get(volumePath))) { + return QemuImg.PhysicalDiskFormat.QCOW2; + } + try { + QemuImg qemu = new QemuImg(0); + Map info = qemu.info(new QemuImgFile(volumePath)); + String format = info.get("file_format"); + if (StringUtils.isNotBlank(format)) { + return QemuImg.PhysicalDiskFormat.valueOf(format.toUpperCase(Locale.ROOT)); + } + } catch (QemuImgException | LibvirtException | IllegalArgumentException e) { + logger.warn("Failed to detect file volume format for path {}. 
Falling back to qcow2.", volumePath, e); + } + return QemuImg.PhysicalDiskFormat.QCOW2; + } + + private boolean replaceRbdVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, int timeout, boolean createTargetVolume) { + if (backupPaths.stream().anyMatch(path -> path.endsWith(".rbdiff"))) { + return restoreIncrementalRbdBackupChain(storagePoolMgr, volumePool, volumePath, backupPaths, timeout, createTargetVolume); + } + + String backupPath = getFirstExistingBackupPath(backupPaths); + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + String normalizedVolumePath = normalizeRbdVolumePath(volumePath, volumeStoragePool); + if (getBackupFileFormat(backupPath) == QemuImg.PhysicalDiskFormat.RAW) { + return importRawBackupToRbd(volumeStoragePool, normalizedVolumePath, backupPath, timeout, createTargetVolume); + } + + QemuImg qemu; + try { + qemu = new QemuImg(timeout * 1000, true, false); + if (!createTargetVolume) { + KVMPhysicalDisk rdbDisk = volumeStoragePool.getPhysicalDisk(normalizedVolumePath); + logger.debug("Restoring RBD volume: {}", rdbDisk.toString()); + qemu.setSkipTargetVolumeCreation(true); + } + } catch (LibvirtException ex) { + throw new CloudRuntimeException("Failed to create qemu-img command to restore RBD volume with backup", ex); + } + + QemuImgFile srcBackupFile = null; + QemuImgFile destVolumeFile = null; + try { + srcBackupFile = new QemuImgFile(backupPath, getBackupFileFormat(backupPath)); + String rbdDestVolumeFile = KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, normalizedVolumePath); + destVolumeFile = new QemuImgFile(rbdDestVolumeFile, QemuImg.PhysicalDiskFormat.RAW); + + logger.debug("Starting convert backup {} to RBD volume {}", backupPath, normalizedVolumePath); + qemu.convert(srcBackupFile, destVolumeFile); + logger.debug("Successfully converted backup {} to RBD volume {}", backupPath, 
normalizedVolumePath); + } catch (QemuImgException | LibvirtException e) { + String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : null; + String destFilename = destVolumeFile != null ? destVolumeFile.getFileName() : null; + logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); + return false; + } + + return true; + } + + private boolean importRawBackupToRbd(KVMStoragePool volumeStoragePool, String volumePath, String backupPath, int timeout, boolean createTargetVolume) { + if (!createTargetVolume && !volumeStoragePool.deletePhysicalDisk(volumePath, Storage.ImageFormat.RAW)) { + logger.error("Failed to delete existing RBD volume {} before raw import", volumePath); + return false; + } + + String importCommand = buildRbdImportCommand(volumeStoragePool, backupPath, volumePath); + if (Script.runSimpleBashScriptForExitValue(importCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import raw backup {} into volume {}", backupPath, volumePath); + return false; + } + return true; + } + + private boolean restoreIncrementalRbdBackupChain(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, List backupPaths, + int timeout, boolean createTargetVolume) { + if (backupPaths.isEmpty() || !backupPaths.get(0).endsWith(".raw")) { + throw new CloudRuntimeException("Incremental RBD backup chain is missing the base full backup"); + } + + String normalizedVolumePath = normalizeRbdVolumePath(volumePath, storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid())); + if (!replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, normalizedVolumePath, List.of(backupPaths.get(0)), timeout, createTargetVolume)) { + return false; + } + + KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + List restoreSnapshots = new ArrayList<>(); + try { + Map baseMetadata = 
readRbdBackupMetadata(backupPaths.get(0)); + String baseCheckpoint = baseMetadata.get("checkpoint_name"); + if (StringUtils.isNotBlank(baseCheckpoint)) { + if (!ensureRbdSnapshotExists(volumeStoragePool, normalizedVolumePath, baseCheckpoint, timeout)) { + return false; + } + restoreSnapshots.add(baseCheckpoint); + } + + for (int index = 1; index < backupPaths.size(); index++) { + String backupPath = backupPaths.get(index); + if (!backupPath.endsWith(".rbdiff")) { + continue; + } + + Map metadata = readRbdBackupMetadata(backupPath); + String parentCheckpoint = metadata.get("parent_checkpoint_name"); + String checkpoint = metadata.get("checkpoint_name"); + if (StringUtils.isBlank(parentCheckpoint) || StringUtils.isBlank(checkpoint)) { + throw new CloudRuntimeException(String.format("RBD incremental backup metadata is incomplete for %s", backupPath)); + } + if (!rbdSnapshotExists(volumeStoragePool, normalizedVolumePath, parentCheckpoint, timeout)) { + throw new CloudRuntimeException(String.format("Required parent snapshot %s is missing on volume %s", parentCheckpoint, normalizedVolumePath)); + } + + String importDiffCommand = buildRbdImportDiffCommand(volumeStoragePool, backupPath, normalizedVolumePath); + if (Script.runSimpleBashScriptForExitValue(importDiffCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import RBD diff {} into volume {}", backupPath, normalizedVolumePath); + return false; + } + + if (!ensureRbdSnapshotExists(volumeStoragePool, normalizedVolumePath, checkpoint, timeout)) { + return false; + } + restoreSnapshots.add(checkpoint); + } + return true; + } finally { + cleanupRbdRestoreSnapshots(volumeStoragePool, normalizedVolumePath, restoreSnapshots, timeout); + } + } + + private String normalizeRbdVolumePath(String volumePath, KVMStoragePool storagePool) { + if (StringUtils.isBlank(volumePath)) { + return volumePath; + } + String normalized = volumePath; + String poolPath = storagePool.getSourceDir(); + if 
(StringUtils.isNotBlank(poolPath)) { + String poolPrefix = poolPath + "/"; + if (normalized.startsWith(poolPrefix)) { + normalized = normalized.substring(poolPrefix.length()); + } + } + if (normalized.startsWith("/")) { + normalized = normalized.substring(normalized.lastIndexOf('/') + 1); + } + return normalized; + } + + private String buildRbdImportDiffCommand(KVMStoragePool storagePool, String backupPath, String volumePath) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" import-diff ").append(backupPath).append(" ").append(volumePath); + return command.toString(); + } + + private String buildRbdImportCommand(KVMStoragePool storagePool, String backupPath, String volumePath) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" import ").append(backupPath).append(" ").append(volumePath); + return command.toString(); + } + + private String formatRbdMonHosts(String hosts, int port) { + String[] hostValues = hosts.split(","); + List formattedHosts = new ArrayList<>(); + for (String host : hostValues) { + String normalizedHost = host.replace("[", "").replace("]", 
"").trim(); + if (StringUtils.isBlank(normalizedHost)) { + continue; + } + formattedHosts.add(port > 0 ? normalizedHost + ":" + port : normalizedHost); + } + return String.join(",", formattedHosts); + } + + private boolean importBackupChainToTemporaryRbd(List backupPaths, int timeout, RbdImageSpec sourceImage, String tempImage) { + if (backupPaths.isEmpty() || !backupPaths.get(0).endsWith(".raw")) { + throw new CloudRuntimeException("Incremental RBD backup chain is missing the base full backup"); + } + String importCommand = sourceImage.buildRbdCommand("import", quote(backupPaths.get(0)), quote(tempImage)); + if (Script.runSimpleBashScriptForExitValue(importCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import base RBD backup {} into temporary image {}", backupPaths.get(0), tempImage); + return false; + } + + List restoreSnapshots = new ArrayList<>(); + try { + Map baseMetadata = readRbdBackupMetadata(backupPaths.get(0)); + String baseCheckpoint = baseMetadata.get("checkpoint_name"); + if (StringUtils.isNotBlank(baseCheckpoint)) { + if (!ensureRbdSnapshotExists(sourceImage, tempImage, baseCheckpoint, timeout)) { + return false; + } + restoreSnapshots.add(baseCheckpoint); + } + + for (int index = 1; index < backupPaths.size(); index++) { + String backupPath = backupPaths.get(index); + if (!backupPath.endsWith(".rbdiff")) { + continue; + } + Map metadata = readRbdBackupMetadata(backupPath); + String parentCheckpoint = metadata.get("parent_checkpoint_name"); + String checkpoint = metadata.get("checkpoint_name"); + if (StringUtils.isBlank(parentCheckpoint) || StringUtils.isBlank(checkpoint)) { + throw new CloudRuntimeException(String.format("RBD incremental backup metadata is incomplete for %s", backupPath)); + } + if (!rbdSnapshotExists(sourceImage, tempImage, parentCheckpoint, timeout)) { + throw new CloudRuntimeException(String.format("Required parent snapshot %s is missing on temporary image %s", parentCheckpoint, tempImage)); + } + String 
importDiffCommand = sourceImage.buildRbdCommand("import-diff", quote(backupPath), quote(tempImage)); + if (Script.runSimpleBashScriptForExitValue(importDiffCommand, timeout * 1000, false) != 0) { + logger.error("Failed to import RBD diff {} into temporary image {}", backupPath, tempImage); + return false; + } + if (!ensureRbdSnapshotExists(sourceImage, tempImage, checkpoint, timeout)) { + return false; + } + restoreSnapshots.add(checkpoint); + } + return true; + } finally { + cleanupRbdRestoreSnapshots(sourceImage, tempImage, restoreSnapshots, timeout); + } + } + + private Map readRbdBackupMetadata(String backupPath) { + java.nio.file.Path metadataPath = Paths.get(backupPath).getParent().resolve("rbd-backup.meta"); + if (!Files.exists(metadataPath)) { + throw new CloudRuntimeException(String.format("RBD backup metadata file not found: %s", metadataPath)); + } + try { + return Files.readAllLines(metadataPath).stream() + .map(String::trim) + .filter(line -> !line.isEmpty() && line.contains("=")) + .map(line -> line.split("=", 2)) + .collect(java.util.stream.Collectors.toMap(parts -> parts[0], parts -> parts[1], (left, right) -> right)); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Failed to read RBD backup metadata: %s", metadataPath), e); + } + } + + private boolean ensureRbdSnapshotExists(KVMStoragePool storagePool, String volumePath, String snapshotName, int timeout) { + if (rbdSnapshotExists(storagePool, volumePath, snapshotName, timeout)) { + return true; + } + String createSnapshotCommand = buildRbdSnapshotCommand(storagePool, "snap create", volumePath + "@" + snapshotName); + if (Script.runSimpleBashScriptForExitValue(createSnapshotCommand, timeout * 1000, false) != 0) { + logger.error("Failed to create RBD snapshot {} on volume {}", snapshotName, volumePath); + return false; + } + return true; + } + + private boolean ensureRbdSnapshotExists(RbdImageSpec imageSpec, String image, String snapshotName, int timeout) { + if 
(rbdSnapshotExists(imageSpec, image, snapshotName, timeout)) { + return true; + } + String createSnapshotCommand = imageSpec.buildRbdCommand("snap", "create", quote(image + "@" + snapshotName)); + if (Script.runSimpleBashScriptForExitValue(createSnapshotCommand, timeout * 1000, false) != 0) { + logger.error("Failed to create RBD snapshot {} on image {}", snapshotName, image); + return false; + } + return true; + } + + private boolean rbdSnapshotExists(KVMStoragePool storagePool, String volumePath, String snapshotName, int timeout) { + String existsCommand = buildRbdSnapshotCommand(storagePool, "snap ls", volumePath) + " | awk 'NR>1 {print $2}' | grep -Fx " + quote(snapshotName); + return Script.runSimpleBashScriptForExitValue(existsCommand, timeout * 1000, false) == 0; + } + + private boolean rbdSnapshotExists(RbdImageSpec imageSpec, String image, String snapshotName, int timeout) { + String existsCommand = imageSpec.buildRbdCommand("snap", "ls", quote(image)) + " | awk 'NR>1 {print $2}' | grep -Fx " + quote(snapshotName); + return Script.runSimpleBashScriptForExitValue(existsCommand, timeout * 1000, false) == 0; + } + + private void cleanupRbdRestoreSnapshots(KVMStoragePool storagePool, String volumePath, List snapshotNames, int timeout) { + for (int index = snapshotNames.size() - 1; index >= 0; index--) { + String snapshotName = snapshotNames.get(index); + String removeSnapshotCommand = buildRbdSnapshotCommand(storagePool, "snap rm", volumePath + "@" + snapshotName); + Script.runSimpleBashScriptForExitValue(removeSnapshotCommand, timeout * 1000, false); + } + } + + private void cleanupRbdRestoreSnapshots(RbdImageSpec imageSpec, String image, List snapshotNames, int timeout) { + for (int index = snapshotNames.size() - 1; index >= 0; index--) { + String snapshotName = snapshotNames.get(index); + String removeSnapshotCommand = imageSpec.buildRbdCommand("snap", "rm", quote(image + "@" + snapshotName)); + Script.runSimpleBashScriptForExitValue(removeSnapshotCommand, 
timeout * 1000, false); + } + } + + private String buildRbdSnapshotCommand(KVMStoragePool storagePool, String action, String target) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(storagePool.getSourceHost())) { + command.append(" -m ").append(formatRbdMonHosts(storagePool.getSourceHost(), storagePool.getSourcePort())); + } + if (StringUtils.isNotBlank(storagePool.getAuthUserName())) { + command.append(" --id ").append(storagePool.getAuthUserName()); + } + if (StringUtils.isNotBlank(storagePool.getAuthSecret())) { + command.append(" --key ").append(storagePool.getAuthSecret()); + } + command.append(" ").append(action).append(" ").append(target); + return command.toString(); + } + + private void removeTemporaryRbdImage(RbdImageSpec sourceImage, String tempImage, int timeout) { + String removeCommand = sourceImage.buildRbdCommand("rm", quote(tempImage)); + Script.runSimpleBashScriptForExitValue(removeCommand, timeout * 1000, false); + } + + private RbdImageSpec getRbdImageSpecFromMetadata(String backupRootPath, int backupIndex) { + java.nio.file.Path metadataPath = Paths.get(backupRootPath, "rbd-backup.meta"); + if (!Files.exists(metadataPath)) { + throw new CloudRuntimeException(String.format("RBD backup metadata file not found: %s", metadataPath)); + } + try { + Map metadata = Files.readAllLines(metadataPath).stream() + .map(String::trim) + .filter(line -> !line.isEmpty() && line.contains("=")) + .map(line -> line.split("=", 2)) + .collect(java.util.stream.Collectors.toMap(parts -> parts[0], parts -> parts[1], (left, right) -> right)); + String diskPaths = metadata.get("disk_paths"); + if (StringUtils.isBlank(diskPaths)) { + throw new CloudRuntimeException("RBD backup metadata does not contain disk_paths"); + } + List values = Arrays.asList(diskPaths.split(",")); + if (backupIndex >= values.size()) { + throw new CloudRuntimeException(String.format("RBD backup metadata does not contain disk path for index %d", backupIndex)); + } + 
return RbdImageSpec.fromUri(values.get(backupIndex)); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Failed to read RBD backup metadata: %s", metadataPath), e); + } + } + + private String quote(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } + + private static final class RbdImageSpec { + private final String image; + private final String monHost; + private final String user; + private final String key; + + private RbdImageSpec(String image, String monHost, String user, String key) { + this.image = image; + this.monHost = monHost; + this.user = user; + this.key = key; + } + + private static RbdImageSpec fromUri(String uri) { + String image = null; + String monHost = null; + String user = null; + String key = null; + if (uri.startsWith("rbd:")) { + String payload = uri.substring("rbd:".length()); + image = payload.contains(":") ? payload.substring(0, payload.indexOf(':')) : payload; + monHost = extract(uri, ":mon_host=([^:]*)"); + if (monHost != null) { + monHost = monHost.replace("\\;", ",").replace("\\:", ":"); + } + user = extract(uri, ":id=([^:]*)"); + key = extract(uri, ":key=([^:]*)"); + } else if (uri.startsWith("rbd/")) { + image = uri; + } + if (StringUtils.isBlank(image)) { + throw new CloudRuntimeException(String.format("Unable to parse RBD disk path: %s", uri)); + } + return new RbdImageSpec(image, monHost, user, key); + } + + private static String extract(String value, String regex) { + java.util.regex.Matcher matcher = java.util.regex.Pattern.compile(regex).matcher(value); + return matcher.find() ? 
matcher.group(1) : null; + } + + private String buildTempImageSpec() { + return String.format("%s-csrestore-%s", image, RandomStringUtils.randomAlphanumeric(8).toLowerCase(Locale.ROOT)); + } + + private String buildRbdCommand(String action, String source, String target) { + StringBuilder command = new StringBuilder("rbd"); + if (StringUtils.isNotBlank(monHost)) { + command.append(" -m ").append(quoteArg(monHost)); + } + if (StringUtils.isNotBlank(user)) { + command.append(" --id ").append(quoteArg(user)); + } + if (StringUtils.isNotBlank(key)) { + command.append(" --key ").append(quoteArg(key)); + } + command.append(" ").append(action); + if (StringUtils.isNotBlank(source)) { + command.append(" ").append(source); + } + if (StringUtils.isNotBlank(target)) { + command.append(" ").append(target); + } + return command.toString(); + } + + private String buildRbdCommand(String action, String target) { + return buildRbdCommand(action, null, target); + } + + private String buildQemuUri(String imageSpec) { + StringBuilder uri = new StringBuilder("rbd:").append(imageSpec); + if (StringUtils.isNotBlank(monHost)) { + uri.append(":mon_host=").append(monHost.replace(",", "\\;")); + } + if (StringUtils.isNotBlank(user)) { + uri.append(":id=").append(user); + } + if (StringUtils.isNotBlank(key)) { + uri.append(":key=").append(key); + } + return uri.toString(); + } + + private String quoteArg(String value) { + return "'" + value.replace("'", "'\"'\"'") + "'"; + } + } + + private boolean attachVolumeToVm(KVMStoragePoolManager storagePoolMgr, String vmName, PrimaryDataStoreTO volumePool, String volumePath, String cacheMode) { + String deviceToAttachDiskTo = getDeviceToAttachDisk(vmName); + int exitValue; + if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { + exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_QCOW2_DISK_COMMAND, vmName, volumePath, deviceToAttachDiskTo)); + } else { + String xmlForRbdDisk = getXmlForRbdDisk(storagePoolMgr, volumePool, 
volumePath, deviceToAttachDiskTo, cacheMode); + logger.debug("RBD disk xml to attach: {}", xmlForRbdDisk); + exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_RBD_DISK_XML_COMMAND, vmName, xmlForRbdDisk)); + } + return exitValue == 0; + } + + private String getDeviceToAttachDisk(String vmName) { + String currentDevice = Script.runSimpleBashScript(String.format(CURRRENT_DEVICE, vmName)); + char lastChar = currentDevice.charAt(currentDevice.length() - 1); + char incrementedChar = (char) (lastChar + 1); + return currentDevice.substring(0, currentDevice.length() - 1) + incrementedChar; + } + + private String getXmlForRbdDisk(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String deviceToAttachDiskTo, String cacheMode) { + StringBuilder diskBuilder = new StringBuilder(); + diskBuilder.append("\n\n"); + + diskBuilder.append(" \n"); + + diskBuilder.append("\n"); + for (String sourceHost : volumePool.getHost().split(",")) { + diskBuilder.append("\n"); + } + diskBuilder.append("\n"); + String authUserName = null; + final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); + if (primaryPool != null) { + authUserName = primaryPool.getAuthUserName(); + } + if (StringUtils.isNotBlank(authUserName)) { + diskBuilder.append("\n"); + diskBuilder.append("\n"); + diskBuilder.append("\n"); + } + diskBuilder.append("\n"); + diskBuilder.append("\n"); + return diskBuilder.toString(); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasTakeBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasTakeBackupCommandWrapper.java new file mode 100644 index 000000000000..2293b8d4e824 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtAblestackNasTakeBackupCommandWrapper.java @@ -0,0 +1,61 
@@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.AblestackNasTakeBackupCommand; + +import java.util.List; + +@ResourceWrapper(handles = AblestackNasTakeBackupCommand.class) +public class LibvirtAblestackNasTakeBackupCommandWrapper extends CommandWrapper { + @Override + public Answer execute(AblestackNasTakeBackupCommand command, LibvirtComputingResource libvirtComputingResource) { + logger.info("LibvirtTakeBackupCommandWrapper entering execute for vm=[{}], backupPath=[{}], backupType=[{}]", + command.getVmName(), command.getBackupPath(), command.getBackupType()); + LibvirtAblestackNasBackupHelper backupHelper = new LibvirtAblestackNasBackupHelper(libvirtComputingResource); + List diskPaths = backupHelper.resolveDiskPaths(command.getVolumePools(), command.getVolumePaths()); + logger.info("LibvirtTakeBackupCommandWrapper invoking helper for vm=[{}], 
diskPaths=[{}]", + command.getVmName(), diskPaths); + Pair result = backupHelper.executeBackup(command); + logger.info("LibvirtTakeBackupCommandWrapper helper returned for vm=[{}], resultCode=[{}], details=[{}]", + command.getVmName(), result.first(), result.second()); + + if (result.first() != 0) { + logger.debug("Failed to take VM backup: " + result.second()); + BackupAnswer answer = new BackupAnswer(command, false, result.second().trim()); + if (result.first() == LibvirtAblestackNasBackupHelper.EXIT_CLEANUP_FAILED) { + logger.debug("Backup cleanup failed"); + answer.setNeedsCleanup(true); + } + return answer; + } + + long backupSize = backupHelper.parseBackupSize(result.second(), diskPaths); + BackupAnswer answer = new BackupAnswer(command, true, result.second().trim()); + answer.setSize(backupSize); + return answer; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultRestoreBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultRestoreBackupCommandWrapper.java deleted file mode 100644 index 5ffce02fd138..000000000000 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultRestoreBackupCommandWrapper.java +++ /dev/null @@ -1,319 +0,0 @@ -// -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -// - -package com.cloud.hypervisor.kvm.resource.wrapper; - -import com.cloud.agent.api.Answer; -import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; -import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; -import com.cloud.hypervisor.kvm.storage.KVMStoragePool; -import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; -import com.cloud.resource.CommandWrapper; -import com.cloud.resource.ResourceWrapper; -import com.cloud.storage.Storage; -import com.cloud.utils.Pair; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.Script; -import com.cloud.vm.VirtualMachine; -import org.apache.cloudstack.backup.BackupAnswer; -import org.apache.cloudstack.backup.CommvaultRestoreBackupCommand; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.utils.qemu.QemuImg; -import org.apache.cloudstack.utils.qemu.QemuImgException; -import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; -import org.libvirt.LibvirtException; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.Locale; -import java.util.Objects; - -@ResourceWrapper(handles = CommvaultRestoreBackupCommand.class) -public class LibvirtCommvaultRestoreBackupCommandWrapper extends CommandWrapper { - private static final String FILE_PATH_PLACEHOLDER = "%s/%s"; - private static final String ATTACH_QCOW2_DISK_COMMAND = " virsh attach-disk %s %s %s --driver qemu --subdriver qcow2 --cache none"; - 
private static final String ATTACH_RBD_DISK_XML_COMMAND = " virsh attach-device %s /dev/stdin < backedVolumeUUIDs = command.getBackupVolumesUUIDs(); - List restoreVolumePools = command.getRestoreVolumePools(); - List restoreVolumePaths = command.getRestoreVolumePaths(); - String restoreVolumeUuid = command.getRestoreVolumeUUID(); - int timeout = command.getWait(); - String cacheMode = command.getCacheMode(); - String hostName = command.getHostName(); - KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr(); - - String newVolumeId = null; - try { - if (hostName != null) { - fetchBackupFile(hostName, backupPath); - } - if (Objects.isNull(vmExists)) { - PrimaryDataStoreTO volumePool = restoreVolumePools.get(0); - String volumePath = restoreVolumePaths.get(0); - int lastIndex = volumePath.lastIndexOf("/"); - newVolumeId = volumePath.substring(lastIndex + 1); - restoreVolume(storagePoolMgr, backupPath, volumePool, volumePath, diskType, restoreVolumeUuid, - new Pair<>(vmName, command.getVmState()), timeout, cacheMode); - } else if (Boolean.TRUE.equals(vmExists)) { - restoreVolumesOfExistingVM(storagePoolMgr, restoreVolumePools, restoreVolumePaths, backedVolumeUUIDs, backupPath, timeout); - } else { - restoreVolumesOfDestroyedVMs(storagePoolMgr, restoreVolumePools, restoreVolumePaths, vmName, backupPath, timeout); - } - } catch (CloudRuntimeException e) { - String errorMessage = e.getMessage() != null ? 
e.getMessage() : ""; - return new BackupAnswer(command, false, errorMessage); - } - - return new BackupAnswer(command, true, newVolumeId); - } - - private void verifyBackupFile(String backupPath, String volUuid) { - if (!checkBackupPathExists(backupPath)) { - throw new CloudRuntimeException(String.format("Backup file for the volume [%s] does not exist.", volUuid)); - } - if (!checkBackupFileImage(backupPath)) { - throw new CloudRuntimeException(String.format("Backup qcow2 file for the volume [%s] is corrupt.", volUuid)); - } - } - - private void restoreVolumesOfExistingVM(KVMStoragePoolManager storagePoolMgr, List restoreVolumePools, List restoreVolumePaths, List backedVolumesUUIDs, - String backupPath, int timeout) { - String diskType = "root"; - try { - for (int idx = 0; idx < restoreVolumePaths.size(); idx++) { - PrimaryDataStoreTO restoreVolumePool = restoreVolumePools.get(idx); - String restoreVolumePath = restoreVolumePaths.get(idx); - String backupVolumeUuid = backedVolumesUUIDs.get(idx); - Pair bkpPathAndVolUuid = getBackupPath(null, backupPath, diskType, backupVolumeUuid); - diskType = "datadisk"; - verifyBackupFile(bkpPathAndVolUuid.first(), bkpPathAndVolUuid.second()); - if (!replaceVolumeWithBackup(storagePoolMgr, restoreVolumePool, restoreVolumePath, bkpPathAndVolUuid.first(), timeout)) { - throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", bkpPathAndVolUuid.second())); - } - } - } finally { - deleteBackupDirectory(backupPath); - } - } - - private void restoreVolumesOfDestroyedVMs(KVMStoragePoolManager storagePoolMgr, List volumePools, List volumePaths, String vmName, String backupPath, int timeout) { - String diskType = "root"; - try { - for (int i = 0; i < volumePaths.size(); i++) { - PrimaryDataStoreTO volumePool = volumePools.get(i); - String volumePath = volumePaths.get(i); - Pair bkpPathAndVolUuid = getBackupPath(volumePath, backupPath, diskType, null); - diskType = "datadisk"; - 
verifyBackupFile(bkpPathAndVolUuid.first(), bkpPathAndVolUuid.second()); - if (!replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, bkpPathAndVolUuid.first(), timeout)) { - throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", bkpPathAndVolUuid.second())); - } - } - } finally { - deleteBackupDirectory(backupPath); - } - } - - private void restoreVolume(KVMStoragePoolManager storagePoolMgr, String backupPath, PrimaryDataStoreTO volumePool, String volumePath, String diskType, String volumeUUID, - Pair vmNameAndState, int timeout, String cacheMode) { - Pair bkpPathAndVolUuid; - try { - bkpPathAndVolUuid = getBackupPath(volumePath, backupPath, diskType, volumeUUID); - verifyBackupFile(bkpPathAndVolUuid.first(), bkpPathAndVolUuid.second()); - if (!replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, bkpPathAndVolUuid.first(), timeout, true)) { - throw new CloudRuntimeException(String.format("Unable to restore contents from the backup volume [%s].", bkpPathAndVolUuid.second())); - } - if (VirtualMachine.State.Running.equals(vmNameAndState.second())) { - if (!attachVolumeToVm(storagePoolMgr, vmNameAndState.first(), volumePool, volumePath, cacheMode)) { - throw new CloudRuntimeException(String.format("Failed to attach volume to VM: %s", vmNameAndState.first())); - } - } - } finally { - deleteBackupDirectory(backupPath); - } - } - - private void deleteBackupDirectory(String backupDirectory) { - try { - FileUtils.deleteDirectory(new File(backupDirectory)); - } catch (IOException e) { - logger.error(String.format("Failed to delete backup directory: %s", backupDirectory), e); - throw new CloudRuntimeException("Failed to delete the backup directory"); - } - } - - private Pair getBackupPath(String volumePath, String backupPath, String diskType, String volumeUuid) { - String volUuid = Objects.isNull(volumeUuid) ? 
volumePath.substring(volumePath.lastIndexOf(File.separator) + 1) : volumeUuid; - String backupFileName = String.format("%s.%s.qcow2", diskType.toLowerCase(Locale.ROOT), volUuid); - backupPath = String.format(FILE_PATH_PLACEHOLDER, backupPath, backupFileName); - return new Pair<>(backupPath, volUuid); - } - - private boolean checkBackupFileImage(String backupPath) { - int exitValue = Script.runSimpleBashScriptForExitValue(String.format("qemu-img check %s", backupPath)); - return exitValue == 0; - } - - private boolean checkBackupPathExists(String backupPath) { - int exitValue = Script.runSimpleBashScriptForExitValue(String.format("ls %s", backupPath)); - return exitValue == 0; - } - - private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String backupPath, int timeout) { - return replaceVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPath, timeout, false); - } - - private boolean replaceVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String backupPath, int timeout, boolean createTargetVolume) { - if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { - int exitValue = Script.runSimpleBashScriptForExitValue(String.format(RSYNC_COMMAND, backupPath, volumePath)); - return exitValue == 0; - } - - return replaceRbdVolumeWithBackup(storagePoolMgr, volumePool, volumePath, backupPath, timeout, createTargetVolume); - } - - private boolean replaceRbdVolumeWithBackup(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String backupPath, int timeout, boolean createTargetVolume) { - KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); - QemuImg qemu; - try { - qemu = new QemuImg(timeout * 1000, true, false); - if (!createTargetVolume) { - KVMPhysicalDisk rdbDisk = volumeStoragePool.getPhysicalDisk(volumePath); - 
logger.debug("Restoring RBD volume: {}", rdbDisk.toString()); - qemu.setSkipTargetVolumeCreation(true); - } - } catch (LibvirtException ex) { - throw new CloudRuntimeException("Failed to create qemu-img command to restore RBD volume with backup", ex); - } - - QemuImgFile srcBackupFile = null; - QemuImgFile destVolumeFile = null; - try { - srcBackupFile = new QemuImgFile(backupPath, QemuImg.PhysicalDiskFormat.QCOW2); - String rbdDestVolumeFile = KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, volumePath); - destVolumeFile = new QemuImgFile(rbdDestVolumeFile, QemuImg.PhysicalDiskFormat.RAW); - - logger.debug("Starting convert backup {} to RBD volume {}", backupPath, volumePath); - qemu.convert(srcBackupFile, destVolumeFile); - logger.debug("Successfully converted backup {} to RBD volume {}", backupPath, volumePath); - } catch (QemuImgException | LibvirtException e) { - String srcFilename = srcBackupFile != null ? srcBackupFile.getFileName() : null; - String destFilename = destVolumeFile != null ? 
destVolumeFile.getFileName() : null; - logger.error("Failed to convert backup {} to volume {}, the error was: {}", srcFilename, destFilename, e.getMessage()); - return false; - } - - return true; - } - - private boolean attachVolumeToVm(KVMStoragePoolManager storagePoolMgr, String vmName, PrimaryDataStoreTO volumePool, String volumePath, String cacheMode) { - String deviceToAttachDiskTo = getDeviceToAttachDisk(vmName); - int exitValue; - if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { - exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_QCOW2_DISK_COMMAND, vmName, volumePath, deviceToAttachDiskTo)); - } else { - String xmlForRbdDisk = getXmlForRbdDisk(storagePoolMgr, volumePool, volumePath, deviceToAttachDiskTo, cacheMode); - logger.debug("RBD disk xml to attach: {}", xmlForRbdDisk); - exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_RBD_DISK_XML_COMMAND, vmName, xmlForRbdDisk)); - } - return exitValue == 0; - } - - private String getDeviceToAttachDisk(String vmName) { - String currentDevice = Script.runSimpleBashScript(String.format(CURRRENT_DEVICE, vmName)); - char lastChar = currentDevice.charAt(currentDevice.length() - 1); - char incrementedChar = (char) (lastChar + 1); - return currentDevice.substring(0, currentDevice.length() - 1) + incrementedChar; - } - - private String getXmlForRbdDisk(KVMStoragePoolManager storagePoolMgr, PrimaryDataStoreTO volumePool, String volumePath, String deviceToAttachDiskTo, String cacheMode) { - StringBuilder diskBuilder = new StringBuilder(); - diskBuilder.append("\n\n"); - - diskBuilder.append(" \n"); - - diskBuilder.append("\n"); - for (String sourceHost : volumePool.getHost().split(",")) { - diskBuilder.append("\n"); - } - diskBuilder.append("\n"); - String authUserName = null; - final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); - if (primaryPool != null) { - authUserName = primaryPool.getAuthUserName(); 
- } - if (StringUtils.isNotBlank(authUserName)) { - diskBuilder.append("\n"); - diskBuilder.append("\n"); - diskBuilder.append("\n"); - } - diskBuilder.append("\n"); - diskBuilder.append("\n"); - return diskBuilder.toString(); - } - - private void fetchBackupFile(String hostName, String backupPath) { - int mkdirExit = Script.runSimpleBashScriptForExitValue(String.format(MKDIR_P, backupPath)); - if (mkdirExit != 0) { - throw new CloudRuntimeException(String.format("Failed to create local backup directory: %s", backupPath)); - } - - String cmd = String.format(RSYNC_DIR_FROM_REMOTE, hostName, backupPath, backupPath); - logger.debug("Fetching commvault backup directory from remote host. cmd={}", cmd); - - int exit = Script.runSimpleBashScriptForExitValue(cmd); - if (exit != 0) { - throw new CloudRuntimeException(String.format( - "Failed to fetch backup directory from remote host [%s]. remotePath=[%s], localPath=[%s]", - hostName, backupPath, backupPath)); - } - } -} \ No newline at end of file diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultTakeBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultTakeBackupCommandWrapper.java deleted file mode 100644 index 277d38e8573d..000000000000 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCommvaultTakeBackupCommandWrapper.java +++ /dev/null @@ -1,91 +0,0 @@ -// -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -// - -package com.cloud.hypervisor.kvm.resource.wrapper; - -import com.cloud.agent.api.Answer; -import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; -import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; -import com.cloud.hypervisor.kvm.storage.KVMStoragePool; -import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; -import com.cloud.resource.CommandWrapper; -import com.cloud.resource.ResourceWrapper; -import com.cloud.storage.Storage; -import com.cloud.utils.Pair; -import com.cloud.utils.script.Script; -import org.apache.cloudstack.backup.BackupAnswer; -import org.apache.cloudstack.backup.CommvaultTakeBackupCommand; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; - -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -@ResourceWrapper(handles = CommvaultTakeBackupCommand.class) -public class LibvirtCommvaultTakeBackupCommandWrapper extends CommandWrapper { - private static final Integer EXIT_CLEANUP_FAILED = 20; - @Override - public Answer execute(CommvaultTakeBackupCommand command, LibvirtComputingResource libvirtComputingResource) { - final String vmName = command.getVmName(); - final String backupPath = command.getBackupPath(); - List volumePools = command.getVolumePools(); - final List volumePaths = command.getVolumePaths(); - KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); - - List diskPaths = new ArrayList<>(); - if (Objects.nonNull(volumePaths)) { - for (int idx = 0; idx < volumePaths.size(); idx++) { - PrimaryDataStoreTO volumePool = 
volumePools.get(idx); - String volumePath = volumePaths.get(idx); - if (volumePool.getPoolType() != Storage.StoragePoolType.RBD) { - diskPaths.add(volumePath); - } else { - KVMStoragePool volumeStoragePool = storagePoolMgr.getStoragePool(volumePool.getPoolType(), volumePool.getUuid()); - String rbdDestVolumeFile = KVMPhysicalDisk.RBDStringBuilder(volumeStoragePool, volumePath); - diskPaths.add(rbdDestVolumeFile); - } - } - } - - List commands = new ArrayList<>(); - commands.add(new String[]{ - libvirtComputingResource.getCvtBackupPath(), - "-o", "backup", - "-v", vmName, - "-p", backupPath, - "-q", command.getQuiesce() != null && command.getQuiesce() ? "true" : "false", - "-d", diskPaths.isEmpty() ? "" : String.join(",", diskPaths) - }); - - Pair result = Script.executePipedCommands(commands, libvirtComputingResource.getCmdsTimeout()); - - if (result.first() != 0) { - logger.debug("Failed to take VM backup"); - BackupAnswer answer = new BackupAnswer(command, false, null); - if (result.first() == EXIT_CLEANUP_FAILED) { - logger.debug("Backup cleanup failed"); - answer.setNeedsCleanup(true); - } - return answer; - } - - BackupAnswer answer = new BackupAnswer(command, true, "success"); - return answer; - } -} diff --git a/plugins/pom.xml b/plugins/pom.xml index f6537a107b9b..f7d4643c3446 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -63,7 +63,8 @@ backup/dummy backup/networker backup/nas - backup/commvault + backup/ablestack-nas + backup/ablestack-commvault ca/root-ca diff --git a/scripts/vm/hypervisor/kvm/ablestack_cvtbackup.sh b/scripts/vm/hypervisor/kvm/ablestack_cvtbackup.sh new file mode 100644 index 000000000000..b1139039c241 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/ablestack_cvtbackup.sh @@ -0,0 +1,446 @@ +#!/usr/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set -eo pipefail + +# CloudStack B&R Commvault Backup and Recovery Tool for KVM + +# TODO: do libvirt/logging etc checks + +### Declare variables ### + +OP="" +VM="" +BACKUP_DIR="" +DISK_PATHS="" +QUIESCE="" +BACKUP_TYPE="FULL" +CHECKPOINT_NAME="" +PARENT_BACKUP_DIR="" +PARENT_CHECKPOINT_NAME="" +PARENT_CHECKPOINT_PATH="" +BACKUP_FILES="" +FORCED="false" +logFile="/var/log/cloudstack/agent/agent.log" + +EXIT_CLEANUP_FAILED=20 + +log() { + [[ "$verb" -eq 1 ]] && builtin echo "$@" + if [[ "$1" == "-ne" || "$1" == "-e" || "$1" == "-n" ]]; then + builtin echo -e "$(date '+%Y-%m-%d %H-%M-%S>')" "${@: 2}" >> "$logFile" + else + builtin echo "$(date '+%Y-%m-%d %H-%M-%S>')" "$@" >> "$logFile" + fi +} + +vercomp() { + local IFS=. + local i ver1=($1) ver2=($3) + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]]; then + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})); then + return 0 + elif ((10#${ver1[i]} < 10#${ver2[i]})); then + return 2 + fi + done + return 0 +} + +sanity_checks() { + hvVersion=$(virsh version | grep hypervisor | awk '{print $(NF)}') + libvVersion=$(virsh version | grep libvirt | awk '{print $(NF)}' | tail -n 1) + apiVersion=$(virsh version | grep API | awk '{print $(NF)}') + + vercomp "$hvVersion" ">=" "4.2.0" + hvStatus=$? + vercomp "$libvVersion" ">=" "7.2.0" + libvStatus=$? + + if [[ $hvStatus -eq 0 && $libvStatus -eq 0 ]]; then + log -ne "Success... 
[ QEMU: $hvVersion Libvirt: $libvVersion apiVersion: $apiVersion ]" + else + echo "Failure... Your QEMU version $hvVersion or libvirt version $libvVersion is unsupported. Consider upgrading to the required minimum version of QEMU: 4.2.0 and Libvirt: 7.2.0" + exit 1 + fi +} + +cleanup() { + local status=0 + rm -rf "$dest" || { echo "Failed to delete $dest"; status=1; } + if [[ $status -ne 0 ]]; then + echo "Backup cleanup failed" + exit $EXIT_CLEANUP_FAILED + fi +} + +split_csv() { + tr ',' '\n' <<< "$1" +} + +is_rbd_disk_path() { + local disk_path="$1" + [[ "$disk_path" == rbd:* || "$disk_path" == rbd/* ]] +} + +get_backup_file_by_index() { + local index="$1" + local fallback="$2" + if [[ -z "$BACKUP_FILES" ]]; then + echo "$fallback" + return + fi + local current=0 + while IFS= read -r value; do + if [[ "$current" -eq "$index" ]]; then + echo "$value" + return + fi + current=$((current + 1)) + done < <(split_csv "$BACKUP_FILES") + echo "$fallback" +} + +dump_checkpoint_xml() { + local vm_name="$1" + if [[ -n "$CHECKPOINT_NAME" ]]; then + virsh -c qemu:///system checkpoint-dumpxml --domain "$vm_name" --checkpointname "$CHECKPOINT_NAME" --no-domain > "$dest/checkpoints/$CHECKPOINT_NAME.xml" 2>/dev/null || true + fi +} + +redefine_checkpoint_if_needed() { + local vm_name="$1" + local checkpoint_file="$2" + if [[ -z "$PARENT_CHECKPOINT_NAME" || -z "$checkpoint_file" || ! -f "$checkpoint_file" ]]; then + return + fi + if virsh -c qemu:///system checkpoint-info --domain "$vm_name" --checkpointname "$PARENT_CHECKPOINT_NAME" > /dev/null 2>&1; then + return + fi + if ! 
virsh -c qemu:///system checkpoint-create --domain "$vm_name" --xmlfile "$checkpoint_file" --redefine > /dev/null 2>&1; then + echo "Failed to redefine checkpoint $PARENT_CHECKPOINT_NAME on domain $vm_name" + exit 1 + fi +} + + +parse_rbd_uri() { + local uri="$1" + log -ne "parse_rbd_uri called with uri=[$uri]" + + RBD_IMAGE="" + RBD_MON_HOST="" + RBD_USER="" + RBD_KEY="" + + if [[ "$uri" == rbd:* ]]; then + local payload="${uri#rbd:}" + RBD_IMAGE="${payload%%:*}" + + if [[ "$uri" =~ :mon_host=([^:]*) ]]; then + RBD_MON_HOST="${BASH_REMATCH[1]}" + RBD_MON_HOST="${RBD_MON_HOST//\\;/,}" + RBD_MON_HOST="${RBD_MON_HOST//\\:/:}" + fi + + if [[ "$uri" =~ :id=([^:]*) ]]; then + RBD_USER="${BASH_REMATCH[1]}" + fi + + if [[ "$uri" =~ :key=([^:]*) ]]; then + RBD_KEY="${BASH_REMATCH[1]}" + fi + elif [[ "$uri" == rbd/* ]]; then + RBD_IMAGE="$uri" + else + echo "Invalid RBD disk path: $uri" + cleanup + fi + + if [[ -z "$RBD_IMAGE" ]]; then + echo "Failed to parse RBD image from uri: $uri" + cleanup + fi + + log -ne "Parsed RBD uri -> IMAGE=[$RBD_IMAGE], MON=[$RBD_MON_HOST], USER=[$RBD_USER]" +} + +build_rbd_cmd() { + RBD_CMD=(rbd) + if [[ -n "$RBD_MON_HOST" ]]; then + RBD_CMD+=(-m "$RBD_MON_HOST") + fi + if [[ -n "$RBD_USER" ]]; then + RBD_CMD+=(--id "$RBD_USER") + fi + if [[ -n "$RBD_KEY" ]]; then + RBD_CMD+=(--key "$RBD_KEY") + fi +} + +write_rbd_backup_metadata() { + local backup_type="$1" + local checkpoint_name="$2" + local parent_checkpoint_name="$3" + + cat > "$dest/rbd-backup.meta" < "$dest/checkpoints/$checkpoint_name.meta" <" > "$dest/backup.xml" + local index=0 + for disk in $(virsh -c qemu:///system domblklist "$VM" --details 2>/dev/null | awk '/disk/{print $3}'); do + local target_file="$dest/$(get_backup_file_by_index "$index")" + echo "" >> "$dest/backup.xml" + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + echo "$PARENT_CHECKPOINT_NAME" >> "$dest/backup.xml" + fi + echo "" >> "$dest/backup.xml" + index=$((index + 1)) + done + 
echo "" >> "$dest/backup.xml" + + echo "$CHECKPOINT_NAME" > "$dest/checkpoint.xml" + for disk in $(virsh -c qemu:///system domblklist "$VM" --details 2>/dev/null | awk '/disk/{print $3}'); do + echo "" >> "$dest/checkpoint.xml" + done + echo "" >> "$dest/checkpoint.xml" + + local thaw=0 + if [[ ${QUIESCE} == "true" ]]; then + if virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-freeze"}' > /dev/null 2>/dev/null; then + thaw=1 + fi + fi + + local backup_begin=0 + if virsh -c qemu:///system backup-begin --domain "$VM" --backupxml "$dest/backup.xml" --checkpointxml "$dest/checkpoint.xml" > /dev/null 2>&1; then + backup_begin=1 + fi + + if [[ $thaw -eq 1 ]]; then + virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-thaw"}' > /dev/null 2>&1 || true + fi + + if [[ $backup_begin -ne 1 ]]; then + cleanup + exit 1 + fi + + while true; do + status=$(virsh -c qemu:///system domjobinfo "$VM" --completed --keep-completed | awk '/Job type:/ {print $3}') + case "$status" in + Completed) break ;; + Failed) echo "Virsh backup job failed"; cleanup ;; + esac + sleep 5 + done + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_BACKUP_DIR" ]]; then + while IFS= read -r backup_file; do + [[ -z "$backup_file" ]] && continue + qemu-img rebase -u -F qcow2 -b "$PARENT_BACKUP_DIR/$backup_file" "$dest/$backup_file" > /dev/null 2>&1 || true + done < <(split_csv "$BACKUP_FILES") + fi + + dump_checkpoint_xml "$VM" + rm -f "$dest/backup.xml" "$dest/checkpoint.xml" + sync +} + +backup_rbd_volumes() { + mkdir -p "$dest/checkpoints" || { echo "Failed to create backup directory $dest"; exit 1; } + local index=0 + while IFS= read -r disk_path; do + [[ -z "$disk_path" ]] && continue + local created_snapshot="" + log -ne "Loop disk raw value=[$disk_path]" + parse_rbd_uri "$disk_path" + build_rbd_cmd + log -ne "Built RBD command: ${RBD_CMD[*]}" + + local output_file="$dest/$(get_backup_file_by_index "$index" "${RBD_IMAGE##*/}.raw")" + log -ne 
"Starting RBD backup for disk path [$disk_path], resolved image [$RBD_IMAGE], output [$output_file]" + + if ! timeout 30s "${RBD_CMD[@]}" info "$RBD_IMAGE" >> "$logFile" 2>&1; then + echo "Failed to access RBD image $RBD_IMAGE" + cleanup + fi + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + if ! timeout 30s "${RBD_CMD[@]}" snap ls "$RBD_IMAGE" 2>>"$logFile" | awk 'NR>1 {print $2}' | grep -Fxq "$PARENT_CHECKPOINT_NAME"; then + echo "Parent RBD snapshot ${RBD_IMAGE}@${PARENT_CHECKPOINT_NAME} not found for incremental backup" + cleanup + fi + fi + + if ! timeout 30s "${RBD_CMD[@]}" snap create "${RBD_IMAGE}@${CHECKPOINT_NAME}" >> "$logFile" 2>&1; then + echo "Failed to create RBD snapshot ${RBD_IMAGE}@${CHECKPOINT_NAME}" + cleanup + fi + created_snapshot="${RBD_IMAGE}@${CHECKPOINT_NAME}" + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + if ! timeout 6h "${RBD_CMD[@]}" export-diff --from-snap "$PARENT_CHECKPOINT_NAME" "${RBD_IMAGE}@${CHECKPOINT_NAME}" "$output_file" >> "$logFile" 2>&1; then + echo "Failed to export incremental RBD diff for ${RBD_IMAGE}@${CHECKPOINT_NAME}" + [[ -n "$created_snapshot" ]] && "${RBD_CMD[@]}" snap rm "$created_snapshot" >> "$logFile" 2>&1 || true + cleanup + fi + else + if ! 
timeout 6h "${RBD_CMD[@]}" export "${RBD_IMAGE}@${CHECKPOINT_NAME}" "$output_file" >> "$logFile" 2>&1; then + echo "Failed to export full RBD snapshot ${RBD_IMAGE}@${CHECKPOINT_NAME}" + [[ -n "$created_snapshot" ]] && "${RBD_CMD[@]}" snap rm "$created_snapshot" >> "$logFile" 2>&1 || true + cleanup + fi + fi + + log -ne "Finished exporting backup file [$output_file] size=[$(stat -c %s "$output_file" 2>/dev/null)]" + index=$((index + 1)) + done < <(split_csv "$DISK_PATHS") + + write_rbd_backup_metadata "$BACKUP_TYPE" "$CHECKPOINT_NAME" "$PARENT_CHECKPOINT_NAME" + write_rbd_checkpoint_metadata "$CHECKPOINT_NAME" "$PARENT_CHECKPOINT_NAME" +} + +has_child_backup() { + local checkpoint_name="$1" + [[ -z "$checkpoint_name" ]] && return 1 + grep -R -q "^parent_checkpoint_name=$checkpoint_name$" "$(dirname "$dest")"/*/rbd-backup.meta 2>/dev/null +} + +delete_rbd_snapshot_if_unreferenced() { + local disk_paths="$1" + local checkpoint_name="$2" + + [[ -z "$checkpoint_name" ]] && return 0 + + if has_child_backup "$checkpoint_name"; then + log -ne "Skip snapshot delete [$checkpoint_name] (child exists)" + return 0 + fi + + while IFS= read -r disk_path; do + [[ -z "$disk_path" ]] && continue + parse_rbd_uri "$disk_path" + build_rbd_cmd + + if timeout 30s "${RBD_CMD[@]}" snap ls "$RBD_IMAGE" 2>/dev/null | awk 'NR>1 {print $2}' | grep -Fxq "$checkpoint_name"; then + log -ne "Deleting snapshot [${RBD_IMAGE}@${checkpoint_name}]" + "${RBD_CMD[@]}" snap rm "${RBD_IMAGE}@${checkpoint_name}" >> "$logFile" 2>&1 || true + fi + done < <(split_csv "$disk_paths") +} + +delete_backup() { + if [[ -f "$dest/rbd-backup.meta" ]]; then + source "$dest/rbd-backup.meta" + + log -ne "Deleting backup with metadata [$dest]" + + if [[ "$FORCED" != "true" ]] && has_child_backup "$checkpoint_name"; then + echo "Cannot delete backup [$backup_dir]: child backup exists" + exit 1 + fi + + delete_rbd_snapshot_if_unreferenced "$disk_paths" "$checkpoint_name" + elif [[ -n "$CHECKPOINT_NAME" && -n "$DISK_PATHS" 
]]; then + log -ne "Deleting backup using command metadata [$dest]" + delete_rbd_snapshot_if_unreferenced "$DISK_PATHS" "$CHECKPOINT_NAME" + fi + + rm -frv "$dest" + sync +} + +usage() { + echo "" + echo "Usage: $0 -o -v|--vm -p -b -c -r -i -j -f -d -q|--quiesce " + echo "" + exit 1 +} + +while [[ $# -gt 0 ]]; do + case $1 in + -o|--operation) OP="$2"; shift; shift ;; + -v|--vm) VM="$2"; shift; shift ;; + -p|--path) BACKUP_DIR="$2"; shift; shift ;; + -b|--backuptype) BACKUP_TYPE="$2"; shift; shift ;; + -c|--checkpoint) CHECKPOINT_NAME="$2"; shift; shift ;; + -r|--parentbackup) PARENT_BACKUP_DIR="$2"; shift; shift ;; + -i|--parentcheckpoint) PARENT_CHECKPOINT_NAME="$2"; shift; shift ;; + -j|--parentcheckpointpath) PARENT_CHECKPOINT_PATH="$2"; shift; shift ;; + -f|--backupfiles) BACKUP_FILES="$2"; shift; shift ;; + -q|--quiesce) QUIESCE="$2"; shift; shift ;; + -d|--diskpaths) DISK_PATHS="$2"; shift; shift ;; + -x|--forced) FORCED="$2"; shift; shift ;; + -h|--help) usage ;; + *) echo "Invalid option: $1"; usage ;; + esac +done + +if [[ -z "$BACKUP_DIR" ]]; then + echo "Backup path (-p|--path) is required" + exit 1 +fi + +dest="$BACKUP_DIR" +sanity_checks + +log -ne "ablestack_cvtbackup.sh start op=[$OP] vm=[$VM] backupDir=[$BACKUP_DIR] backupType=[$BACKUP_TYPE] checkpoint=[$CHECKPOINT_NAME] parentBackup=[$PARENT_BACKUP_DIR] parentCheckpoint=[$PARENT_CHECKPOINT_NAME] diskPaths=[$DISK_PATHS] backupFiles=[$BACKUP_FILES]" + +if [[ "$OP" == "backup-running" ]]; then + backup_running_vm +elif [[ "$OP" == "backup-rbd" ]]; then + backup_rbd_volumes +elif [[ "$OP" == "delete" ]]; then + delete_backup +else + echo "Unsupported operation: $OP" + exit 1 +fi diff --git a/scripts/vm/hypervisor/kvm/ablestack_nasbackup.sh b/scripts/vm/hypervisor/kvm/ablestack_nasbackup.sh new file mode 100755 index 000000000000..1960720ec7a9 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/ablestack_nasbackup.sh @@ -0,0 +1,626 @@ +#!/usr/bin/bash + +# Licensed to the Apache Software Foundation (ASF) 
under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set -eo pipefail + +# CloudStack B&R NAS Backup and Recovery Tool for KVM + +# TODO: do libvirt/logging etc checks + +### Declare variables ### + +OP="" +VM="" +NAS_TYPE="" +NAS_ADDRESS="" +MOUNT_OPTS="" +BACKUP_DIR="" +BACKUP_TYPE="" +CHECKPOINT_NAME="" +PARENT_BACKUP_DIR="" +PARENT_CHECKPOINT_NAME="" +PARENT_CHECKPOINT_PATH="" +BACKUP_FILES="" +DISK_PATHS="" +QUIESCE="" +FORCED="false" +logFile="/var/log/cloudstack/agent/agent.log" + +EXIT_CLEANUP_FAILED=20 + +log() { + [[ "$verb" -eq 1 ]] && builtin echo "$@" + if [[ "$1" == "-ne" || "$1" == "-e" || "$1" == "-n" ]]; then + builtin echo -e "$(date '+%Y-%m-%d %H-%M-%S>')" "${@: 2}" >> "$logFile" + else + builtin echo "$(date '+%Y-%m-%d %H-%M-%S>')" "$@" >> "$logFile" + fi +} + +vercomp() { + local IFS=. 
+ local i ver1=($1) ver2=($3) + + # Compare each segment of the version numbers + for ((i=0; i<${#ver1[@]}; i++)); do + if [[ -z ${ver2[i]} ]]; then + ver2[i]=0 + fi + + if ((10#${ver1[i]} > 10#${ver2[i]})); then + return 0 # Version 1 is greater + elif ((10#${ver1[i]} < 10#${ver2[i]})); then + return 2 # Version 2 is greater + fi + done + return 0 # Versions are equal +} + +sanity_checks() { + hvVersion=$(virsh version | grep hypervisor | awk '{print $(NF)}') + libvVersion=$(virsh version | grep libvirt | awk '{print $(NF)}' | tail -n 1) + apiVersion=$(virsh version | grep API | awk '{print $(NF)}') + + vercomp "$hvVersion" ">=" "4.2.0" + hvStatus=$? + vercomp "$libvVersion" ">=" "7.2.0" + libvStatus=$? + + if [[ $hvStatus -eq 0 && $libvStatus -eq 0 ]]; then + log -ne "Success... [ QEMU: $hvVersion Libvirt: $libvVersion apiVersion: $apiVersion ]" + else + echo "Failure... Your QEMU version $hvVersion or libvirt version $libvVersion is unsupported. Consider upgrading to the required minimum version of QEMU: 4.2.0 and Libvirt: 7.2.0" + exit 1 + fi +} + +### Operation methods ### + +backup_running_vm() { + mount_operation + mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } + mkdir -p "$dest/checkpoints" || { echo "Failed to create checkpoint directory $dest/checkpoints"; exit 1; } + + local parent_checkpoint_file="" + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_PATH" ]]; then + parent_checkpoint_file="$mount_point/$PARENT_CHECKPOINT_PATH" + redefine_checkpoint_if_needed "$VM" "$parent_checkpoint_file" + fi + + echo "" > "$dest/backup.xml" + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + echo "$PARENT_CHECKPOINT_NAME" >> "$dest/backup.xml" + fi + echo "" >> "$dest/backup.xml" + echo "$CHECKPOINT_NAME" > "$dest/checkpoint.xml" + local index=0 + while IFS='|' read -r disk target; do + [[ -z "$disk" ]] && continue + local backup_file + backup_file=$(get_backup_file_by_index "$index" 
"$(basename "$target").qcow2") + echo "" >> "$dest/backup.xml" + echo "" >> "$dest/checkpoint.xml" + index=$((index + 1)) + done < <(virsh -c qemu:///system domblklist "$VM" --details 2>/dev/null | awk '/disk/ {print $3 "|" $4}') + echo "" >> "$dest/backup.xml" + echo "" >> "$dest/checkpoint.xml" + + local thaw=0 + if [[ ${QUIESCE} == "true" ]]; then + if virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-freeze"}' > /dev/null 2>/dev/null; then + thaw=1 + fi + fi + + # Start push backup + local backup_begin=0 + if virsh -c qemu:///system backup-begin --domain "$VM" --backupxml "$dest/backup.xml" --checkpointxml "$dest/checkpoint.xml" 2>&1 > /dev/null; then + backup_begin=1; + fi + + if [[ $thaw -eq 1 ]]; then + if ! response=$(virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-thaw"}' 2>&1 > /dev/null); then + echo "Failed to thaw the filesystem for vm $VM: $response" + cleanup + exit 1 + fi + fi + + if [[ $backup_begin -ne 1 ]]; then + cleanup + exit 1 + fi + + backup_domain_information "$VM" + + while true; do + status=$(virsh -c qemu:///system domjobinfo "$VM" --completed --keep-completed | awk '/Job type:/ {print $3}') + case "$status" in + Completed) + break ;; + Failed) + echo "Virsh backup job failed" + cleanup ;; + esac + sleep 5 + done + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_BACKUP_DIR" ]]; then + local index=0 + while IFS='|' read -r disk target; do + [[ -z "$disk" ]] && continue + local backup_file + backup_file=$(get_backup_file_by_index "$index" "$(basename "$target").qcow2") + output="$dest/$backup_file" + parent="../$(basename "$PARENT_BACKUP_DIR")/$backup_file" + if ! 
qemu-img rebase -u -F qcow2 -b "$parent" "$output" > "$logFile" 2> >(cat >&2); then + echo "qemu-img rebase failed for $output with parent $parent" + cleanup + fi + index=$((index + 1)) + done < <(virsh -c qemu:///system domblklist "$VM" --details 2>/dev/null | awk '/disk/ {print $3 "|" $4}') + fi + + dump_checkpoint_xml "$VM" + rm -f "$dest/backup.xml" + rm -f "$dest/checkpoint.xml" + sync + + # Print statistics + virsh -c qemu:///system domjobinfo "$VM" --completed + du -sb "$dest" | cut -f1 + + umount "$mount_point" + rmdir "$mount_point" +} + +backup_rbd_volumes() { + log -ne "Entered backup_rbd_volumes with DISK_PATHS=[$DISK_PATHS], BACKUP_FILES=[$BACKUP_FILES], BACKUP_DIR=[$BACKUP_DIR]" + mount_operation + mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } + + backup_domain_information "$VM" + + local index=0 + while IFS= read -r disk; do + local created_snapshot="" + log -ne "Loop disk raw value=[$disk]" + [[ -z "$disk" ]] && continue + + parse_rbd_uri "$disk" + log -ne "Parsed disk [$disk] -> RBD_IMAGE=[$RBD_IMAGE], MON=[$RBD_MON_HOST], USER=[$RBD_USER]" + + if [[ -z "$RBD_IMAGE" ]]; then + echo "Unable to parse RBD disk path: $disk" + cleanup + fi + + build_rbd_cmd + log -ne "Built RBD command: ${RBD_CMD[*]}" + + local backup_file + backup_file=$(get_backup_file_by_index "$index" "${RBD_IMAGE##*/}.raw") + local output="$dest/$backup_file" + local current_snapshot="${CHECKPOINT_NAME}" + + log -ne "Resolved backup file [$backup_file], destination [$output]" + log -ne "Starting RBD backup for disk path [$disk], resolved image [$RBD_IMAGE], output [$output]" + + if ! timeout 30s "${RBD_CMD[@]}" info "$RBD_IMAGE" >> "$logFile" 2>&1; then + echo "Failed to access RBD image $RBD_IMAGE" + cleanup + fi + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + if ! 
timeout 30s "${RBD_CMD[@]}" snap ls "$RBD_IMAGE" 2>>"$logFile" | awk 'NR>1 {print $2}' | grep -Fxq "$PARENT_CHECKPOINT_NAME"; then + echo "Parent RBD snapshot ${RBD_IMAGE}@${PARENT_CHECKPOINT_NAME} not found for incremental backup" + cleanup + fi + fi + + if ! timeout 30s "${RBD_CMD[@]}" snap create "${RBD_IMAGE}@${current_snapshot}" >> "$logFile" 2>&1; then + echo "Failed to create RBD snapshot ${RBD_IMAGE}@${current_snapshot}" + cleanup + fi + created_snapshot="${RBD_IMAGE}@${current_snapshot}" + + if [[ "$BACKUP_TYPE" == "INCREMENTAL" && -n "$PARENT_CHECKPOINT_NAME" ]]; then + if ! timeout 6h "${RBD_CMD[@]}" export-diff --from-snap "$PARENT_CHECKPOINT_NAME" "${RBD_IMAGE}@${current_snapshot}" "$output" >> "$logFile" 2>&1; then + echo "Failed to export incremental RBD diff for ${RBD_IMAGE}@${current_snapshot}" + [[ -n "$created_snapshot" ]] && "${RBD_CMD[@]}" snap rm "$created_snapshot" >> "$logFile" 2>&1 || true + cleanup + fi + else + if ! timeout 6h "${RBD_CMD[@]}" export "${RBD_IMAGE}@${current_snapshot}" "$output" >> "$logFile" 2>&1; then + echo "Failed to export full RBD snapshot ${RBD_IMAGE}@${current_snapshot}" + [[ -n "$created_snapshot" ]] && "${RBD_CMD[@]}" snap rm "$created_snapshot" >> "$logFile" 2>&1 || true + cleanup + fi + fi + + log -ne "Finished exporting backup file [$output] size=[$(stat -c %s "$output" 2>/dev/null)]" + stat -c %s "$output" + index=$((index + 1)) + done < <(split_csv "$DISK_PATHS") + + write_rbd_backup_metadata "$BACKUP_TYPE" "$CHECKPOINT_NAME" "$PARENT_CHECKPOINT_NAME" + + sync + log -ne "RBD backup completed checkpoint=[$CHECKPOINT_NAME] parent=[$PARENT_CHECKPOINT_NAME]" + umount "$mount_point" + rmdir "$mount_point" +} + +backup_domain_information() { + local vm_name="$1" + + [[ -z "$vm_name" ]] && return 0 + + mkdir -p "$dest/checkpoints" || { + echo "Failed to create checkpoint directory $dest/checkpoints" + exit 1 + } + + if virsh -c qemu:///system dominfo "$vm_name" > /dev/null 2>&1; then + virsh -c qemu:///system 
dumpxml "$vm_name" > "$dest/domain-config.xml" 2>/dev/null || true + virsh -c qemu:///system dominfo "$vm_name" > "$dest/dominfo.xml" 2>/dev/null || true + virsh -c qemu:///system domiflist "$vm_name" > "$dest/domiflist.xml" 2>/dev/null || true + virsh -c qemu:///system domblklist "$vm_name" > "$dest/domblklist.xml" 2>/dev/null || true + + if [[ -n "$CHECKPOINT_NAME" ]]; then + cat > "$dest/checkpoints/$CHECKPOINT_NAME.meta" </dev/null +} + +delete_rbd_snapshot_if_unreferenced() { + local disk_paths="$1" + local checkpoint_name="$2" + + [[ -z "$checkpoint_name" ]] && return 0 + + if has_child_backup "$checkpoint_name"; then + log -ne "Skip snapshot delete [$checkpoint_name] (child exists)" + return 0 + fi + + while IFS= read -r disk; do + [[ -z "$disk" ]] && continue + parse_rbd_uri "$disk" + build_rbd_cmd + + if [[ -n "$RBD_IMAGE" ]]; then + log -ne "Deleting snapshot [${RBD_IMAGE}@${checkpoint_name}]" + "${RBD_CMD[@]}" snap rm "${RBD_IMAGE}@${checkpoint_name}" >> "$logFile" 2>&1 || true + fi + done < <(split_csv "$disk_paths") +} + +delete_backup() { + mount_operation + + if [[ -f "$dest/rbd-backup.meta" ]]; then + source "$dest/rbd-backup.meta" + + log -ne "Deleting backup with metadata [$dest]" + + if [[ "$FORCED" != "true" ]] && has_child_backup "$checkpoint_name"; then + echo "Cannot delete backup [$backup_dir]: child backup exists" + umount "$mount_point" + rmdir "$mount_point" + exit 1 + fi + + delete_rbd_snapshot_if_unreferenced "$disk_paths" "$checkpoint_name" + elif [[ -n "$CHECKPOINT_NAME" && -n "$DISK_PATHS" ]]; then + log -ne "Deleting backup using command metadata [$dest]" + delete_rbd_snapshot_if_unreferenced "$DISK_PATHS" "$CHECKPOINT_NAME" + fi + + rm -frv $dest + sync + umount $mount_point + rmdir $mount_point +} + +get_backup_stats() { + mount_operation + + echo $mount_point + df -P $mount_point 2>/dev/null | awk 'NR==2 {print $2, $3}' + umount $mount_point + rmdir $mount_point +} + +mount_operation() { + mount_point=$(mktemp -d -t 
csbackup.XXXXX) + dest="$mount_point/${BACKUP_DIR}" + if [ ${NAS_TYPE} == "cifs" ]; then + MOUNT_OPTS="${MOUNT_OPTS},nobrl" + fi + mount -t ${NAS_TYPE} ${NAS_ADDRESS} ${mount_point} $([[ ! -z "${MOUNT_OPTS}" ]] && echo -o ${MOUNT_OPTS}) 2>&1 | tee -a "$logFile" + if [ $? -eq 0 ]; then + log -ne "Successfully mounted ${NAS_TYPE} store" + else + echo "Failed to mount ${NAS_TYPE} store" + exit 1 + fi +} + +cleanup() { + local status=0 + + rm -rf "$dest" || { echo "Failed to delete $dest"; status=1; } + umount "$mount_point" || { echo "Failed to unmount $mount_point"; status=1; } + rmdir "$mount_point" || { echo "Failed to remove mount point $mount_point"; status=1; } + + if [[ $status -ne 0 ]]; then + echo "Backup cleanup failed" + exit $EXIT_CLEANUP_FAILED + fi +} + +split_csv() { + tr ',' '\n' <<< "$1" +} + +is_rbd_disk_path() { + local disk_path="$1" + [[ "$disk_path" == rbd:* || "$disk_path" == rbd/* ]] +} + +get_backup_file_by_index() { + local index="$1" + local fallback="$2" + if [[ -z "$BACKUP_FILES" ]]; then + echo "$fallback" + return + fi + local current=0 + while IFS= read -r value; do + if [[ "$current" -eq "$index" ]]; then + echo "$value" + return + fi + current=$((current + 1)) + done < <(split_csv "$BACKUP_FILES") + echo "$fallback" +} + +dump_checkpoint_xml() { + local vm_name="$1" + if [[ -n "$CHECKPOINT_NAME" ]]; then + virsh -c qemu:///system checkpoint-dumpxml --domain "$vm_name" --checkpointname "$CHECKPOINT_NAME" --no-domain > "$dest/checkpoints/$CHECKPOINT_NAME.xml" 2>/dev/null || true + fi +} + +redefine_checkpoint_if_needed() { + local vm_name="$1" + local checkpoint_file="$2" + if [[ -z "$PARENT_CHECKPOINT_NAME" || -z "$checkpoint_file" || ! -f "$checkpoint_file" ]]; then + return + fi + if virsh -c qemu:///system checkpoint-info --domain "$vm_name" --checkpointname "$PARENT_CHECKPOINT_NAME" > /dev/null 2>&1; then + return + fi + if ! 
virsh -c qemu:///system checkpoint-create --domain "$vm_name" --xmlfile "$checkpoint_file" --redefine > /dev/null 2>&1; then + echo "Failed to redefine checkpoint $PARENT_CHECKPOINT_NAME on domain $vm_name" + cleanup + fi +} + +parse_rbd_uri() { + local uri="$1" + log -ne "parse_rbd_uri called with uri=[$uri]" + + RBD_IMAGE="" + RBD_MON_HOST="" + RBD_USER="" + RBD_KEY="" + + if [[ "$uri" == rbd:* ]]; then + local payload="${uri#rbd:}" + RBD_IMAGE="${payload%%:*}" + + if [[ "$uri" =~ :mon_host=([^:]*) ]]; then + RBD_MON_HOST="${BASH_REMATCH[1]}" + RBD_MON_HOST="${RBD_MON_HOST//\\;/,}" + RBD_MON_HOST="${RBD_MON_HOST//\\:/:}" + fi + + if [[ "$uri" =~ :id=([^:]*) ]]; then + RBD_USER="${BASH_REMATCH[1]}" + fi + + if [[ "$uri" =~ :key=([^:]*) ]]; then + RBD_KEY="${BASH_REMATCH[1]}" + fi + elif [[ "$uri" == rbd/* ]]; then + RBD_IMAGE="$uri" + else + echo "Invalid RBD disk path: $uri" + cleanup + fi + + if [[ -z "$RBD_IMAGE" ]]; then + echo "Failed to parse RBD image from uri: $uri" + cleanup + fi + + log -ne "Parsed RBD uri -> IMAGE=[$RBD_IMAGE], MON=[$RBD_MON_HOST], USER=[$RBD_USER]" +} + +build_rbd_cmd() { + RBD_CMD=(rbd) + if [[ -n "$RBD_MON_HOST" ]]; then + RBD_CMD+=(-m "$RBD_MON_HOST") + fi + if [[ -n "$RBD_USER" ]]; then + RBD_CMD+=(--id "$RBD_USER") + fi + if [[ -n "$RBD_KEY" ]]; then + RBD_CMD+=(--key "$RBD_KEY") + fi +} + +write_rbd_backup_metadata() { + local backup_type="$1" + local checkpoint_name="$2" + local parent_checkpoint_name="$3" + + cat > "$dest/rbd-backup.meta" < -v|--vm -t -s -m -p -b -c -r -i -j -f -d -q|--quiesce -x|--forced " + echo "" + exit 1 +} + +while [[ $# -gt 0 ]]; do + case $1 in + -o|--operation) + OP="$2" + shift + shift + ;; + -v|--vm) + VM="$2" + shift + shift + ;; + -t|--type) + NAS_TYPE="$2" + shift + shift + ;; + -s|--storage) + NAS_ADDRESS="$2" + shift + shift + ;; + -m|--mount) + MOUNT_OPTS="$2" + shift + shift + ;; + -p|--path) + BACKUP_DIR="$2" + shift + shift + ;; + -b|--backuptype) + BACKUP_TYPE="$2" + shift + shift + ;; + 
-c|--checkpoint) + CHECKPOINT_NAME="$2" + shift + shift + ;; + -r|--parentpath) + PARENT_BACKUP_DIR="$2" + shift + shift + ;; + -i|--parentcheckpoint) + PARENT_CHECKPOINT_NAME="$2" + shift + shift + ;; + -j|--parentcheckpointpath) + PARENT_CHECKPOINT_PATH="$2" + shift + shift + ;; + -f|--backupfiles) + BACKUP_FILES="$2" + shift + shift + ;; + -q|--quiesce) + QUIESCE="$2" + shift + shift + ;; + -x|--forced) + FORCED="$2" + shift + shift + ;; + -d|--diskpaths) + DISK_PATHS="$2" + shift + shift + ;; + -h|--help) + usage + shift + ;; + *) + echo "Invalid option: $1" + usage + ;; + esac +done + +# Perform Initial sanity checks +sanity_checks + +log -ne "nasbackup.sh start op=[$OP] vm=[$VM] backupDir=[$BACKUP_DIR] backupType=[$BACKUP_TYPE] checkpoint=[$CHECKPOINT_NAME] parentBackup=[$PARENT_BACKUP_DIR] parentCheckpoint=[$PARENT_CHECKPOINT_NAME] diskPaths=[$DISK_PATHS] backupFiles=[$BACKUP_FILES]" + +if [ "$OP" = "backup-running" ]; then + backup_running_vm +elif [ "$OP" = "backup-rbd" ]; then + backup_rbd_volumes +elif [ "$OP" = "delete" ]; then + delete_backup +elif [ "$OP" = "stats" ]; then + get_backup_stats +fi diff --git a/scripts/vm/hypervisor/kvm/cvtbackup.sh b/scripts/vm/hypervisor/kvm/cvtbackup.sh deleted file mode 100644 index 0493654fce02..000000000000 --- a/scripts/vm/hypervisor/kvm/cvtbackup.sh +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -eo pipefail - -# CloudStack B&R Commvault Backup and Recovery Tool for KVM - -# TODO: do libvirt/logging etc checks - -### Declare variables ### - -OP="" -VM="" -BACKUP_DIR="" -DISK_PATHS="" -QUIESCE="" -logFile="/var/log/cloudstack/agent/agent.log" - -EXIT_CLEANUP_FAILED=20 - -log() { - [[ "$verb" -eq 1 ]] && builtin echo "$@" - if [[ "$1" == "-ne" || "$1" == "-e" || "$1" == "-n" ]]; then - builtin echo -e "$(date '+%Y-%m-%d %H-%M-%S>')" "${@: 2}" >> "$logFile" - else - builtin echo "$(date '+%Y-%m-%d %H-%M-%S>')" "$@" >> "$logFile" - fi -} - -vercomp() { - local IFS=. - local i ver1=($1) ver2=($3) - - # Compare each segment of the version numbers - for ((i=0; i<${#ver1[@]}; i++)); do - if [[ -z ${ver2[i]} ]]; then - ver2[i]=0 - fi - - if ((10#${ver1[i]} > 10#${ver2[i]})); then - return 0 # Version 1 is greater - elif ((10#${ver1[i]} < 10#${ver2[i]})); then - return 2 # Version 2 is greater - fi - done - return 0 # Versions are equal -} - -sanity_checks() { - hvVersion=$(virsh version | grep hypervisor | awk '{print $(NF)}') - libvVersion=$(virsh version | grep libvirt | awk '{print $(NF)}' | tail -n 1) - apiVersion=$(virsh version | grep API | awk '{print $(NF)}') - - # Compare qemu version (hvVersion >= 4.2.0) - vercomp "$hvVersion" ">=" "4.2.0" - hvStatus=$? - - # Compare libvirt version (libvVersion >= 7.2.0) - vercomp "$libvVersion" ">=" "7.2.0" - libvStatus=$? - - if [[ $hvStatus -eq 0 && $libvStatus -eq 0 ]]; then - log -ne "Success... [ QEMU: $hvVersion Libvirt: $libvVersion apiVersion: $apiVersion ]" - else - echo "Failure... 
Your QEMU version $hvVersion or libvirt version $libvVersion is unsupported. Consider upgrading to the required minimum version of QEMU: 4.2.0 and Libvirt: 7.2.0" - exit 1 - fi - - log -ne "Environment Sanity Checks successfully passed" -} - -### Operation methods ### - -backup_running_vm() { - mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } - - name="root" - echo "" > $dest/backup.xml - for disk in $(virsh -c qemu:///system domblklist $VM --details 2>/dev/null | awk '/disk/{print$3}'); do - volpath=$(virsh -c qemu:///system domblklist $VM --details | awk "/$disk/{print $4}" | sed 's/.*\///') - echo "" >> $dest/backup.xml - name="datadisk" - done - echo "" >> $dest/backup.xml - - local thaw=0 - if [[ ${QUIESCE} == "true" ]]; then - log -ne "Pause option is enabled on a running virtual machine" - if virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-freeze"}' > /dev/null 2>/dev/null; then - thaw=1 - fi - fi - - # Start push backup - local backup_begin=0 - if virsh -c qemu:///system backup-begin --domain $VM --backupxml $dest/backup.xml 2>&1 > /dev/null; then - backup_begin=1; - fi - - if [[ $thaw -eq 1 ]]; then - if ! 
response=$(virsh -c qemu:///system qemu-agent-command "$VM" '{"execute":"guest-fsfreeze-thaw"}' 2>&1 > /dev/null); then - echo "Failed to thaw the filesystem for vm $VM: $response" - cleanup - exit 1 - fi - fi - - if [[ $backup_begin -ne 1 ]]; then - cleanup - exit 1 - fi - - # Backup domain information - virsh -c qemu:///system dumpxml $VM > $dest/domain-config.xml 2>/dev/null - virsh -c qemu:///system dominfo $VM > $dest/dominfo.xml 2>/dev/null - virsh -c qemu:///system domiflist $VM > $dest/domiflist.xml 2>/dev/null - virsh -c qemu:///system domblklist $VM > $dest/domblklist.xml 2>/dev/null - - while true; do - status=$(virsh -c qemu:///system domjobinfo $VM --completed --keep-completed | awk '/Job type:/ {print $3}') - case "$status" in - Completed) - break ;; - Failed) - echo "Virsh backup job failed" - cleanup ;; - esac - sleep 5 - done - sync - -} - -backup_stopped_vm() { - mkdir -p "$dest" || { echo "Failed to create backup directory $dest"; exit 1; } - - IFS="," - - name="root" - for disk in $DISK_PATHS; do - if [[ "$disk" == rbd:* ]]; then - # disk for rbd => rbd:/:mon_host=... - # sample: rbd:cloudstack/53d5c355-d726-4d3e-9422-046a503a0b12:mon_host=10.0.1.2... - beforeUuid="${disk#*/}" # Remove up to first slash after rbd: - volUuid="${beforeUuid%%:*}" # Remove everything after colon to get the uuid - else - volUuid="${disk##*/}" - fi - output="$dest/$name.$volUuid.qcow2" - if ! 
qemu-img convert -O qcow2 "$disk" "$output" > "$logFile" 2> >(cat >&2); then - echo "qemu-img convert failed for $disk $output" - cleanup - fi - name="datadisk" - done - sync - -} - -cleanup() { - local status=0 - - rm -rf "$dest" || { echo "Failed to delete $dest"; status=1; } - - if [[ $status -ne 0 ]]; then - echo "Backup cleanup failed" - exit $EXIT_CLEANUP_FAILED - fi -} - -function usage { - echo "" - echo "Usage: $0 -o -v|--vm -p -d -q|--quiesce " - echo "" - exit 1 -} - -while [[ $# -gt 0 ]]; do - case $1 in - -o|--operation) - OP="$2" - shift - shift - ;; - -v|--vm) - VM="$2" - shift - shift - ;; - -p|--path) - BACKUP_DIR="$2" - shift - shift - ;; - -q|--quiesce) - QUIESCE="$2" - shift - shift - ;; - -d|--diskpaths) - DISK_PATHS="$2" - shift - shift - ;; - -h|--help) - usage - shift - ;; - *) - echo "Invalid option: $1" - usage - ;; - esac -done - -if [[ -z "$BACKUP_DIR" ]]; then - echo "Backup path (-p|--path) is required" - exit 1 -fi - -dest="$BACKUP_DIR" - -# Perform Initial sanity checks -sanity_checks - -if [[ "$OP" != "backup" ]]; then - echo "Unsupported operation: $OP" - exit 1 -fi - -STATE=$(virsh -c qemu:///system list | awk -v vm="$VM" '$2 == vm {print $3}') - -if [[ -n "$STATE" && "$STATE" == "running" ]]; then - backup_running_vm -else - backup_stopped_vm -fi - -exit 0 diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index 37552192814c..07d03a91710b 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -53,7 +53,9 @@ import org.apache.cloudstack.framework.async.AsyncRpcContext; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; +import org.apache.cloudstack.secstorage.heuristics.HeuristicType; import 
org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; @@ -323,6 +325,15 @@ protected DataStore verifyHeuristicRulesForZone(VMTemplateVO template, Long zone return null; } + protected boolean isWritableImageStore(DataStore imageStore, Long zoneId) { + ImageStoreVO imageStoreVO = _imgStoreDao.findById(imageStore.getId()); + if (imageStoreVO == null) { + logger.warn("Unable to find image store [{}] in zone [{}] while validating heuristic rule selection.", imageStore, zoneId); + return false; + } + return !imageStoreVO.isReadonly(); + } + protected void standardImageStoreAllocation(List imageStores, VMTemplateVO template) { Set zoneSet = new HashSet(); Collections.shuffle(imageStores); diff --git a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index dc79c766072b..a070c8af20f4 100644 --- a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -29,6 +29,8 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.annotation.AnnotationService; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.user.vmsnapshot.ListVMSnapshotCmd; @@ -179,6 +181,8 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme PrimaryDataStoreDao _storagePoolDao; @Inject private AnnotationDao annotationDao; + @Inject + private BackupDao backupDao; VmWorkJobHandlerProxy _jobHandlerProxy 
= new VmWorkJobHandlerProxy(this); @@ -448,6 +452,10 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc throw new CloudRuntimeException("There are other active Instance Snapshot tasks on the Instance, please try again later"); } + if (backupDao.listByVmId(null, vmId).stream().anyMatch(backup -> Backup.Status.BackedUp.equals(backup.getStatus()))) { + throw new CloudRuntimeException("Creating Instance Snapshot failed because the Instance has a backup chain."); + } + VMSnapshot.Type vmSnapshotType = VMSnapshot.Type.Disk; if (snapshotMemory && userVmVo.getState() == VirtualMachine.State.Running) vmSnapshotType = VMSnapshot.Type.DiskAndMemory; diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index bc25f083e1b5..3566207a2316 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -272,9 +272,10 @@ public List listBackupProviderOfferings(final Long zoneId, final } List allOfferings = new ArrayList<>(); List providers = getBackupProvidersForZone(zoneId); + final String canonicalProviderName = BackupProviderNameUtils.canonicalize(providerName); for (BackupProvider provider : providers) { - if (provider.getName().equalsIgnoreCase(providerName)) { + if (provider.getName().equalsIgnoreCase(canonicalProviderName)) { try { logger.debug("Listing external backup offerings for provider {} in zone {}", provider.getName(), zoneId); List offerings = provider.listBackupOfferings(zoneId); @@ -294,7 +295,7 @@ public List listBackupProviderOfferings(final Long zoneId, final public BackupOffering importBackupOffering(final ImportBackupOfferingCmd cmd) { validateBackupForZone(cmd.getZoneId()); - String providerName = cmd.getProvider(); + String providerName = BackupProviderNameUtils.canonicalize(cmd.getProvider()); if 
(StringUtils.isEmpty(providerName)) { throw new CloudRuntimeException("Provider name must be specified"); } @@ -695,7 +696,9 @@ public BackupSchedule configureBackupSchedule(CreateBackupScheduleCmd cmd) { final int maxBackups = validateAndGetDefaultBackupRetentionIfRequired(cmd.getMaxBackups(), offering, vm); - if ((!"nas".equals(offering.getProvider()) && !"commvault".equals(offering.getProvider())) && cmd.getQuiesceVM() != null) { + if (!BackupProviderNameUtils.isNasFamily(offering.getProvider()) && + !BackupProviderNameUtils.isCommvaultFamily(offering.getProvider()) && + cmd.getQuiesceVM() != null) { throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS, Commvault backup provider"); } @@ -897,7 +900,9 @@ public boolean createBackup(CreateBackupCmd cmd, Object job) throws ResourceAllo throw new CloudRuntimeException("The assigned backup offering does not allow ad-hoc user backup"); } - if ((!"nas".equals(offering.getProvider()) && !"commvault".equals(offering.getProvider())) && cmd.getQuiesceVM() != null) { + if (!BackupProviderNameUtils.isNasFamily(offering.getProvider()) && + !BackupProviderNameUtils.isCommvaultFamily(offering.getProvider()) && + cmd.getQuiesceVM() != null) { throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS, Commvault backup provider"); } @@ -1497,7 +1502,8 @@ public boolean restoreBackupToVM(final Long backupId, final Long vmId) throws Cl String host = null; String dataStore = null; - if (!"nas".equals(offering.getProvider()) && !"commvault".equals(offering.getProvider())) { + if (!BackupProviderNameUtils.isNasFamily(offering.getProvider()) && + !BackupProviderNameUtils.isCommvaultFamily(offering.getProvider())) { Pair restoreInfo = getRestoreVolumeHostAndDatastore(vm); host = restoreInfo.first().getPrivateIpAddress(); dataStore = restoreInfo.second().getUuid(); @@ -1575,7 +1581,8 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, 
BackupProvider backupProvider = getBackupProvider(offering.getProvider()); VolumeVO backedUpVolume = volumeDao.findByUuid(backedUpVolumeUuid); Pair restoreInfo; - if ((!"nas".equals(offering.getProvider()) && !"commvault".equals(offering.getProvider())) || backedUpVolume == null) { + if ((!BackupProviderNameUtils.isNasFamily(offering.getProvider()) && + !BackupProviderNameUtils.isCommvaultFamily(offering.getProvider())) || backedUpVolume == null) { restoreInfo = getRestoreVolumeHostAndDatastore(vm); } else { restoreInfo = getRestoreVolumeHostAndDatastoreForNas(vm, backedUpVolume); @@ -1598,9 +1605,15 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, throw new CloudRuntimeException(String.format("Error restoring volume [%s] of VM [%s] to host [%s] using backup provider [%s] due to: [%s].", backedUpVolumeUuid, vm.getUuid(), host.getUuid(), backupProvider.getName(), result.second())); } - if (!attachVolumeToVM(vm.getDataCenterId(), result.second(), backupVolumeInfo, - backedUpVolumeUuid, vm, datastore.getUuid(), backup)) { - throw new CloudRuntimeException(String.format("Error attaching volume [%s] to VM [%s].", backedUpVolumeUuid, vm.getUuid())); + try { + if (!attachVolumeToVM(vm.getDataCenterId(), result.second(), backupVolumeInfo, + backedUpVolumeUuid, vm, datastore.getUuid(), backup)) { + cleanupRestoredVolumeAfterAttachFailure(result.second()); + throw new CloudRuntimeException(String.format("Error attaching volume [%s] to VM [%s].", backedUpVolumeUuid, vm.getUuid())); + } + } catch (Exception e) { + cleanupRestoredVolumeAfterAttachFailure(result.second()); + throw e; } return true; } @@ -1656,6 +1669,7 @@ public boolean deleteBackup(final Long backupId, final Boolean forced) { Long backupSize = backup.getSize() != null ? 
backup.getSize() : 0L; resourceLimitMgr.decrementResourceCount(backup.getAccountId(), Resource.ResourceType.backup_storage, backupSize); if (backupDao.remove(backup.getId())) { + backupDetailsDao.removeDetails(backup.getId()); checkAndGenerateUsageForLastBackupDeletedAfterOfferingRemove(vm, backup); return true; } else { @@ -1722,6 +1736,22 @@ private boolean attachVolumeToVM(Long zoneId, String restoredVolumeLocation, Bac } } + private void cleanupRestoredVolumeAfterAttachFailure(String restoredVolumeLocation) { + if (StringUtils.isBlank(restoredVolumeLocation)) { + return; + } + VolumeVO restoredVolume = volumeDao.findByUuid(restoredVolumeLocation); + if (restoredVolume == null) { + return; + } + try { + Account caller = CallContext.current() != null ? CallContext.current().getCallingAccount() : accountDao.findById(restoredVolume.getAccountId()); + volumeApiService.deleteVolume(restoredVolume.getId(), caller); + } catch (Exception e) { + logger.warn("Failed to cleanup restored volume {} after attach failure", restoredVolumeLocation, e); + } + } + private void checkAndGenerateUsageForLastBackupDeletedAfterOfferingRemove(VirtualMachine vm, Backup backup) { if (vm != null && (vm.getBackupOfferingId() == null || vm.getBackupOfferingId() != backup.getBackupOfferingId())) { @@ -1754,7 +1784,18 @@ public void validateBackupForZone(final Long zoneId) { @Override public List listBackupProviders() { - return backupProviders; + final List providers = new ArrayList<>(); + final Set seenProviders = new HashSet<>(); + for (final BackupProvider provider : backupProviders) { + if (provider == null) { + continue; + } + final String displayName = BackupProviderNameUtils.toDisplayName(provider.getName()); + if (seenProviders.add(displayName)) { + providers.add(provider); + } + } + return providers; } @Override @@ -1784,7 +1825,12 @@ public List getBackupProvidersForZone(final Long zoneId) { if (!StringUtils.isEmpty(trimmedName)) { try { BackupProvider provider = 
getBackupProvider(trimmedName); - providers.add(provider); + boolean exists = providers.stream().anyMatch(p -> + BackupProviderNameUtils.toDisplayName(p.getName()).equalsIgnoreCase( + BackupProviderNameUtils.toDisplayName(provider.getName()))); + if (!exists) { + providers.add(provider); + } } catch (CloudRuntimeException e) { logger.warn("Failed to load backup provider: " + trimmedName + " for zone: " + zoneId, e); } @@ -1800,10 +1846,11 @@ public BackupProvider getBackupProvider(final String name) { if (StringUtils.isEmpty(name)) { throw new CloudRuntimeException("Invalid backup provider name provided"); } - if (!backupProvidersMap.containsKey(name)) { - throw new CloudRuntimeException("Failed to find backup provider by the name: " + name); - } - return backupProvidersMap.get(name); + final String canonicalName = BackupProviderNameUtils.canonicalize(name); + if (!backupProvidersMap.containsKey(canonicalName)) { + throw new CloudRuntimeException("Failed to find backup provider by the name: " + canonicalName); + } + return backupProvidersMap.get(canonicalName); } @Override @@ -1856,6 +1903,8 @@ public ConfigKey[] getConfigKeys() { BackupProviderPlugin, BackupSyncPollingInterval, BackupEnableAttachDetachVolumes, + KvmIncrementalBackup, + BackupDeltaMax, DefaultMaxAccountBackups, DefaultMaxAccountBackupStorage, DefaultMaxProjectBackups, diff --git a/ui/src/components/view/DeployVMFromBackup.vue b/ui/src/components/view/DeployVMFromBackup.vue index b5769c3913a0..a406028914f7 100644 --- a/ui/src/components/view/DeployVMFromBackup.vue +++ b/ui/src/components/view/DeployVMFromBackup.vue @@ -475,9 +475,10 @@
0 ? this.preFillContent : {} + if (Array.isArray(this.dataPreFill.networkids) && this.dataPreFill.networkids.length > 0) { + this.form.networkids = [...this.dataPreFill.networkids] + if (!this.form.defaultnetworkid) { + this.defaultnetworkid = this.dataPreFill.networkids[0] + this.form.defaultnetworkid = this.dataPreFill.networkids[0] + } + } this.showOverrideDiskOfferingOption = this.dataPreFill.overridediskoffering if (this.dataPreFill.isIso) { @@ -1660,6 +1671,12 @@ export default { const param = this.params.networks this.fetchOptions(param, 'networks') }, + resetDefaultNetworkSelectionState () { + this.defaultnetworkid = '' + this.hasInitializedDefaultNetworkSelection = false + this.networkConfig = [] + this.form.defaultnetworkid = undefined + }, resetData () { this.vm = { name: null, @@ -1681,11 +1698,13 @@ export default { disksize: null } this.zoneSelected = false + this.hasInitializedDefaultNetworkSelection = false this.formRef.value.resetFields() this.fetchData() }, updateFieldValue (name, value) { if (name === 'templateid') { + this.resetDefaultNetworkSelectionState() this.tabKey = 'templateid' this.form.templateid = value this.form.isoid = null @@ -1723,6 +1742,7 @@ export default { } } } else if (name === 'isoid') { + this.resetDefaultNetworkSelectionState() this.templateConfigurations = [] this.selectedTemplateConfiguration = {} this.templateNics = [] @@ -1776,10 +1796,36 @@ export default { }, updateNetworks (ids) { this.form.networkids = ids + this.networks = this.getSelectedNetworksWithExistingConfig( + _.filter(this.options.networks, (option) => _.includes(ids, option.id)) + ) + if (!this.hasInitializedDefaultNetworkSelection && ids && ids.length > 0 && !this.defaultnetworkid) { + this.hasInitializedDefaultNetworkSelection = true + this.updateDefaultNetworks(ids[0]) + return + } + if (!ids || ids.length === 0 || !ids.includes(this.defaultnetworkid)) { + this.updateDefaultNetworks('') + } }, updateDefaultNetworks (id) { this.defaultnetworkid = id 
this.form.defaultnetworkid = id + + if (!id) { + return + } + + const existingIds = Array.isArray(this.form.networkids) ? [...this.form.networkids] : [] + + if (!existingIds.includes(id)) { + existingIds.unshift(id) + this.form.networkids = existingIds + } + + this.networks = this.getSelectedNetworksWithExistingConfig( + _.filter(this.options.networks, option => _.includes(this.form.networkids, option.id)) + ) }, updateNetworkConfig (networks) { this.networkConfig = networks @@ -2289,8 +2335,16 @@ export default { }, onTabChange (key, type) { this[type] = key + this.resetDefaultNetworkSelectionState() + if (key === 'isoid') { this.fetchAllIsos() + } else if (key === 'templateid') { + this.fetchAllTemplates() + } + + if (this.form.networkids && this.form.networkids.length > 0) { + this.updateNetworks(this.form.networkids) } }, fetchIsos (isoFilter, params) { diff --git a/ui/src/components/view/SearchFilter.vue b/ui/src/components/view/SearchFilter.vue index 34ca438b5c5b..1b38ae6820d9 100644 --- a/ui/src/components/view/SearchFilter.vue +++ b/ui/src/components/view/SearchFilter.vue @@ -56,7 +56,7 @@