From ac3fb34a7621afcc8a2ad89a99821adf704b5dc0 Mon Sep 17 00:00:00 2001 From: Alan Hanson Date: Tue, 19 Nov 2024 17:51:51 -0800 Subject: [PATCH 1/4] Update Crucible to latest (#7101) No Propolis changes other than to update Crucible Crucible changes are: Add debug/timeout to test_memory.sh (#1563) Consolidate ack checking (#1561) Rename for crutest: RegionInfo -> DiskInfo (#1562) Fix dtrace system level scripts (#1560) Remove `ackable_work`; ack immediately instead (#1552) No more New jobs, no more New jobs column (#1559) Remove delay-based backpressure in favor of explicit queue limits (#1515) Only send flushes when Downstairs is idle; send Barrier otherwise (#1505) Update Rust crate reqwest to v0.12.9 (#1536) Update Rust crate omicron-zone-package to 0.11.1 (#1535) Remove separate validation array (#1522) Remove more unnecessary `DsState` variants (#1550) Consolidate `DownstairsClient::reinitialize` (#1549) Update Rust crate uuid to v1.11.0 (#1546) Update Rust crate reedline to 0.36.0 (#1544) Update Rust crate bytes to v1.8.0 (#1541) Update Rust crate thiserror to v1.0.66 (#1539) Update Rust crate serde_json to v1.0.132 (#1538) Update Rust crate serde to v1.0.214 (#1537) Remove transient states in `DsState` (#1526) Update Rust crate libc to v0.2.161 (#1534) Update Rust crate futures to v0.3.31 (#1532) Update Rust crate clap to v4.5.20 (#1531) Update Rust crate async-trait to 0.1.83 (#1530) Update Rust crate anyhow to v1.0.92 (#1529) Remove obsolete crutest perf test (#1528) Update dependency rust to v1.82.0 (#1512) Still more updates to support Volume layer activities. (#1508) Remove remaining IOPS/bandwidth limiting code (#1525) Add unit test for VersionMismatch (#1524) Removing panic paths by only destructuring once (#1523) Update actions/checkout digest to 11bd719 (#1518) Switch to using `Duration` for times (#1520) Co-authored-by: Alan Hanson --- Cargo.lock | 20 ++++++++++---------- Cargo.toml | 8 ++++---- package-manifest.toml | 16 ++++++++-------- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 727ec68f27..210170537b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -693,7 +693,7 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=aadc0998c0f07f08ab15a95c006074291734800f#aadc0998c0f07f08ab15a95c006074291734800f" +source = "git+https://github.com/oxidecomputer/propolis?rev=8f8fbb74662b4e19b643c500d55d2d384a6cee5e#8f8fbb74662b4e19b643c500d55d2d384a6cee5e" dependencies = [ "bhyve_api_sys", "libc", @@ -703,7 +703,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=aadc0998c0f07f08ab15a95c006074291734800f#aadc0998c0f07f08ab15a95c006074291734800f" +source = "git+https://github.com/oxidecomputer/propolis?rev=8f8fbb74662b4e19b643c500d55d2d384a6cee5e#8f8fbb74662b4e19b643c500d55d2d384a6cee5e" dependencies = [ "libc", "strum", @@ -1838,7 +1838,7 @@ dependencies = [ [[package]] name = "crucible-client-types" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=b7b9d5660b28ca5e865242b2bdecd032c0852d40#b7b9d5660b28ca5e865242b2bdecd032c0852d40" +source = "git+https://github.com/oxidecomputer/crucible?rev=1d82cc9fd9925814d097d39f7cbafd62fb8cfb7e#1d82cc9fd9925814d097d39f7cbafd62fb8cfb7e" dependencies = [ "base64 0.22.1", "crucible-workspace-hack", @@ -6944,7 +6944,7 @@ dependencies = [ "pq-sys", "pretty_assertions", "progenitor-client", - "propolis-client 0.1.0 
(git+https://github.com/oxidecomputer/propolis?rev=aadc0998c0f07f08ab15a95c006074291734800f)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=8f8fbb74662b4e19b643c500d55d2d384a6cee5e)", "qorb", "rand", "rcgen", @@ -7207,7 +7207,7 @@ dependencies = [ "oximeter-producer", "oxnet", "pretty_assertions", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=aadc0998c0f07f08ab15a95c006074291734800f)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=8f8fbb74662b4e19b643c500d55d2d384a6cee5e)", "propolis-mock-server", "propolis_api_types", "rand", @@ -8916,7 +8916,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=aadc0998c0f07f08ab15a95c006074291734800f#aadc0998c0f07f08ab15a95c006074291734800f" +source = "git+https://github.com/oxidecomputer/propolis?rev=8f8fbb74662b4e19b643c500d55d2d384a6cee5e#8f8fbb74662b4e19b643c500d55d2d384a6cee5e" dependencies = [ "async-trait", "base64 0.21.7", @@ -8958,7 +8958,7 @@ dependencies = [ [[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=aadc0998c0f07f08ab15a95c006074291734800f#aadc0998c0f07f08ab15a95c006074291734800f" +source = "git+https://github.com/oxidecomputer/propolis?rev=8f8fbb74662b4e19b643c500d55d2d384a6cee5e#8f8fbb74662b4e19b643c500d55d2d384a6cee5e" dependencies = [ "anyhow", "atty", @@ -9000,7 +9000,7 @@ dependencies = [ [[package]] name = "propolis_api_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=aadc0998c0f07f08ab15a95c006074291734800f#aadc0998c0f07f08ab15a95c006074291734800f" +source = "git+https://github.com/oxidecomputer/propolis?rev=8f8fbb74662b4e19b643c500d55d2d384a6cee5e#8f8fbb74662b4e19b643c500d55d2d384a6cee5e" dependencies = [ "crucible-client-types", "propolis_types", @@ -9013,7 +9013,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=aadc0998c0f07f08ab15a95c006074291734800f#aadc0998c0f07f08ab15a95c006074291734800f" +source = "git+https://github.com/oxidecomputer/propolis?rev=8f8fbb74662b4e19b643c500d55d2d384a6cee5e#8f8fbb74662b4e19b643c500d55d2d384a6cee5e" dependencies = [ "schemars", "serde", @@ -10656,7 +10656,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "oxnet", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=aadc0998c0f07f08ab15a95c006074291734800f)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=8f8fbb74662b4e19b643c500d55d2d384a6cee5e)", "rcgen", "schemars", "serde", diff --git a/Cargo.toml b/Cargo.toml index c5b7a2a4a7..7485803312 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -535,10 +535,10 @@ prettyplease = { version = "0.2.25", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = "0.8.0" progenitor-client = "0.8.0" -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "aadc0998c0f07f08ab15a95c006074291734800f" } -propolis_api_types = { git = "https://github.com/oxidecomputer/propolis", rev = "aadc0998c0f07f08ab15a95c006074291734800f" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "aadc0998c0f07f08ab15a95c006074291734800f" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "aadc0998c0f07f08ab15a95c006074291734800f" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = 
"8f8fbb74662b4e19b643c500d55d2d384a6cee5e" } +propolis_api_types = { git = "https://github.com/oxidecomputer/propolis", rev = "8f8fbb74662b4e19b643c500d55d2d384a6cee5e" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "8f8fbb74662b4e19b643c500d55d2d384a6cee5e" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "8f8fbb74662b4e19b643c500d55d2d384a6cee5e" } proptest = "1.5.0" qorb = "0.2.0" quote = "1.0" diff --git a/package-manifest.toml b/package-manifest.toml index 54e1236fe5..a220818b7e 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -578,10 +578,10 @@ only_for_targets.image = "standard" # 3. Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "b7b9d5660b28ca5e865242b2bdecd032c0852d40" +source.commit = "1d82cc9fd9925814d097d39f7cbafd62fb8cfb7e" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "44e623730765f8fc0b702d107939552514530a33b306ca5e8bc8276ff0aaf79a" +source.sha256 = "c66b3f7ef87e17533a3bbf7d0c0c6f01adab031f9acf173399b4a3dda32d097b" output.type = "zone" output.intermediate_only = true @@ -590,10 +590,10 @@ service_name = "crucible_pantry_prebuilt" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "b7b9d5660b28ca5e865242b2bdecd032c0852d40" +source.commit = "1d82cc9fd9925814d097d39f7cbafd62fb8cfb7e" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "bc0a41d349646ec2111bff346db2c300001d646a99f33b05b39b78188e34ae41" +source.sha256 = "144d9a5846b1fddffbe1fc8c30db44b06934feb6bc726f26ceade43bfc38c2a0" output.type = "zone" output.intermediate_only = true @@ -607,10 +607,10 @@ service_name = "crucible_dtrace" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "b7b9d5660b28ca5e865242b2bdecd032c0852d40" +source.commit = "1d82cc9fd9925814d097d39f7cbafd62fb8cfb7e" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-dtrace.sha256.txt -source.sha256 = "64e37f7a062f7c8941fac3b95a81d98475e5c02ff01111554b0ddb7fc232f40f" +source.sha256 = "f6e304172b4a7af1dbd70d2506889e9364f61c488ff346f525256b3ce1e80eff" output.type = "tarball" # Refer to @@ -621,10 +621,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "aadc0998c0f07f08ab15a95c006074291734800f" +source.commit = "8f8fbb74662b4e19b643c500d55d2d384a6cee5e" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "3cd889201aaa8cc5b916fc8f8176ab5529e2fc1d5d57165ad9a660eb48affef9" +source.sha256 = "5dc0c116d2463d17a64a91941bc3e664746ce0c4b7cf54c73ae72e410fba0757" output.type = "zone" [package.mg-ddm-gz] From ce69e142a9816977ddbd546892f6aff75f09cbe0 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 20 Nov 2024 09:51:52 -0800 Subject: [PATCH 2/4] [sled-storage] Automatically clean up test zpools, storage (#7111) `StorageManagerTestHarness` is a utility which allows sled agent tests to use real zpools and datasets atop temporary file-backed storage. 
This provides decent test fidelity for storage-based parts of the sled agent. This harness previously took a liberal attitude towards zpool creation, but a conservative perspective on their deletion: rather than automatically destroying zpools on test failure, it would print commands for "how to delete these zpools yourself". This tendency was mostly borne out of fear: deleting zpools is a dangerous operation which should be done with caution. When working with the `StorageManagerTestHarness`, especially while iterating on tests with many expected failures, I've decided this is an Enormous Pain In The Butt. This PR changes the aforementioned tendency, and just attempts to delete all provisioned zpools we created in our tests. If we fail to do so, we'll *then* log an error message and let the user take responsibility for manual cleanup. --- sled-storage/src/manager_test_harness.rs | 51 +++++++++++++++++------- 1 file changed, 37 insertions(+), 14 deletions(-) diff --git a/sled-storage/src/manager_test_harness.rs b/sled-storage/src/manager_test_harness.rs index 40c7c5fbed..068816ffa7 100644 --- a/sled-storage/src/manager_test_harness.rs +++ b/sled-storage/src/manager_test_harness.rs @@ -83,10 +83,9 @@ pub struct StorageManagerTestHarness { impl Drop for StorageManagerTestHarness { fn drop(&mut self) { if let Some(vdev_dir) = self.vdev_dir.take() { - eprintln!( + eprint!( "WARNING: StorageManagerTestHarness called without 'cleanup()'.\n\ - We may have leaked zpools, and not correctly deleted {}", - vdev_dir.path() + Attempting automated cleanup ... ", ); let pools = [ @@ -100,25 +99,49 @@ impl Drop for StorageManagerTestHarness { ), ]; - eprintln!( - "The following commands may need to be run to clean up state:" - ); - eprintln!("---"); + let mut failed_commands = vec![]; + for (prefix, pool) in pools { let Ok(entries) = pool.read_dir_utf8() else { continue; }; for entry in entries.flatten() { - eprintln!( - " pfexec zpool destroy {prefix}{} ", - entry.file_name() - ); + let pool_name = format!("{prefix}{}", entry.file_name()); + if let Err(_) = + std::process::Command::new(illumos_utils::PFEXEC) + .args(["zpool", "destroy", &pool_name]) + .status() + { + failed_commands + .push(format!("pfexec zpool destroy {pool_name}")); + } } } - eprintln!(" pfexec rm -rf {}", vdev_dir.path()); - eprintln!("---"); - panic!("Dropped without cleanup. See stderr for cleanup advice"); + let vdev_path = vdev_dir.path(); + if let Err(_) = std::process::Command::new(illumos_utils::PFEXEC) + .args(["rm", "-rf", vdev_path.as_str()]) + .status() + { + failed_commands.push(format!("pfexec rm -rf {vdev_path}")); + } + + if !failed_commands.is_empty() { + eprintln!("FAILED"); + eprintln!( + "The following commands may need to be run to clean up state:" + ); + eprintln!("---"); + for cmd in failed_commands { + eprintln!("{cmd}"); + } + eprintln!("---"); + panic!( + "Dropped without cleanup. See stderr for cleanup advice" + ); + } else { + eprintln!("OK"); + } } } } From ee22c0e86b8b9fee17b608a52f257685859350d0 Mon Sep 17 00:00:00 2001 From: Sean Klein Date: Wed, 20 Nov 2024 09:52:12 -0800 Subject: [PATCH 3/4] [sled-storage] Use test-configured mountpoint root for datasets (#7110) This change should only impact tests. The "mountpoint" configuration, by default, uses the `ZPOOL_MOUNTPOINT_ROOT` as a root path where datasets may be located: https://github.com/oxidecomputer/omicron/blob/48790e52a9b76ec91c315ca44f5c462d639e62f7/sled-storage/src/config.rs#L33-L35 In prod, this value is "/". 
Under tests, however, we may want more isolation between datasets, hence this configuration option. This PR respects that configuration option when configuring datasets. This will be relevant for Sled Agent tests, and otherwise effectively acts as a no-op. --- sled-storage/src/manager.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index 69e0824356..62f5834209 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -955,8 +955,8 @@ impl StorageManager { err: None, }; - let mountpoint_path = - config.name.mountpoint(ZPOOL_MOUNTPOINT_ROOT.into()); + let mountpoint_root = &self.resources.disks().mount_config().root; + let mountpoint_path = config.name.mountpoint(mountpoint_root); let details = DatasetCreationDetails { zoned: config.name.dataset().zoned(), mountpoint: Mountpoint::Path(mountpoint_path), From a73bc0da5eeebe2f98d6a5633c09170d8e613843 Mon Sep 17 00:00:00 2001 From: John Gallagher Date: Wed, 20 Nov 2024 13:52:12 -0500 Subject: [PATCH 4/4] [reconfigurator] `BlueprintBuilder` cleanup 1/5 - move `BlueprintDisksEditor` to its own module (#7103) This is _mostly_ moving code around; specifically, `BlueprintDisksEditor` is now in its own module. The non-"moving code around" changes are: * The API style is more imperative. * Generation 1 for `OmicronPhysicalDisksConfig` is now only the empty set of disks; if there are disks present, that will be at least generation 2. This caused some expectorate churn but seems probably fine? It matches how `OmicronZonesConfig` generations are numbered. --- .../tests/output/cmd-example-stdout | 20 +- .../planning/src/blueprint_builder/builder.rs | 249 +++++------------- .../blueprint_builder/builder/disks_editor.rs | 187 +++++++++++++ .../output/blueprint_builder_initial_diff.txt | 6 +- .../example_builder_zone_counts_blueprint.txt | 10 +- .../output/planner_basic_add_sled_2_3.txt | 8 +- .../output/planner_basic_add_sled_3_5.txt | 8 +- ...dataset_settings_modified_in_place_1_2.txt | 2 +- .../planner_decommissions_sleds_1_2.txt | 6 +- .../planner_decommissions_sleds_bp2.txt | 4 +- .../planner_deploy_all_keeper_nodes_1_2.txt | 6 +- .../planner_deploy_all_keeper_nodes_3_4.txt | 6 +- .../planner_deploy_all_keeper_nodes_4_5.txt | 6 +- ...r_deploy_all_keeper_nodes_4_collection.txt | 6 +- .../planner_deploy_all_keeper_nodes_5_6.txt | 6 +- ...lanner_expunge_clickhouse_clusters_3_4.txt | 6 +- ...lanner_expunge_clickhouse_clusters_5_6.txt | 4 +- ...ouse_zones_after_policy_is_changed_3_4.txt | 6 +- .../output/planner_nonprovisionable_1_2.txt | 10 +- .../output/planner_nonprovisionable_2_2a.txt | 6 +- .../output/planner_nonprovisionable_bp2.txt | 6 +- 21 files changed, 323 insertions(+), 245 deletions(-) create mode 100644 nexus/reconfigurator/planning/src/blueprint_builder/builder/disks_editor.rs diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout index fe07482aac..2bd20b40c7 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-example-stdout @@ -68,7 +68,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 sled: 2eb69596-f081-4e2d-9425-9994926e0832 (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -108,7 +108,7 @@ parent: 
02697f74-b14a-4418-90f0-c28b2a3a6aa9 sled: 32d8d836-4d8a-4e54-8fa9-f31d79c42646 (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -147,7 +147,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 sled: 89d02b1b-478c-401a-8e28-7a26f74fa41b (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -205,7 +205,7 @@ to: blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a sled 2eb69596-f081-4e2d-9425-9994926e0832 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -297,7 +297,7 @@ to: blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a sled 32d8d836-4d8a-4e54-8fa9-f31d79c42646 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -386,7 +386,7 @@ to: blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a sled 89d02b1b-478c-401a-8e28-7a26f74fa41b (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -495,7 +495,7 @@ to: blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a sled 2eb69596-f081-4e2d-9425-9994926e0832 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -587,7 +587,7 @@ to: blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a sled 32d8d836-4d8a-4e54-8fa9-f31d79c42646 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -676,7 +676,7 @@ to: blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a sled 89d02b1b-478c-401a-8e28-7a26f74fa41b (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -820,7 +820,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 sled: 89d02b1b-478c-401a-8e28-7a26f74fa41b (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index c9b058f017..7d79888649 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -4,12 +4,15 @@ //! 
Low-level facility for generating Blueprints +mod disks_editor; + use crate::ip_allocator::IpAllocator; use crate::planner::rng::PlannerRng; use crate::planner::zone_needs_expungement; use crate::planner::ZoneExpungeReason; use anyhow::anyhow; use clickhouse_admin_types::OXIMETER_CLUSTER; +use disks_editor::BlueprintDisksEditor; use ipnet::IpAdd; use nexus_inventory::now_db_precision; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; @@ -20,7 +23,6 @@ use nexus_types::deployment::BlueprintDatasetConfig; use nexus_types::deployment::BlueprintDatasetDisposition; use nexus_types::deployment::BlueprintDatasetsConfig; use nexus_types::deployment::BlueprintPhysicalDiskConfig; -use nexus_types::deployment::BlueprintPhysicalDisksConfig; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneFilter; @@ -63,7 +65,6 @@ use omicron_common::policy::INTERNAL_DNS_REDUNDANCY; use omicron_uuid_kinds::DatasetUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; -use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; use once_cell::unsync::OnceCell; @@ -281,7 +282,7 @@ pub struct BlueprintBuilder<'a> { // These fields will become part of the final blueprint. See the // corresponding fields in `Blueprint`. pub(super) zones: BlueprintZonesBuilder<'a>, - disks: BlueprintDisksBuilder<'a>, + disks: BlueprintDisksEditor, datasets: BlueprintDatasetsBuilder<'a>, sled_state: BTreeMap, cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade, @@ -406,7 +407,9 @@ impl<'a> BlueprintBuilder<'a> { external_networking: OnceCell::new(), internal_dns_subnets: OnceCell::new(), zones: BlueprintZonesBuilder::new(parent_blueprint), - disks: BlueprintDisksBuilder::new(parent_blueprint), + disks: BlueprintDisksEditor::new( + parent_blueprint.blueprint_disks.clone(), + ), datasets: BlueprintDatasetsBuilder::new(parent_blueprint), sled_state, cockroachdb_setting_preserve_downgrade: parent_blueprint @@ -495,9 +498,8 @@ impl<'a> BlueprintBuilder<'a> { let blueprint_zones = self .zones .into_zones_map(self.input.all_sled_ids(SledFilter::Commissioned)); - let blueprint_disks = self - .disks - .into_disks_map(self.input.all_sled_ids(SledFilter::InService)); + let blueprint_disks = + self.disks.build(self.input.all_sled_ids(SledFilter::InService)); let blueprint_datasets = self .datasets .into_datasets_map(self.input.all_sled_ids(SledFilter::InService)); @@ -782,66 +784,45 @@ impl<'a> BlueprintBuilder<'a> { sled_id: SledUuid, resources: &SledResources, ) -> Result { - let (mut additions, removals) = { - // These are the disks known to our (last?) 
blueprint - let blueprint_disks: BTreeMap<_, _> = self - .disks - .current_sled_disks(sled_id) - .map(|disk| { - (PhysicalDiskUuid::from_untyped_uuid(disk.id), disk) - }) - .collect(); - - // These are the in-service disks as we observed them in the database, - // during the planning phase - let database_disks: BTreeMap<_, _> = resources - .all_disks(DiskFilter::InService) - .map(|(zpool, disk)| (disk.disk_id, (zpool, disk))) - .collect(); - - // Add any disks that appear in the database, but not the blueprint - let additions = database_disks - .iter() - .filter_map(|(disk_id, (zpool, disk))| { - if !blueprint_disks.contains_key(disk_id) { - Some(BlueprintPhysicalDiskConfig { - identity: disk.disk_identity.clone(), - id: disk_id.into_untyped_uuid(), - pool_id: **zpool, - }) - } else { - None - } - }) - .collect::>(); - - // Remove any disks that appear in the blueprint, but not the database - let removals: HashSet = blueprint_disks - .keys() - .filter_map(|disk_id| { - if !database_disks.contains_key(disk_id) { - Some(*disk_id) - } else { - None - } - }) - .collect(); + let mut added = 0; + let mut removed = 0; + + // These are the disks known to our (last?) blueprint + let mut blueprint_disks = self.disks.sled_disks_editor(sled_id); + let blueprint_disk_ids = + blueprint_disks.disks_ids().collect::>(); + + // These are the in-service disks as we observed them in the database, + // during the planning phase + let database_disks = resources + .all_disks(DiskFilter::InService) + .map(|(zpool, disk)| (disk.disk_id, (zpool, disk))); + let mut database_disk_ids = BTreeSet::new(); + + // Add any disks that appear in the database, but not the blueprint + for (disk_id, (zpool, disk)) in database_disks { + database_disk_ids.insert(disk_id); + if !blueprint_disks.contains_disk(&disk_id) { + blueprint_disks.add_disk(BlueprintPhysicalDiskConfig { + identity: disk.disk_identity.clone(), + id: disk_id.into_untyped_uuid(), + pool_id: *zpool, + }); + added += 1; + } + } - (additions, removals) - }; + // Remove any disks that appear in the blueprint, but not the database + for disk_id in blueprint_disk_ids { + if !database_disk_ids.contains(&disk_id) { + blueprint_disks.remove_disk(&disk_id); + removed += 1; + } + } - if additions.is_empty() && removals.is_empty() { + if added == 0 && removed == 0 { return Ok(EnsureMultiple::NotNeeded); } - let added = additions.len(); - let removed = removals.len(); - - let disks = &mut self.disks.change_sled_disks(sled_id).disks; - - disks.append(&mut additions); - disks.retain(|config| { - !removals.contains(&PhysicalDiskUuid::from_untyped_uuid(config.id)) - }); Ok(EnsureMultiple::Changed { added, updated: 0, expunged: 0, removed }) } @@ -890,7 +871,8 @@ impl<'a> BlueprintBuilder<'a> { // Ensure each zpool has a "Debug" and "Zone Root" dataset. let mut bp_zpools = self .disks - .current_sled_disks(sled_id) + .sled_disks_editor(sled_id) + .disks() .map(|disk_config| disk_config.pool_id) .collect::>(); // We iterate over the zpools in a deterministic order to ensure @@ -2196,102 +2178,6 @@ impl<'a> BlueprintZonesBuilder<'a> { } } -/// Helper for working with sets of disks on each sled -/// -/// Tracking the set of disks is slightly non-trivial because we need to -/// bump the per-sled generation number iff the disks are changed. So -/// we need to keep track of whether we've changed the disks relative -/// to the parent blueprint. 
We do this by keeping a copy of any -/// [`BlueprintPhysicalDisksConfig`] that we've changed and a _reference_ to -/// the parent blueprint's disks. This struct makes it easy for callers iterate -/// over the right set of disks. -struct BlueprintDisksBuilder<'a> { - changed_disks: BTreeMap, - parent_disks: &'a BTreeMap, -} - -impl<'a> BlueprintDisksBuilder<'a> { - pub fn new(parent_blueprint: &'a Blueprint) -> BlueprintDisksBuilder { - BlueprintDisksBuilder { - changed_disks: BTreeMap::new(), - parent_disks: &parent_blueprint.blueprint_disks, - } - } - - /// Returns a mutable reference to a sled's Omicron disks *because* we're - /// going to change them. - /// - /// Unlike [`BlueprintZonesBuilder::change_sled_zones`], it is essential - /// that the caller _does_ change them, because constructing this bumps the - /// generation number unconditionally. - pub fn change_sled_disks( - &mut self, - sled_id: SledUuid, - ) -> &mut BlueprintPhysicalDisksConfig { - self.changed_disks.entry(sled_id).or_insert_with(|| { - if let Some(old_sled_disks) = self.parent_disks.get(&sled_id) { - BlueprintPhysicalDisksConfig { - generation: old_sled_disks.generation.next(), - disks: old_sled_disks.disks.clone(), - } - } else { - // No requests have been sent to the disk previously, - // we should be able to use the first generation. - BlueprintPhysicalDisksConfig { - generation: Generation::new(), - disks: vec![], - } - } - }) - } - - /// Iterates over the list of Omicron disks currently configured for this - /// sled in the blueprint that's being built - pub fn current_sled_disks( - &self, - sled_id: SledUuid, - ) -> Box + '_> { - if let Some(sled_disks) = self - .changed_disks - .get(&sled_id) - .or_else(|| self.parent_disks.get(&sled_id)) - { - Box::new(sled_disks.disks.iter()) - } else { - Box::new(std::iter::empty()) - } - } - - /// Produces an owned map of disks for the requested sleds - pub fn into_disks_map( - mut self, - sled_ids: impl Iterator, - ) -> BTreeMap { - sled_ids - .map(|sled_id| { - // Start with self.changed_disks, which contains entries for any - // sled whose disks config is changing in this blueprint. - let mut disks = self - .changed_disks - .remove(&sled_id) - // If it's not there, use the config from the parent - // blueprint. - .or_else(|| self.parent_disks.get(&sled_id).cloned()) - // If it's not there either, then this must be a new sled - // and we haven't added any disks to it yet. Use the - // standard initial config. - .unwrap_or_else(|| BlueprintPhysicalDisksConfig { - generation: Generation::new(), - disks: vec![], - }); - disks.disks.sort_unstable_by_key(|d| d.id); - - (sled_id, disks) - }) - .collect() - } -} - /// Helper for working with sets of datasets on each sled struct BlueprintDatasetsBuilder<'a> { changed_datasets: BTreeMap, @@ -3146,16 +3032,16 @@ pub mod test { ) .expect("failed to create builder"); - assert!(builder.disks.changed_disks.is_empty()); - // In the parent_disks map, we expect entries to be present for - // each sled, but not have any disks in them. - for (sled_id, disks) in builder.disks.parent_disks { - assert_eq!( - disks.disks, - Vec::new(), - "for sled {}, expected no disks present in parent, \ - but found some", - sled_id + // In the map, we expect entries to be present for each sled, but + // not have any disks in them. 
+ for sled_id in input.all_sled_ids(SledFilter::InService) { + let disks = builder + .disks + .current_sled_disks(&sled_id) + .expect("found disks config for sled"); + assert!( + disks.is_empty(), + "expected empty disks for sled {sled_id}, got {disks:?}" ); } @@ -3175,16 +3061,21 @@ pub mod test { ); } - assert!(!builder.disks.changed_disks.is_empty()); - // In the parent_disks map, we expect entries to be present for - // each sled, but not have any disks in them. - for (sled_id, disks) in builder.disks.parent_disks { + let new_disks = + builder.disks.build(input.all_sled_ids(SledFilter::InService)); + // We should have disks and a generation bump for every sled. + let parent_disk_gens = parent + .blueprint_disks + .iter() + .map(|(&sled_id, config)| (sled_id, config.generation)); + for (sled_id, parent_gen) in parent_disk_gens { + let new_sled_disks = new_disks + .get(&sled_id) + .expect("found child entry for sled present in parent"); + assert_eq!(new_sled_disks.generation, parent_gen.next()); assert_eq!( - disks.disks, - Vec::new(), - "for sled {}, expected no disks present in parent, \ - but found some", - sled_id + new_sled_disks.disks.len(), + usize::from(SledBuilder::DEFAULT_NPOOLS), ); } } diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder/disks_editor.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder/disks_editor.rs new file mode 100644 index 0000000000..feea6a050e --- /dev/null +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder/disks_editor.rs @@ -0,0 +1,187 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Helper for editing the disks of a Blueprint + +use nexus_types::deployment::BlueprintPhysicalDiskConfig; +use nexus_types::deployment::BlueprintPhysicalDisksConfig; +use omicron_common::api::external::Generation; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::PhysicalDiskUuid; +use omicron_uuid_kinds::SledUuid; +use std::collections::BTreeMap; +use std::collections::BTreeSet; + +/// Helper for working with sets of disks on each sled +/// +/// Tracking the set of disks is slightly non-trivial because we need to +/// bump the per-sled generation number iff the disks are changed. So +/// we need to keep track of whether we've changed the disks relative +/// to the parent blueprint. +#[derive(Debug)] +pub(super) struct BlueprintDisksEditor { + current: BTreeMap, + changed: BTreeSet, +} + +impl BlueprintDisksEditor { + pub fn new( + current: BTreeMap, + ) -> Self { + let current = current + .into_iter() + .map(|(sled_id, config)| (sled_id, config.into())) + .collect(); + Self { current, changed: BTreeSet::new() } + } + + /// Get a helper to edit the disks of a specific sled. + /// + /// If any changes are made via the returned editor, the sled will be + /// recorded as needing a generation bump in its disk config when the editor + /// is dropped. 
+ pub fn sled_disks_editor( + &mut self, + sled_id: SledUuid, + ) -> SledDisksEditor<'_> { + let config = + self.current.entry(sled_id).or_insert_with(DisksConfig::empty); + SledDisksEditor::new(sled_id, config, &mut self.changed) + } + + #[cfg(test)] + pub fn current_sled_disks( + &self, + sled_id: &SledUuid, + ) -> Option<&BTreeMap> { + let config = self.current.get(sled_id)?; + Some(&config.disks) + } + + /// Compile all edits into a new map suitable for a blueprint's + /// `blueprint_disks`, bumping the generation number for any sleds whose + /// disk config changed. + /// + /// Only sleds listed in `sled_ids` will be present in the returned map. + /// This primarily allows the caller to drop sleds that are no longer in + /// service. (Any new sleds will be given an empty set of disks, but + /// presumably any new sleds will have _some_ disks that will have already + /// been populated via a relevant `sled_disks_editor()` call.) + pub fn build( + mut self, + sled_ids: impl Iterator, + ) -> BTreeMap { + sled_ids + .map(|sled_id| { + let config = match self.current.remove(&sled_id) { + Some(mut config) => { + // Bump generation number for any sled whose DisksConfig + // changed + if self.changed.contains(&sled_id) { + config.generation = config.generation.next() + } + config.into() + } + None => DisksConfig::empty().into(), + }; + (sled_id, config) + }) + .collect() + } +} + +#[derive(Debug)] +pub(super) struct SledDisksEditor<'a> { + config: &'a mut DisksConfig, + changed: bool, + sled_id: SledUuid, + parent_changed_set: &'a mut BTreeSet, +} + +impl Drop for SledDisksEditor<'_> { + fn drop(&mut self) { + if self.changed { + self.parent_changed_set.insert(self.sled_id); + } + } +} + +impl<'a> SledDisksEditor<'a> { + fn new( + sled_id: SledUuid, + config: &'a mut DisksConfig, + parent_changed_set: &'a mut BTreeSet, + ) -> Self { + Self { config, changed: false, sled_id, parent_changed_set } + } + + pub fn disks(&self) -> impl Iterator { + self.config.disks.values() + } + + pub fn disks_ids(&self) -> impl Iterator + '_ { + self.config.disks.keys().copied() + } + + pub fn contains_disk(&self, disk_id: &PhysicalDiskUuid) -> bool { + self.config.disks.contains_key(disk_id) + } + + pub fn add_disk( + &mut self, + disk: BlueprintPhysicalDiskConfig, + ) -> Option { + self.changed = true; + self.config + .disks + .insert(PhysicalDiskUuid::from_untyped_uuid(disk.id), disk) + } + + pub fn remove_disk( + &mut self, + disk_id: &PhysicalDiskUuid, + ) -> Option { + self.changed = true; + self.config.disks.remove(disk_id) + } +} + +// We want add and remove to be cheap and easy to check whether they performed +// the requested operation, so we'll internally convert from the vec of disks to +// a map of disks keyed by disk ID. 
+#[derive(Debug)] +struct DisksConfig { + generation: Generation, + disks: BTreeMap, +} + +impl DisksConfig { + fn empty() -> Self { + Self { generation: Generation::new(), disks: BTreeMap::new() } + } +} + +impl From for BlueprintPhysicalDisksConfig { + fn from(config: DisksConfig) -> Self { + BlueprintPhysicalDisksConfig { + generation: config.generation, + disks: config.disks.into_values().collect(), + } + } +} + +impl From for DisksConfig { + fn from(config: BlueprintPhysicalDisksConfig) -> Self { + Self { + generation: config.generation, + disks: config + .disks + .into_iter() + .map(|disk| { + (PhysicalDiskUuid::from_untyped_uuid(disk.id), disk) + }) + .collect(), + } + } +} diff --git a/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt b/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt index 521476a7fe..3c09cfd731 100644 --- a/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt +++ b/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt @@ -4,7 +4,7 @@ to: blueprint e4aeb3b3-272f-4967-be34-2d34daa46aa1 sled 08c7046b-c9c4-4368-881f-19a72df22143 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -96,7 +96,7 @@ to: blueprint e4aeb3b3-272f-4967-be34-2d34daa46aa1 sled 84ac367e-9b03-4e9d-a846-df1a08deee6c (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -185,7 +185,7 @@ to: blueprint e4aeb3b3-272f-4967-be34-2d34daa46aa1 sled be7f4375-2a6b-457f-b1a4-3074a715e5fe (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt b/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt index 92327f0adf..dfa6359642 100644 --- a/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt +++ b/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt @@ -3,7 +3,7 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54 sled: 0dbf1e39-e265-4071-a8df-6d1225b46694 (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -46,7 +46,7 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54 sled: 15cf73a6-445b-4d36-9232-5ed364019bc6 (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -87,7 +87,7 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54 sled: 50e6c1c0-43b2-4abc-9041-41165597f639 (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- 
@@ -127,7 +127,7 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54 sled: 969ff976-df34-402c-a362-53db03a6b97f (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -167,7 +167,7 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54 sled: ec5c0b37-b651-4c45-ac1c-24541ef9c44b (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt index cb282de4dc..e4ef5b6cc9 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt @@ -5,7 +5,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 sled 41f45d9f-766e-4ca6-a881-61ee45c80f57 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -97,7 +97,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 sled 43677374-8d2f-4deb-8a41-eeea506db8e0 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -186,7 +186,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 sled 590e3034-d946-4166-b0e5-2d0034197a07 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -277,7 +277,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 sled ec61eded-c34f-443d-a580-dadf757529c4 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt index 3588358602..7b4fba86bf 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt @@ -5,7 +5,7 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 sled 41f45d9f-766e-4ca6-a881-61ee45c80f57 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -97,7 +97,7 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 sled 43677374-8d2f-4deb-8a41-eeea506db8e0 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -186,7 +186,7 @@ to: blueprint 
f432fcd5-1284-4058-8b4a-9286a3de6163 sled 590e3034-d946-4166-b0e5-2d0034197a07 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -277,7 +277,7 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 sled ec61eded-c34f-443d-a580-dadf757529c4 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt index ba30fda02e..8eca44a82e 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt @@ -5,7 +5,7 @@ to: blueprint fe13be30-94c2-4fa6-aad5-ae3c5028f6bb sled c52410de-5fea-4e77-b162-756d103523b3 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt index 436929bf73..206ddd71e7 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt @@ -5,7 +5,7 @@ to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e sled a1b477db-b629-48eb-911d-1ccdafca75b9 (active -> decommissioned): - physical disks from generation 1: + physical disks from generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -112,7 +112,7 @@ to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e sled d67ce8f0-a691-4010-b414-420d82e80527 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -205,7 +205,7 @@ to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e sled fefcf4cf-f7e7-46b3-b629-058526ce440e (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt index 072e26df49..9418ead797 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -3,7 +3,7 @@ parent: 516e80a3-b362-4fac-bd3c-4559717120dd sled: d67ce8f0-a691-4010-b414-420d82e80527 (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor 
model serial ---------------------------------------------------------------------- @@ -44,7 +44,7 @@ parent: 516e80a3-b362-4fac-bd3c-4559717120dd sled: fefcf4cf-f7e7-46b3-b629-058526ce440e (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt index 6039cd0ab9..7fbe5e3cea 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt @@ -5,7 +5,7 @@ to: blueprint 31ef2071-2ec9-49d9-8827-fd83b17a0e3d sled 164d275d-a936-4f06-ad53-a32cb3c8d3c8 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -100,7 +100,7 @@ to: blueprint 31ef2071-2ec9-49d9-8827-fd83b17a0e3d sled 6a4c45f6-e02f-490c-bbfa-b32fb89e8e86 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -195,7 +195,7 @@ to: blueprint 31ef2071-2ec9-49d9-8827-fd83b17a0e3d sled be531a62-9897-430d-acd2-ce14b4632627 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt index d27c85bd66..33646bb3d4 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt @@ -5,7 +5,7 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6 sled 164d275d-a936-4f06-ad53-a32cb3c8d3c8 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -100,7 +100,7 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6 sled 6a4c45f6-e02f-490c-bbfa-b32fb89e8e86 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -195,7 +195,7 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6 sled be531a62-9897-430d-acd2-ce14b4632627 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt index 43bef3da22..98516d01d3 100644 --- 
a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt @@ -5,7 +5,7 @@ to: blueprint 2886dab5-61a2-46b4-87af-bc7aeb44cccb sled 164d275d-a936-4f06-ad53-a32cb3c8d3c8 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -102,7 +102,7 @@ to: blueprint 2886dab5-61a2-46b4-87af-bc7aeb44cccb sled 6a4c45f6-e02f-490c-bbfa-b32fb89e8e86 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -200,7 +200,7 @@ to: blueprint 2886dab5-61a2-46b4-87af-bc7aeb44cccb sled be531a62-9897-430d-acd2-ce14b4632627 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_collection.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_collection.txt index dfd2a1cd27..7ebfabb743 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_collection.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_collection.txt @@ -4,7 +4,7 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6 sled 164d275d-a936-4f06-ad53-a32cb3c8d3c8 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -99,7 +99,7 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6 sled 6a4c45f6-e02f-490c-bbfa-b32fb89e8e86 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -194,7 +194,7 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6 sled be531a62-9897-430d-acd2-ce14b4632627 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt index 6c84177b48..00d99b5aac 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt @@ -5,7 +5,7 @@ to: blueprint cb39be9d-5476-44fa-9edf-9938376219ef sled 164d275d-a936-4f06-ad53-a32cb3c8d3c8 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -100,7 +100,7 @@ to: blueprint cb39be9d-5476-44fa-9edf-9938376219ef sled 
6a4c45f6-e02f-490c-bbfa-b32fb89e8e86 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -198,7 +198,7 @@ to: blueprint cb39be9d-5476-44fa-9edf-9938376219ef sled be531a62-9897-430d-acd2-ce14b4632627 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt index 7c5275d284..10bc10cbef 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt @@ -5,7 +5,7 @@ to: blueprint 74f2e7fd-687e-4c9e-b5d8-e474a5bb8e7c sled b340c044-bd87-4a3b-aee3-e6ccd9d3ff02 (active -> decommissioned): - physical disks from generation 1: + physical disks from generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -116,7 +116,7 @@ to: blueprint 74f2e7fd-687e-4c9e-b5d8-e474a5bb8e7c sled cdba3bea-3407-4b6e-a029-19bf4a02fca7 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -217,7 +217,7 @@ to: blueprint 74f2e7fd-687e-4c9e-b5d8-e474a5bb8e7c sled fe7b9b01-e803-41ea-9e39-db240dcd9029 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt index 19995f2fb3..661a3a386c 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt @@ -29,7 +29,7 @@ to: blueprint df68d4d4-5af4-4b56-95bb-1654a6957d4f sled cdba3bea-3407-4b6e-a029-19bf4a02fca7 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -130,7 +130,7 @@ to: blueprint df68d4d4-5af4-4b56-95bb-1654a6957d4f sled fe7b9b01-e803-41ea-9e39-db240dcd9029 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt index 37ce107263..f48c55cd22 100644 --- 
a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt @@ -5,7 +5,7 @@ to: blueprint d895ef50-9978-454c-bdfb-b8dbe2c9a918 sled 883d9767-021c-4836-81cf-fa02d73fead0 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -101,7 +101,7 @@ to: blueprint d895ef50-9978-454c-bdfb-b8dbe2c9a918 sled aae6114d-956b-4980-9759-b00b1ed893ee (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -198,7 +198,7 @@ to: blueprint d895ef50-9978-454c-bdfb-b8dbe2c9a918 sled be4a3b25-dde1-40a4-b909-9fa4379a8510 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt index a4b67c64ae..ff35a33f5d 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt @@ -5,7 +5,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -99,7 +99,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 48d95fef-bc9f-4f50-9a53-1e075836291d (active -> decommissioned): - physical disks from generation 1: + physical disks from generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -202,7 +202,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 68d24ac5-f341-49ea-a92a-0381b52ab387 (active): - physical disks from generation 1: + physical disks from generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -291,7 +291,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 75bc286f-2b4b-482c-9431-59272af529da (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -381,7 +381,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled affab35f-600a-4109-8ea0-34a067a4e0bc (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt 
b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt index aa6e431e98..2d12a455af 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt @@ -5,7 +5,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 75bc286f-2b4b-482c-9431-59272af529da (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -95,7 +95,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled affab35f-600a-4109-8ea0-34a067a4e0bc (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -211,7 +211,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9 (active): - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 5d777cd9d9..872a9d9d93 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -3,7 +3,7 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b sled: 2d1cb4f2-cf44-40fc-b118-85036eb732a9 (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -43,7 +43,7 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b sled: 75bc286f-2b4b-482c-9431-59272af529da (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ---------------------------------------------------------------------- @@ -83,7 +83,7 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b sled: affab35f-600a-4109-8ea0-34a067a4e0bc (active) - physical disks at generation 1: + physical disks at generation 2: ---------------------------------------------------------------------- vendor model serial ----------------------------------------------------------------------