diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index d7f8af04f8f..91782ee6758 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -70,6 +70,7 @@ use nexus_db_model::DnsName; use nexus_db_model::DnsVersion; use nexus_db_model::DnsZone; use nexus_db_model::ExternalIp; +use nexus_db_model::ExternalMulticastGroup; use nexus_db_model::HwBaseboardId; use nexus_db_model::Image; use nexus_db_model::Instance; @@ -79,8 +80,14 @@ use nexus_db_model::InvNvmeDiskFirmware; use nexus_db_model::InvPhysicalDisk; use nexus_db_model::IpAttachState; use nexus_db_model::IpKind; +use nexus_db_model::IpPool; +use nexus_db_model::IpPoolRange; +use nexus_db_model::IpPoolType; use nexus_db_model::Migration; use nexus_db_model::MigrationState; +use nexus_db_model::MulticastGroupMember; +use nexus_db_model::MulticastGroupMemberState; +use nexus_db_model::MulticastGroupState; use nexus_db_model::NetworkInterface; use nexus_db_model::NetworkInterfaceKind; use nexus_db_model::PhysicalDisk; @@ -101,6 +108,7 @@ use nexus_db_model::Snapshot; use nexus_db_model::SnapshotState; use nexus_db_model::SwCaboose; use nexus_db_model::SwRotPage; +use nexus_db_model::UnderlayMulticastGroup; use nexus_db_model::UpstairsRepairNotification; use nexus_db_model::UpstairsRepairProgress; use nexus_db_model::UserDataExportRecord; @@ -401,6 +409,8 @@ enum DbCommands { /// Print information about migrations #[clap(alias = "migration")] Migrations(MigrationsArgs), + /// Print information about multicast groups + Multicast(MulticastArgs), /// Print information about snapshots Snapshots(SnapshotArgs), /// Validate the contents of the database @@ -844,6 +854,85 @@ enum NetworkCommands { ListVnics, } +#[derive(Debug, Args, Clone)] +struct MulticastArgs { + #[command(subcommand)] + command: MulticastCommands, +} + +#[derive(Debug, Subcommand, Clone)] +enum MulticastCommands { + /// List all multicast groups + #[clap(alias = "ls")] + 
Groups(MulticastGroupsArgs), + /// List all multicast group members + Members(MulticastMembersArgs), + /// List multicast IP pools and their ranges + Pools, + /// Get info for a multicast group by IP address or name + #[clap(alias = "show")] + Info(MulticastInfoArgs), +} + +#[derive(Debug, Args, Clone)] +struct MulticastGroupsArgs { + /// Filter by state + #[arg( + long, + ignore_case = true, + value_parser = PossibleValuesParser::new( + MulticastGroupState::ALL_STATES + .iter() + .map(|v| PossibleValue::new(v.label())) + ).try_map(|s| s.parse::()), + )] + state: Option, + /// Filter by pool name + #[arg(long)] + pool: Option, +} + +#[derive(Debug, Args, Clone)] +struct MulticastMembersArgs { + /// Filter by group ID + #[arg(long)] + group_id: Option, + /// Filter by group IP address (e.g., 239.1.2.3) + #[arg(long)] + group_ip: Option, + /// Filter by group name + #[arg(long)] + group_name: Option, + /// Filter by state + #[arg( + long, + ignore_case = true, + value_parser = PossibleValuesParser::new( + MulticastGroupMemberState::ALL_STATES + .iter() + .map(|v| PossibleValue::new(v.label())) + ).try_map(|s| s.parse::()), + )] + state: Option, + /// Filter by sled ID + #[arg(long)] + sled_id: Option, +} + +#[derive(Debug, Args, Clone)] +#[group(required = true, multiple = false)] +struct MulticastInfoArgs { + /// Multicast group ID + #[arg(long)] + group_id: Option, + /// Multicast IP address (e.g., 239.1.2.3) + #[arg(long)] + ip: Option, + /// Multicast group name + #[arg(long)] + name: Option, +} + #[derive(Debug, Args, Clone)] struct MigrationsArgs { #[command(subcommand)] @@ -1367,6 +1456,29 @@ impl DbArgs { }) => { cmd_db_migrations_list(&datastore, &fetch_opts, args).await } + DbCommands::Multicast(MulticastArgs { + command: MulticastCommands::Groups(args), + }) => { + cmd_db_multicast_groups(&datastore, &fetch_opts, &args) + .await + } + DbCommands::Multicast(MulticastArgs { + command: MulticastCommands::Members(args), + }) => { + 
cmd_db_multicast_members(&datastore, &fetch_opts, &args) + .await + } + DbCommands::Multicast(MulticastArgs { + command: MulticastCommands::Pools, + }) => { + cmd_db_multicast_pools(&datastore, &fetch_opts).await + } + DbCommands::Multicast(MulticastArgs { + command: MulticastCommands::Info(args), + }) => { + cmd_db_multicast_info(&datastore, &fetch_opts, &args) + .await + } DbCommands::Snapshots(SnapshotArgs { command: SnapshotCommands::Info(uuid), }) => cmd_db_snapshot_info(&opctx, &datastore, uuid).await, @@ -7413,6 +7525,536 @@ impl From<&'_ Migration> for SingleInstanceMigrationRow { } } +// Multicast + +async fn cmd_db_multicast_groups( + datastore: &DataStore, + fetch_opts: &DbFetchOptions, + args: &MulticastGroupsArgs, +) -> Result<(), anyhow::Error> { + use nexus_db_schema::schema::ip_pool::dsl as pool_dsl; + use nexus_db_schema::schema::multicast_group::dsl; + use nexus_db_schema::schema::underlay_multicast_group::dsl as underlay_dsl; + + let conn = datastore.pool_connection_for_tests().await?; + + let mut query = dsl::multicast_group.into_boxed(); + if !fetch_opts.include_deleted { + query = query.filter(dsl::time_deleted.is_null()); + } + if let Some(state) = args.state { + query = query.filter(dsl::state.eq(state)); + } + if let Some(ref pool_name) = args.pool { + let pool_id: Uuid = pool_dsl::ip_pool + .filter(pool_dsl::name.eq(pool_name.clone())) + .filter(pool_dsl::time_deleted.is_null()) + .select(pool_dsl::id) + .first_async(&*conn) + .await + .with_context(|| { + format!("no pool found with name '{pool_name}'") + })?; + query = query.filter(dsl::ip_pool_id.eq(pool_id)); + } + + let groups: Vec = query + .order_by(dsl::time_created.desc()) + .limit(i64::from(u32::from(fetch_opts.fetch_limit))) + .select(ExternalMulticastGroup::as_select()) + .get_results_async(&*conn) + .await?; + + check_limit(&groups, fetch_opts.fetch_limit, || { + String::from("listing multicast groups") + }); + + // Batch lookup underlay IPs for groups that have 
underlay_group_id + let underlay_ids: Vec = + groups.iter().filter_map(|group| group.underlay_group_id).collect(); + let underlay_map: HashMap = + if underlay_ids.is_empty() { + HashMap::new() + } else { + underlay_dsl::underlay_multicast_group + .filter(underlay_dsl::id.eq_any(underlay_ids)) + .select((underlay_dsl::id, underlay_dsl::multicast_ip)) + .get_results_async::<(Uuid, ipnetwork::IpNetwork)>(&*conn) + .await + .unwrap_or_default() + .into_iter() + .collect() + }; + + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct MulticastGroupRow { + id: Uuid, + name: String, + state: MulticastGroupState, + multicast_ip: std::net::IpAddr, + #[tabled(display_with = "display_option_blank")] + underlay_ip: Option, + /// SSM source IPs or "ASM" for any-source multicast + sources: String, + vni: u32, + #[tabled(display_with = "datetime_rfc3339_concise")] + created: DateTime, + } + + let rows: Vec = groups + .into_iter() + .map(|group| { + let sources = if group.source_ips.is_empty() { + "ASM".to_string() + } else { + group + .source_ips + .iter() + .map(|ip| ip.ip().to_string()) + .collect::>() + .join(",") + }; + let underlay_ip = group + .underlay_group_id + .and_then(|id| underlay_map.get(&id)) + .map(|ip| ip.ip()); + MulticastGroupRow { + id: group.identity.id, + name: group.identity.name.to_string(), + state: group.state, + multicast_ip: group.multicast_ip.ip(), + underlay_ip, + sources, + vni: u32::from(group.vni.0), + created: group.identity.time_created, + } + }) + .collect(); + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{table}"); + + Ok(()) +} + +async fn cmd_db_multicast_members( + datastore: &DataStore, + fetch_opts: &DbFetchOptions, + args: &MulticastMembersArgs, +) -> Result<(), anyhow::Error> { + use nexus_db_schema::schema::multicast_group::dsl as group_dsl; + use 
nexus_db_schema::schema::multicast_group_member::dsl; + + let conn = datastore.pool_connection_for_tests().await?; + + // Resolve group_ip or group_name to a group_id if specified + let resolved_group_id = match (&args.group_ip, &args.group_name) { + (Some(ip), _) => { + let group: ExternalMulticastGroup = group_dsl::multicast_group + .filter(group_dsl::time_deleted.is_null()) + .filter( + group_dsl::multicast_ip.eq(ipnetwork::IpNetwork::from(*ip)), + ) + .select(ExternalMulticastGroup::as_select()) + .first_async(&*conn) + .await + .with_context(|| format!("no multicast group with IP {ip}"))?; + Some(group.id()) + } + (None, Some(name)) => { + let group: ExternalMulticastGroup = group_dsl::multicast_group + .filter(group_dsl::time_deleted.is_null()) + .filter(group_dsl::name.eq(name.clone())) + .select(ExternalMulticastGroup::as_select()) + .first_async(&*conn) + .await + .with_context(|| { + format!("no multicast group with name '{name}'") + })?; + Some(group.id()) + } + (None, None) => args.group_id, + }; + + let mut query = dsl::multicast_group_member.into_boxed(); + if !fetch_opts.include_deleted { + query = query.filter(dsl::time_deleted.is_null()); + } + if let Some(group_id) = resolved_group_id { + query = query.filter(dsl::external_group_id.eq(group_id)); + } + if let Some(state) = args.state { + query = query.filter(dsl::state.eq(state)); + } + if let Some(sled_id) = args.sled_id { + query = query.filter(dsl::sled_id.eq(sled_id.into_untyped_uuid())); + } + + let members: Vec = query + .order_by(dsl::time_created.desc()) + .limit(i64::from(u32::from(fetch_opts.fetch_limit))) + .select(MulticastGroupMember::as_select()) + .get_results_async(&*conn) + .await?; + + check_limit(&members, fetch_opts.fetch_limit, || { + String::from("listing multicast group members") + }); + + // Batch lookup group names + let group_ids: Vec = + members.iter().map(|member| member.external_group_id).collect(); + let group_names: HashMap = if group_ids.is_empty() { + 
HashMap::new() + } else { + group_dsl::multicast_group + .filter(group_dsl::id.eq_any(group_ids)) + .select((group_dsl::id, group_dsl::name)) + .get_results_async::<(Uuid, String)>(&*conn) + .await + .unwrap_or_default() + .into_iter() + .collect() + }; + + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct MulticastMemberRow { + id: Uuid, + group_name: String, + parent_id: Uuid, + state: MulticastGroupMemberState, + multicast_ip: std::net::IpAddr, + #[tabled(display_with = "display_option_blank")] + sled_id: Option, + #[tabled(display_with = "datetime_rfc3339_concise")] + created: DateTime, + } + + let rows: Vec = members + .into_iter() + .map(|member| { + let group_name = group_names + .get(&member.external_group_id) + .cloned() + .unwrap_or_else(|| member.external_group_id.to_string()); + MulticastMemberRow { + id: member.id, + group_name, + parent_id: member.parent_id, + state: member.state, + multicast_ip: member.multicast_ip.ip(), + sled_id: member.sled_id.map(SledUuid::from), + created: member.time_created, + } + }) + .collect(); + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{table}"); + + Ok(()) +} + +async fn cmd_db_multicast_pools( + datastore: &DataStore, + fetch_opts: &DbFetchOptions, +) -> Result<(), anyhow::Error> { + use nexus_db_schema::schema::ip_pool::dsl as pool_dsl; + use nexus_db_schema::schema::ip_pool_range::dsl as range_dsl; + + let conn = datastore.pool_connection_for_tests().await?; + + // Get multicast pools + let mut query = pool_dsl::ip_pool.into_boxed(); + query = query.filter(pool_dsl::pool_type.eq(IpPoolType::Multicast)); + if !fetch_opts.include_deleted { + query = query.filter(pool_dsl::time_deleted.is_null()); + } + + let pools: Vec = query + .order_by(pool_dsl::time_created.desc()) + .limit(i64::from(u32::from(fetch_opts.fetch_limit))) + .select(IpPool::as_select()) + 
.get_results_async(&*conn) + .await?; + + check_limit(&pools, fetch_opts.fetch_limit, || { + String::from("listing multicast pools") + }); + + if pools.is_empty() { + println!("no multicast IP pools found"); + return Ok(()); + } + + // Get ranges for each pool + let pool_ids: Vec = pools.iter().map(|pool| pool.id()).collect(); + + let mut range_query = range_dsl::ip_pool_range.into_boxed(); + range_query = + range_query.filter(range_dsl::ip_pool_id.eq_any(pool_ids.clone())); + if !fetch_opts.include_deleted { + range_query = range_query.filter(range_dsl::time_deleted.is_null()); + } + + let ranges: Vec = range_query + .order_by(range_dsl::first_address) + .select(IpPoolRange::as_select()) + .get_results_async(&*conn) + .await?; + + // Build output combining pools and ranges + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct MulticastPoolRow { + pool_id: Uuid, + pool_name: String, + first_address: std::net::IpAddr, + last_address: std::net::IpAddr, + #[tabled(display_with = "datetime_rfc3339_concise")] + created: chrono::DateTime, + } + + let pool_map: HashMap = + pools.iter().map(|pool| (pool.id(), pool)).collect(); + + let rows: Vec = ranges + .into_iter() + .filter_map(|range| { + pool_map.get(&range.ip_pool_id).map(|pool| MulticastPoolRow { + pool_id: pool.id(), + pool_name: pool.name().to_string(), + first_address: range.first_address.ip(), + last_address: range.last_address.ip(), + created: range.time_created, + }) + }) + .collect(); + + if rows.is_empty() { + println!("no multicast IP pool ranges found"); + return Ok(()); + } + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{table}"); + + Ok(()) +} + +async fn cmd_db_multicast_info( + datastore: &DataStore, + fetch_opts: &DbFetchOptions, + args: &MulticastInfoArgs, +) -> Result<(), anyhow::Error> { + use nexus_db_schema::schema::instance::dsl as instance_dsl; + use 
nexus_db_schema::schema::ip_pool::dsl as pool_dsl; + use nexus_db_schema::schema::multicast_group::dsl as group_dsl; + use nexus_db_schema::schema::multicast_group_member::dsl as member_dsl; + use nexus_db_schema::schema::sled::dsl as sled_dsl; + use nexus_db_schema::schema::underlay_multicast_group::dsl as underlay_dsl; + + let conn = datastore.pool_connection_for_tests().await?; + + // Find the group by ID, IP, or name - pair filter with error message + let (mut query, not_found_msg) = + match (&args.group_id, &args.ip, &args.name) { + (Some(id), _, _) => ( + group_dsl::multicast_group + .filter(group_dsl::id.eq(*id)) + .into_boxed(), + format!("no multicast group found with ID {id}"), + ), + (None, Some(ip), _) => ( + group_dsl::multicast_group + .filter( + group_dsl::multicast_ip + .eq(ipnetwork::IpNetwork::from(*ip)), + ) + .into_boxed(), + format!("no multicast group found with IP {ip}"), + ), + (None, None, Some(name)) => ( + group_dsl::multicast_group + .filter(group_dsl::name.eq(name.clone())) + .into_boxed(), + format!("no multicast group found with name \"{name}\""), + ), + (None, None, None) => { + bail!("must specify --group-id, --ip, or --name") + } + }; + + if !fetch_opts.include_deleted { + query = query.filter(group_dsl::time_deleted.is_null()); + } + + // Fetch group with underlay in single query using LEFT JOIN + let result: Option<( + ExternalMulticastGroup, + Option, + )> = + query + .left_join(underlay_dsl::underlay_multicast_group.on( + underlay_dsl::id.nullable().eq(group_dsl::underlay_group_id), + )) + .select(( + ExternalMulticastGroup::as_select(), + Option::::as_select(), + )) + .first_async(&*conn) + .await + .optional()?; + + let (group, underlay) = match result { + Some((grp, ulay)) => (grp, ulay), + None => { + println!("{not_found_msg}"); + return Ok(()); + } + }; + + // Look up the pool name + let pool_name: String = pool_dsl::ip_pool + .filter(pool_dsl::id.eq(group.ip_pool_id)) + .select(pool_dsl::name) + .first_async(&*conn) + 
.await + .unwrap_or_else(|_| "".into()); + + // Print group details + println!("MULTICAST GROUP"); + println!(" id: {}", group.identity.id); + println!(" name: {}", group.identity.name); + println!(" state: {:?}", group.state); + println!(" multicast_ip: {}", group.multicast_ip); + println!(" vni: {}", u32::from(group.vni.0)); + println!(" source_ips: {:?}", group.source_ips); + println!(" ip_pool: {pool_name} ({})", group.ip_pool_id); + println!(" underlay_group: {:?}", group.underlay_group_id); + println!(" tag: {:?}", group.tag); + println!(" created: {}", group.identity.time_created); + if let Some(deleted) = group.identity.time_deleted { + println!(" deleted: {deleted}"); + } + + // Display underlay group if present + if let Some(underlay_group) = underlay { + println!("\nUNDERLAY GROUP"); + println!(" id: {}", underlay_group.id); + println!(" multicast_ip: {}", underlay_group.multicast_ip); + println!(" tag: {:?}", underlay_group.tag); + println!(" created: {}", underlay_group.time_created); + if let Some(deleted) = underlay_group.time_deleted { + println!(" deleted: {deleted}"); + } + } + + // Find members for this group + let mut member_query = member_dsl::multicast_group_member.into_boxed(); + member_query = member_query + .filter(member_dsl::external_group_id.eq(group.identity.id)); + if !fetch_opts.include_deleted { + member_query = member_query.filter(member_dsl::time_deleted.is_null()); + } + + let members: Vec = member_query + .order_by(member_dsl::time_created.desc()) + .select(MulticastGroupMember::as_select()) + .get_results_async(&*conn) + .await?; + + if members.is_empty() { + println!("\nMEMBERS: (none)"); + } else { + println!("\nMEMBERS ({}):", members.len()); + + // Batch lookup instance names (parent_id references instances) + let parent_ids: Vec = + members.iter().map(|member| member.parent_id).collect(); + let instances: Vec<(Uuid, String)> = instance_dsl::instance + .filter(instance_dsl::id.eq_any(parent_ids)) + .select((instance_dsl::id, 
instance_dsl::name)) + .get_results_async(&*conn) + .await + .unwrap_or_default(); + let instance_map: HashMap = + instances.into_iter().collect(); + + // Batch lookup sled serials + let sled_ids: Vec = members + .iter() + .filter_map(|member| member.sled_id.map(|s| s.into_untyped_uuid())) + .collect(); + let sleds: Vec<(Uuid, String)> = if sled_ids.is_empty() { + Vec::new() + } else { + sled_dsl::sled + .filter(sled_dsl::id.eq_any(sled_ids)) + .select((sled_dsl::id, sled_dsl::serial_number)) + .get_results_async(&*conn) + .await + .unwrap_or_default() + }; + let sled_map: HashMap = sleds.into_iter().collect(); + + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct MemberRow { + id: Uuid, + instance: String, + state: MulticastGroupMemberState, + sled: String, + #[tabled(display_with = "datetime_rfc3339_concise")] + created: DateTime, + } + + let rows: Vec = members + .into_iter() + .map(|member| { + let instance_name = instance_map + .get(&member.parent_id) + .cloned() + .unwrap_or_else(|| member.parent_id.to_string()); + let sled_serial = member + .sled_id + .and_then(|s| sled_map.get(&s.into_untyped_uuid()).cloned()) + .unwrap_or_else(|| "-".to_string()); + MemberRow { + id: member.id, + instance: instance_name, + state: member.state, + sled: sled_serial, + created: member.time_created, + } + }) + .collect(); + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{table}"); + } + + Ok(()) +} + // VMMs async fn cmd_db_vmm_info( diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index 23185f4bcef..ae161c5e19f 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -60,6 +60,7 @@ use nexus_types::internal_api::background::InstanceReincarnationStatus; use nexus_types::internal_api::background::InstanceUpdaterStatus; use 
nexus_types::internal_api::background::InventoryLoadStatus; use nexus_types::internal_api::background::LookupRegionPortStatus; +use nexus_types::internal_api::background::MulticastGroupReconcilerStatus; use nexus_types::internal_api::background::ProbeDistributorStatus; use nexus_types::internal_api::background::ReadOnlyRegionReplacementStartStatus; use nexus_types::internal_api::background::RegionReplacementDriverStatus; @@ -1193,6 +1194,9 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) { "lookup_region_port" => { print_task_lookup_region_port(details); } + "multicast_reconciler" => { + print_task_multicast_reconciler(details); + } "phantom_disks" => { print_task_phantom_disks(details); } @@ -2109,6 +2113,76 @@ fn print_task_lookup_region_port(details: &serde_json::Value) { } } +fn print_task_multicast_reconciler(details: &serde_json::Value) { + let status = match serde_json::from_value::( + details.clone(), + ) { + Err(error) => { + eprintln!( + "warning: failed to interpret task details: {error:?}: {details:?}" + ); + return; + } + Ok(status) => status, + }; + + if status.disabled { + println!(" multicast feature is disabled"); + return; + } + + const GROUPS_CREATED: &str = "groups created (Creating->Active):"; + const GROUPS_DELETED: &str = "groups deleted (cleanup):"; + const GROUPS_VERIFIED: &str = "groups verified (Active):"; + const EMPTY_GROUPS_MARKED: &str = "empty groups marked for deletion:"; + const MEMBERS_PROCESSED: &str = "members processed:"; + const MEMBERS_DELETED: &str = "members deleted:"; + const WIDTH: usize = const_max_len(&[ + GROUPS_CREATED, + GROUPS_DELETED, + GROUPS_VERIFIED, + EMPTY_GROUPS_MARKED, + MEMBERS_PROCESSED, + MEMBERS_DELETED, + ]) + 1; + const NUM_WIDTH: usize = 3; + + if !status.errors.is_empty() { + println!( + " task did not complete successfully! 
({} errors)", + status.errors.len() + ); + for error in &status.errors { + println!(" > {error}"); + } + } + + println!( + " {GROUPS_CREATED:NUM_WIDTH$}", + status.groups_created + ); + println!( + " {GROUPS_DELETED:NUM_WIDTH$}", + status.groups_deleted + ); + println!( + " {GROUPS_VERIFIED:NUM_WIDTH$}", + status.groups_verified + ); + println!( + " {EMPTY_GROUPS_MARKED:NUM_WIDTH$}", + status.empty_groups_marked + ); + println!( + " {MEMBERS_PROCESSED:NUM_WIDTH$}", + status.members_processed + ); + println!( + " {MEMBERS_DELETED:NUM_WIDTH$}", + status.members_deleted + ); +} + fn print_task_phantom_disks(details: &serde_json::Value) { #[derive(Deserialize)] struct TaskSuccess { diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index cd38fea5bb3..a2c142c8acb 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -85,6 +85,36 @@ stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable note: database schema version matches expected () ============================================= +EXECUTING COMMAND: omdb ["db", "multicast", "groups"] +termination: Exited(0) +--------------------------------------------- +stdout: +ID NAME STATE MULTICAST_IP UNDERLAY_IP SOURCES VNI CREATED +--------------------------------------------- +stderr: +note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable +note: database schema version matches expected () +============================================= +EXECUTING COMMAND: omdb ["db", "multicast", "members"] +termination: Exited(0) +--------------------------------------------- +stdout: +ID GROUP_NAME PARENT_ID STATE MULTICAST_IP SLED_ID CREATED +--------------------------------------------- +stderr: +note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable +note: database schema version matches expected () +============================================= +EXECUTING COMMAND: 
omdb ["db", "multicast", "pools"] +termination: Exited(0) +--------------------------------------------- +stdout: +no multicast IP pools found +--------------------------------------------- +stderr: +note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable +note: database schema version matches expected () +============================================= EXECUTING COMMAND: omdb ["db", "sleds"] termination: Exited(0) --------------------------------------------- @@ -713,7 +743,12 @@ task: "multicast_reconciler" configured period: every m last completed activation: , triggered by started at (s ago) and ran for ms -warning: unknown background task: "multicast_reconciler" (don't know how to interpret details: Object {"disabled": Bool(false), "empty_groups_marked": Number(0), "errors": Array [], "groups_created": Number(0), "groups_deleted": Number(0), "groups_verified": Number(0), "members_deleted": Number(0), "members_processed": Number(0)}) + groups created (Creating->Active): 0 + groups deleted (cleanup): 0 + groups verified (Active): 0 + empty groups marked for deletion: 0 + members processed: 0 + members deleted: 0 task: "phantom_disks" configured period: every s @@ -1281,7 +1316,12 @@ task: "multicast_reconciler" configured period: every m last completed activation: , triggered by started at (s ago) and ran for ms -warning: unknown background task: "multicast_reconciler" (don't know how to interpret details: Object {"disabled": Bool(false), "empty_groups_marked": Number(0), "errors": Array [], "groups_created": Number(0), "groups_deleted": Number(0), "groups_verified": Number(0), "members_deleted": Number(0), "members_processed": Number(0)}) + groups created (Creating->Active): 0 + groups deleted (cleanup): 0 + groups verified (Active): 0 + empty groups marked for deletion: 0 + members processed: 0 + members deleted: 0 task: "phantom_disks" configured period: every s diff --git a/dev-tools/omdb/tests/test_all_output.rs 
b/dev-tools/omdb/tests/test_all_output.rs index 1c9104e7f8c..8ce516f6817 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -96,6 +96,7 @@ async fn test_omdb_usage_errors() { &["db", "saga"], &["db", "snapshots"], &["db", "network"], + &["db", "multicast"], &["mgs"], &["nexus"], &["nexus", "background-tasks"], @@ -198,6 +199,9 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { &["db", "dns", "diff", "external", "2"], &["db", "dns", "names", "external", "2"], &["db", "instances"], + &["db", "multicast", "groups"], + &["db", "multicast", "members"], + &["db", "multicast", "pools"], &["db", "sleds"], &["db", "sleds", "-F", "discretionary"], &["mgs", "inventory"], diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index bd28399bc68..d4234c16923 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -140,6 +140,7 @@ Commands: instances Alias to `omdb instance list` network Print information about the network migrations Print information about migrations + multicast Print information about multicast groups snapshots Print information about snapshots validate Validate the contents of the database volumes Print information about volumes @@ -205,6 +206,7 @@ Commands: instances Alias to `omdb instance list` network Print information about the network migrations Print information about migrations + multicast Print information about multicast groups snapshots Print information about snapshots validate Validate the contents of the database volumes Print information about volumes @@ -847,6 +849,41 @@ Database Options: --include-deleted whether to include soft-deleted records when enumerating objects that can be soft-deleted +Safety Options: + -w, --destructive Allow potentially-destructive subcommands +============================================= +EXECUTING COMMAND: omdb ["db", "multicast"] +termination: Exited(2) 
+--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +Print information about multicast groups + +Usage: omdb db multicast [OPTIONS] + +Commands: + groups List all multicast groups + members List all multicast group members + pools List multicast IP pools and their ranges + info Get info for a multicast group by IP address or name + help Print this message or the help of the given subcommand(s) + +Options: + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + --color Color output [default: auto] [possible values: auto, always, never] + -h, --help Print help + +Connection Options: + --db-url URL of the database SQL interface [env: OMDB_DB_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Database Options: + --fetch-limit limit to apply to queries that fetch rows [env: + OMDB_FETCH_LIMIT=] [default: 500] + --include-deleted whether to include soft-deleted records when enumerating objects + that can be soft-deleted + Safety Options: -w, --destructive Allow potentially-destructive subcommands ============================================= diff --git a/nexus/db-model/src/multicast_group.rs b/nexus/db-model/src/multicast_group.rs index 6c9deea3de4..b4070c0edbd 100644 --- a/nexus/db-model/src/multicast_group.rs +++ b/nexus/db-model/src/multicast_group.rs @@ -120,24 +120,90 @@ impl_enum_type!( Left => b"left" ); +impl MulticastGroupState { + pub const ALL_STATES: &'static [Self] = + &[Self::Creating, Self::Active, Self::Deleting, Self::Deleted]; + + pub fn label(&self) -> &'static str { + match self { + Self::Creating => "Creating", + Self::Active => "Active", + Self::Deleting => "Deleting", + Self::Deleted => "Deleted", + } + } +} + impl std::fmt::Display for MulticastGroupState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(match self { - MulticastGroupState::Creating => "Creating", - MulticastGroupState::Active => "Active", - MulticastGroupState::Deleting => 
"Deleting", - MulticastGroupState::Deleted => "Deleted", - }) + f.write_str(self.label()) + } +} + +/// Error returned when parsing a `MulticastGroupState` from a string. +#[derive(Debug)] +pub struct MulticastGroupStateParseError(()); + +impl std::fmt::Display for MulticastGroupStateParseError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "invalid multicast group state") + } +} + +impl std::error::Error for MulticastGroupStateParseError {} + +impl std::str::FromStr for MulticastGroupState { + type Err = MulticastGroupStateParseError; + fn from_str(s: &str) -> Result { + for &v in Self::ALL_STATES { + if s.eq_ignore_ascii_case(v.label()) { + return Ok(v); + } + } + Err(MulticastGroupStateParseError(())) + } +} + +impl MulticastGroupMemberState { + pub const ALL_STATES: &'static [Self] = + &[Self::Joining, Self::Joined, Self::Left]; + + pub fn label(&self) -> &'static str { + match self { + Self::Joining => "Joining", + Self::Joined => "Joined", + Self::Left => "Left", + } } } impl std::fmt::Display for MulticastGroupMemberState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(match self { - MulticastGroupMemberState::Joining => "Joining", - MulticastGroupMemberState::Joined => "Joined", - MulticastGroupMemberState::Left => "Left", - }) + f.write_str(self.label()) + } +} + +/// Error returned when parsing a `MulticastGroupMemberState` from a string. 
+// Opaque error for `MulticastGroupMemberState::from_str`; the private unit
+// payload keeps construction confined to this module, matching std's
+// parse-error style.
+#[derive(Debug)]
+pub struct MulticastGroupMemberStateParseError(());
+
+impl std::fmt::Display for MulticastGroupMemberStateParseError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "invalid multicast group member state")
+    }
+}
+
+impl std::error::Error for MulticastGroupMemberStateParseError {}
+
+// Case-insensitive parse from a state's display label; used by omdb's clap
+// `--state` value parser (`PossibleValuesParser` + `try_map(str::parse)`).
+impl std::str::FromStr for MulticastGroupMemberState {
+    type Err = MulticastGroupMemberStateParseError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        // Linear scan is fine: ALL_STATES is a small, fixed set.
+        for &v in Self::ALL_STATES {
+            if s.eq_ignore_ascii_case(v.label()) {
+                return Ok(v);
+            }
+        }
+        Err(MulticastGroupMemberStateParseError(()))
+    }
+}
diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs
index b6cc91c53db..a835b1d7c89 100644
--- a/nexus/db-schema/src/schema.rs
+++ b/nexus/db-schema/src/schema.rs
@@ -2816,8 +2816,12 @@ table! {
     }
 }
 
-// Allow multicast tables to appear together for NOT EXISTS subqueries
+// Allow multicast tables to appear together for JOINs and NOT EXISTS subqueries
 allow_tables_to_appear_in_same_query!(multicast_group, multicast_group_member);
+allow_tables_to_appear_in_same_query!(
+    multicast_group,
+    underlay_multicast_group
+);
 
 allow_tables_to_appear_in_same_query!(user_data_export, snapshot, image);
diff --git a/nexus/tests/integration_tests/multicast/mod.rs b/nexus/tests/integration_tests/multicast/mod.rs
index ed4c06c46fb..79b4ce0d43b 100644
--- a/nexus/tests/integration_tests/multicast/mod.rs
+++ b/nexus/tests/integration_tests/multicast/mod.rs
@@ -61,6 +61,7 @@ mod failures;
 mod groups;
 mod instances;
 mod networking_integration;
+mod omdb;
 
 // Timeout constants for test operations
 const POLL_INTERVAL: Duration = Duration::from_millis(80);
diff --git a/nexus/tests/integration_tests/multicast/omdb.rs b/nexus/tests/integration_tests/multicast/omdb.rs
new file mode 100644
index 00000000000..a1d0b4f9f39
--- /dev/null
+++ b/nexus/tests/integration_tests/multicast/omdb.rs
@@ -0,0 +1,446 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Tests for omdb multicast commands with real data.
+//!
+//! These tests verify that omdb correctly formats multicast data by creating
+//! actual multicast pools, groups, and members, then running omdb commands
+//! and checking the output.
+
+use futures::future::join3;
+use nexus_test_utils::http_testing::{AuthnMode, NexusRequest};
+use nexus_test_utils_macros::nexus_test;
+use nexus_types::external_api::params::MulticastGroupMemberAdd;
+use nexus_types::external_api::views::{MulticastGroup, MulticastGroupMember};
+use omicron_common::api::external::NameOrId;
+use std::net::IpAddr;
+use std::path::PathBuf;
+use std::process::Command;
+
+use super::{
+    ControlPlaneTestContext, create_multicast_ip_pool,
+    create_multicast_ip_pool_with_range, instance_for_multicast_groups,
+    mcast_group_url, wait_for_group_active, wait_for_member_state,
+};
+use nexus_test_utils::resource_helpers::{
+    create_default_ip_pool, create_project, object_create,
+};
+
+const PROJECT_NAME: &str = "omdb-test-project";
+const TARGET_DIR: &str = "target";
+const OMDB_BIN: &str = "omdb";
+// Probed in order; `debug` first since that is the common local build.
+const BUILD_PROFILES: &[&str] = &["debug", "release"];
+
+/// Find the omdb binary path.
+///
+/// Since omdb is not built as part of omicron-nexus tests, we look for it
+/// in the target directory relative to CARGO_MANIFEST_DIR.
+fn find_omdb() -> PathBuf {
+    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+    // The crate dir's parent is treated as the workspace root; unwrap is
+    // fine because CARGO_MANIFEST_DIR is never the filesystem root.
+    let workspace_root = manifest_dir.parent().unwrap();
+
+    for profile in BUILD_PROFILES {
+        let omdb_path =
+            workspace_root.join(TARGET_DIR).join(profile).join(OMDB_BIN);
+        if omdb_path.exists() {
+            return omdb_path;
+        }
+    }
+
+    // Fall back to the bare name so `Command` resolves it via $PATH.
+    PathBuf::from(OMDB_BIN)
+}
+
+/// Run an omdb command and return its stdout.
+///
+/// Points omdb at the test database via the `OMDB_DB_URL` environment
+/// variable.
+///
+/// # Panics
+///
+/// Panics if the binary cannot be spawned (e.g. omdb has not been built),
+/// or if the command exits unsuccessfully — in which case the exit status
+/// and both output streams are included in the panic message.
+fn run_omdb(db_url: &str, args: &[&str]) -> String {
+    let cmd_path = find_omdb();
+    let output = Command::new(&cmd_path)
+        .env("OMDB_DB_URL", db_url)
+        .args(args)
+        .output()
+        .expect(
+            "failed to execute `omdb` - ensure `cargo build -p omicron-omdb` has been run",
+        );
+
+    if !output.status.success() {
+        // Include the exit status and stdout as well as stderr: failures
+        // often print their diagnostics to stdout, and the status tells a
+        // nonzero exit apart from a signal kill.
+        let stdout = String::from_utf8_lossy(&output.stdout);
+        let stderr = String::from_utf8_lossy(&output.stderr);
+        panic!(
+            "omdb command failed ({status}) with args {args:?}:\n\
+             stdout: {stdout}\nstderr: {stderr}",
+            status = output.status,
+        );
+    }
+
+    // `into_owned` reuses the lossy conversion's buffer when one was
+    // allocated, rather than re-allocating via `to_string`.
+    String::from_utf8_lossy(&output.stdout).into_owned()
+}
+
+/// Test omdb multicast pools command.
+#[nexus_test]
+async fn test_omdb_multicast_pools(cptestctx: &ControlPlaneTestContext) {
+    let db_url = cptestctx.database.listen_url().to_string();
+    let client = &cptestctx.external_client;
+
+    // Before creating any pools, should show "no multicast IP pools found"
+    let output = run_omdb(&db_url, &["db", "multicast", "pools"]);
+    assert!(
+        output.contains("no multicast IP pools found"),
+        "Expected empty pool message, got: {output}"
+    );
+
+    // Create a multicast pool
+    create_multicast_ip_pool(client, "test-mcast-pool").await;
+
+    // Now should show the pool
+    let output = run_omdb(&db_url, &["db", "multicast", "pools"]);
+    assert!(
+        output.contains("test-mcast-pool"),
+        "Expected pool name in output, got: {output}"
+    );
+    // Assumes create_multicast_ip_pool allocates a range starting at
+    // 224.2.0.0 — TODO confirm against the helper if it ever changes.
+    assert!(
+        output.contains("224.2.0.0"),
+        "Expected pool range start in output, got: {output}"
+    );
+}
+
+/// Test omdb multicast groups, members, and info commands.
+///
+/// This consolidated test verifies all multicast commands work with actual data.
+///
+/// Flow: create pools/project concurrently, implicitly create groups by
+/// adding members, then exercise `groups` (with filters), `members` (every
+/// filter plus combinations), `info` (every selector), and finally an SSM
+/// group to cover source-IP rendering.
+#[nexus_test]
+async fn test_omdb_multicast_commands(cptestctx: &ControlPlaneTestContext) {
+    let db_url = cptestctx.database.listen_url().to_string();
+    let client = &cptestctx.external_client;
+
+    // Setup: create pools and project (independent, so run concurrently)
+    let (_, _, _) = join3(
+        create_default_ip_pool(client),
+        create_project(client, PROJECT_NAME),
+        create_multicast_ip_pool(client, "test-mcast-pool"),
+    )
+    .await;
+
+    // Create an instance without multicast groups first
+    let instance = instance_for_multicast_groups(
+        cptestctx,
+        PROJECT_NAME,
+        "test-instance",
+        false, // don't start
+        &[], // no multicast groups yet
+    )
+    .await;
+
+    // Add a multicast member via API (this implicitly creates the group)
+    let member_add_url = format!(
+        "/v1/multicast-groups/test-mcast-group/members?project={PROJECT_NAME}"
+    );
+
+    object_create::<_, MulticastGroupMember>(
+        client,
+        &member_add_url,
+        &MulticastGroupMemberAdd {
+            instance: NameOrId::Id(instance.identity.id),
+            source_ips: None, // ASM (Any-Source Multicast)
+        },
+    )
+    .await;
+
+    // Wait for the group to become "Active"
+    wait_for_group_active(client, "test-mcast-group").await;
+
+    // Get the group details (allocated IP, ID) for later tests
+    let group_url = mcast_group_url("test-mcast-group");
+    let group: MulticastGroup = NexusRequest::object_get(client, &group_url)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("failed to get group")
+        .parsed_body()
+        .expect("failed to parse group");
+
+    // Test: omdb db multicast groups
+    let output = run_omdb(&db_url, &["db", "multicast", "groups"]);
+    assert!(
+        output.contains("test-mcast-group"),
+        "Expected group name in groups output, got: {output}"
+    );
+
+    // Verify ASM (any-source multicast) is shown since we didn't specify source_ips
+    assert!(
+        output.contains("ASM"),
+        "Expected ASM in sources column, got: {output}"
+    );
+
+    // Test: omdb db multicast groups --state active
+    let output =
+        run_omdb(&db_url, &["db", "multicast", "groups", "--state", "active"]);
+    assert!(
+        output.contains("test-mcast-group"),
+        "Expected group name with state filter, got: {output}"
+    );
+
+    // Test: omdb db multicast groups --pool
+    let output = run_omdb(
+        &db_url,
+        &["db", "multicast", "groups", "--pool", "test-mcast-pool"],
+    );
+    assert!(
+        output.contains("test-mcast-group"),
+        "Expected group name with pool filter, got: {output}"
+    );
+
+    // Test: omdb db multicast members (unfiltered)
+    let output = run_omdb(&db_url, &["db", "multicast", "members"]);
+    assert!(
+        output.contains(&instance.identity.id.to_string()),
+        "Expected instance ID in members output, got: {output}"
+    );
+
+    // Verify the IP column shows the group's allocated IP
+    let group_ip = group.multicast_ip.to_string();
+    assert!(
+        output.contains(&group_ip),
+        "Expected multicast IP {group_ip} in members output, got: {output}"
+    );
+
+    // Test: omdb db multicast members --group-name
+    let output = run_omdb(
+        &db_url,
+        &["db", "multicast", "members", "--group-name", "test-mcast-group"],
+    );
+    assert!(
+        output.contains(&instance.identity.id.to_string()),
+        "Expected instance ID with group-name filter, got: {output}"
+    );
+
+    // Test: omdb db multicast members --group-ip (reuses group_ip from above)
+    let output = run_omdb(
+        &db_url,
+        &["db", "multicast", "members", "--group-ip", &group_ip],
+    );
+    assert!(
+        output.contains(&instance.identity.id.to_string()),
+        "Expected instance ID with group-ip filter, got: {output}"
+    );
+
+    // Test: omdb db multicast members --group-id
+    let group_id = group.identity.id.to_string();
+    let output = run_omdb(
+        &db_url,
+        &["db", "multicast", "members", "--group-id", &group_id],
+    );
+    assert!(
+        output.contains(&instance.identity.id.to_string()),
+        "Expected instance ID with group-id filter, got: {output}"
+    );
+
+    // Test: omdb db multicast members --state left
+    // Wait for the RPW reconciler to transition member to "Left" state
+    // (instance isn't running, so no sled_id assignment)
+    wait_for_member_state(
+        cptestctx,
+        "test-mcast-group",
+        instance.identity.id,
+        nexus_db_model::MulticastGroupMemberState::Left,
+    )
+    .await;
+    let output =
+        run_omdb(&db_url, &["db", "multicast", "members", "--state", "left"]);
+    assert!(
+        output.contains(&instance.identity.id.to_string()),
+        "Expected instance ID with state=left filter, got: {output}"
+    );
+
+    // Test: omdb db multicast members --sled-id
+    // Create a started instance so the member gets a sled_id
+    let started_instance = instance_for_multicast_groups(
+        cptestctx,
+        PROJECT_NAME,
+        "started-instance",
+        true, // start the instance
+        &[],
+    )
+    .await;
+
+    // Add member to a new group for the started instance
+    let sled_member_url = format!(
+        "/v1/multicast-groups/sled-test-group/members?project={PROJECT_NAME}"
+    );
+    object_create::<_, MulticastGroupMember>(
+        client,
+        &sled_member_url,
+        &MulticastGroupMemberAdd {
+            instance: NameOrId::Id(started_instance.identity.id),
+            source_ips: None,
+        },
+    )
+    .await;
+
+    wait_for_group_active(client, "sled-test-group").await;
+
+    // Query members by sled_id - the started instance should be on first_sled
+    let sled_id = cptestctx.first_sled_id().to_string();
+    let output = run_omdb(
+        &db_url,
+        &["db", "multicast", "members", "--sled-id", &sled_id],
+    );
+    assert!(
+        output.contains(&started_instance.identity.id.to_string()),
+        "Expected started instance ID with sled-id filter, got: {output}"
+    );
+
+    // Test: omdb db multicast members --state joined
+    // Wait for the started instance's member to reach "Joined" state
+    wait_for_member_state(
+        cptestctx,
+        "sled-test-group",
+        started_instance.identity.id,
+        nexus_db_model::MulticastGroupMemberState::Joined,
+    )
+    .await;
+
+    // Now test the --state joined filter
+    let output_joined =
+        run_omdb(&db_url, &["db", "multicast", "members", "--state", "joined"]);
+    assert!(
+        output_joined.contains(&started_instance.identity.id.to_string()),
+        "Expected started instance in joined state, got: {output_joined}"
+    );
+
+    // Verify started instance is NOT in left state
+    let output_left =
+        run_omdb(&db_url, &["db", "multicast", "members", "--state", "left"]);
+    assert!(
+        !output_left.contains(&started_instance.identity.id.to_string()),
+        "Started instance should NOT be in left state, got: {output_left}"
+    );
+
+    // Test: combined filters (--group-name + --state)
+    // The started instance's member should appear when filtering by both
+    let output_combined = run_omdb(
+        &db_url,
+        &[
+            "db",
+            "multicast",
+            "members",
+            "--group-name",
+            "sled-test-group",
+            "--state",
+            "joined",
+        ],
+    );
+    assert!(
+        output_combined.contains(&started_instance.identity.id.to_string()),
+        "Expected started instance with combined filters, got: {output_combined}"
+    );
+
+    // Test: combined filters that should return empty (wrong group + state)
+    let output_combined_empty = run_omdb(
+        &db_url,
+        &[
+            "db",
+            "multicast",
+            "members",
+            "--group-name",
+            "test-mcast-group",
+            "--state",
+            "joined",
+        ],
+    );
+    // test-mcast-group has a non-started instance, so it should NOT be in joined state
+    assert!(
+        !output_combined_empty
+            .contains(&started_instance.identity.id.to_string()),
+        "Started instance should NOT appear in wrong group filter, got: {output_combined_empty}"
+    );
+
+    // Test: omdb db multicast info --name
+    let output = run_omdb(
+        &db_url,
+        &["db", "multicast", "info", "--name", "test-mcast-group"],
+    );
+    assert!(
+        output.contains("MULTICAST GROUP"),
+        "Expected group header in info output, got: {output}"
+    );
+    assert!(
+        output.contains("test-mcast-group"),
+        "Expected group name in info output, got: {output}"
+    );
+    assert!(
+        output.contains("test-mcast-pool"),
+        "Expected pool name in info output, got: {output}"
+    );
+    assert!(
+        output.contains("MEMBERS"),
+        "Expected members section in info output, got: {output}"
+    );
+
+    // Test: omdb db multicast info --ip
+    let output =
+        run_omdb(&db_url, &["db", "multicast", "info", "--ip", &group_ip]);
+    assert!(
+        output.contains("test-mcast-group"),
+        "Expected group name when querying by IP, got: {output}"
+    );
+
+    // Test: omdb db multicast info --group-id (reuses group_id from members test)
+    let output = run_omdb(
+        &db_url,
+        &["db", "multicast", "info", "--group-id", &group_id],
+    );
+    assert!(
+        output.contains("test-mcast-group"),
+        "Expected group name when querying by ID, got: {output}"
+    );
+
+    // Test SSM (Source-Specific Multicast) - create a group with source IPs
+    // SSM requires IPs from the 232.x.x.x range, so create an SSM pool first
+    create_multicast_ip_pool_with_range(
+        client,
+        "test-ssm-pool",
+        (232, 1, 0, 0), // SSM range start
+        (232, 1, 0, 255), // SSM range end
+    )
+    .await;
+
+    let ssm_instance = instance_for_multicast_groups(
+        cptestctx,
+        PROJECT_NAME,
+        "ssm-instance",
+        false,
+        &[],
+    )
+    .await;
+
+    let ssm_member_url = format!(
+        "/v1/multicast-groups/ssm-group/members?project={PROJECT_NAME}"
+    );
+    object_create::<_, MulticastGroupMember>(
+        client,
+        &ssm_member_url,
+        &MulticastGroupMemberAdd {
+            instance: NameOrId::Id(ssm_instance.identity.id),
+            // Explicit source list makes this group SSM rather than ASM
+            source_ips: Some(vec![
+                "10.0.0.1".parse::<IpAddr>().unwrap(),
+                "10.0.0.2".parse::<IpAddr>().unwrap(),
+            ]),
+        },
+    )
+    .await;
+
+    wait_for_group_active(client, "ssm-group").await;
+
+    // Verify SSM sources show in groups list
+    let output = run_omdb(&db_url, &["db", "multicast", "groups"]);
+    assert!(
+        output.contains("ssm-group"),
+        "Expected SSM group in output, got: {output}"
+    );
+    assert!(
+        output.contains("10.0.0.1") && output.contains("10.0.0.2"),
+        "Expected SSM source IPs in output, got: {output}"
+    );
+
+    // Verify SSM sources show in info command
+    // NOTE(review): this uses `||` while the groups check above uses `&&` —
+    // if `info` is expected to list all sources, consider tightening to `&&`.
+    let output =
+        run_omdb(&db_url, &["db", "multicast", "info", "--name", "ssm-group"]);
+    assert!(
+        output.contains("10.0.0.1") || output.contains("10.0.0.2"),
+        "Expected SSM source IPs in info output, got: {output}"
+    );
+}