From 6fd5020120c681091daa7f6d2b2fa0ddc6bfa248 Mon Sep 17 00:00:00 2001
From: Abhinandan Purkait
Date: Fri, 14 Jul 2023 12:35:21 +0000
Subject: [PATCH] chore: add snapshot and rebuild history to bundle, disable
 resource level dump

Signed-off-by: Abhinandan Purkait
---
 Cargo.lock                                    |   1 -
 k8s/plugin/README.md                          |  72 +---
 k8s/supportability/Cargo.toml                 |   1 -
 k8s/supportability/src/collect/common.rs      |   8 +-
 .../src/collect/k8s_resources/client.rs       | 169 +++++++++-
 .../k8s_resources/k8s_resource_dump.rs        | 312 ++++++++++++------
 .../src/collect/resource_dump.rs              |  33 +-
 .../src/collect/resources/mod.rs              |   1 +
 .../src/collect/resources/node.rs             |  36 +-
 .../src/collect/resources/pool.rs             |  42 +--
 .../src/collect/resources/snapshot.rs         | 130 ++++++++
 .../src/collect/resources/traits.rs           |  11 -
 .../src/collect/resources/utils.rs            |  83 +----
 .../src/collect/resources/volume.rs           |  67 ++--
 k8s/supportability/src/collect/system_dump.rs |  36 +-
 k8s/supportability/src/lib.rs                 |  30 +-
 k8s/supportability/src/operations.rs          |  18 +-
 17 files changed, 660 insertions(+), 390 deletions(-)
 create mode 100644 k8s/supportability/src/collect/resources/snapshot.rs

diff --git a/Cargo.lock b/Cargo.lock
index fb79d624b..230187753 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3310,7 +3310,6 @@ dependencies = [
  "once_cell",
  "openapi",
  "platform",
- "prettytable-rs",
  "pstor",
  "schemars",
  "serde",
diff --git a/k8s/plugin/README.md b/k8s/plugin/README.md
index 89d5e77fc..3ed148ec5 100644
--- a/k8s/plugin/README.md
+++ b/k8s/plugin/README.md
@@ -274,15 +274,9 @@ kubectl mayastor dump
 Usage: kubectl-mayastor dump [OPTIONS] <COMMAND>
 
 Commands:
-  system   Collects entire system information
-  volumes  Collects information about all volumes and its descendants (replicas/pools/nodes)
-  volume   Collects information about particular volume and its descendants matching to given volume ID
-  pools    Collects information about all pools and its descendants (nodes)
-  pool     Collects information about particular pool and its descendants matching to given pool ID
-  nodes    Collects information about all nodes
-  node     Collects information about particular node matching to given node ID
-  etcd     Collects information from etcd
-  help     Print this message or the help of the given subcommand(s)
+  system  Collects entire system information
+  etcd    Collects information from etcd
+  help    Print this message or the help of the given subcommand(s)
 
 Options:
   -r, --rest <REST>
@@ -300,7 +294,7 @@ Options:
   -d, --output-directory-path <OUTPUT_DIRECTORY_PATH>
          Output directory path to store archive file [default: ./]
   -n, --namespace <NAMESPACE>
-         Kubernetes namespace of mayastor service[default: mayastor]
+         Kubernetes namespace of mayastor service [default: mayastor]
   -o, --output <OUTPUT>
          The Output, viz yaml, json [default: none]
   -j, --jaeger <JAEGER>
@@ -316,58 +310,12 @@ Supportability - collects state & log information of services and dumps it to a
 
 **Examples**:
 
-1. To collect entire mayastor system information into an archive file
-   ```sh
-   ## Command
-   kubectl mayastor dump system -d <output_directory_path> -n <mayastor_namespace>
-   ```
-
-   - Example command while running inside Kubernetes cluster nodes / system which
-     has access to cluster node ports
-   ```sh
-   kubectl mayastor dump system -d /mayastor-dump -n mayastor
-   ```
-   - Example command while running outside of Kubernetes cluster nodes where
-     nodes exist in private network (or) node ports are not exposed for outside cluster
-   ```sh
-   kubectl mayastor dump system -d /mayastor-dump -r http://127.0.0.1:30011 -l http://127.0.0.1:3100 -e http://127.0.0.1:2379 -n mayastor
-   ```
-
-2. To collect information about all mayastor volumes into an archive file
-   ```sh
-   ## Command
-   kubectl mayastor dump volumes -d <output_directory_path> -n <mayastor_namespace>
-   ```
-
-   - Example command while running inside Kubernetes cluster nodes / system which
-     has access to cluster node ports
-   ```sh
-   kubectl mayastor dump volumes -d /mayastor-dump -n mayastor
-   ```
-   - Example command while running outside of Kubernetes cluster nodes where
-     nodes exist in private network (or) node ports are not exposed for outside cluster
-   ```sh
-   kubectl mayastor dump volumes -d /mayastor-dump -r http://127.0.0.1:30011 -l http://127.0.0.1:3100 -e http://127.0.0.1:2379 -n mayastor
-   ```
-
-   **Note**: similarly to dump pools/nodes information then replace `volumes` with an associated resource type(`pools/nodes`).
-
-3. To collect information about particular volume into an archive file
-   ```sh
-   ## Command
-   kubectl mayastor dump volume <volume_name> -d <output_directory_path> -n <mayastor_namespace>
-   ```
-
-   - Example command while running inside Kubernetes cluster nodes / system which
-     has access to cluster node ports
-   ```sh
-   kubectl mayastor dump volume volume-1 -d /mayastor-dump -n mayastor
-   ```
-   - Example command while running outside of Kubernetes cluster nodes where
-     nodes exist in private network (or) node ports are not exposed for outside cluster
-   ```sh
-   kubectl mayastor dump volume volume-1 -d /mayastor-dump -r http://127.0.0.1:30011 -l http://127.0.0.1:3100 -e http://127.0.0.1:2379 -n mayastor
-   ```
+To collect entire mayastor system information into an archive file
+```sh
+## Command
+kubectl mayastor dump system -d <output_directory_path> -n <mayastor_namespace>
+```
+ `--disable-log-collection` can be used to disable collection of logs.
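+
+For example, a run with log collection disabled (sample path and namespace,
+using the flags documented above):
+```sh
+kubectl mayastor dump system -d /mayastor-dump -n mayastor --disable-log-collection
+```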
diff --git a/k8s/supportability/Cargo.toml b/k8s/supportability/Cargo.toml index 478a719f2..38907cd3f 100644 --- a/k8s/supportability/Cargo.toml +++ b/k8s/supportability/Cargo.toml @@ -24,7 +24,6 @@ clap = { version = "4.1.4", features = ["color", "derive"] } anyhow = "1.0.69" humantime = "2.1.0" async-trait = "0.1.64" -prettytable-rs = "^0.10" serde = "1.0.152" serde_json = "1.0.93" serde_yaml = "0.9.17" diff --git a/k8s/supportability/src/collect/common.rs b/k8s/supportability/src/collect/common.rs index a5fbba82c..2d382baa7 100644 --- a/k8s/supportability/src/collect/common.rs +++ b/k8s/supportability/src/collect/common.rs @@ -1,6 +1,9 @@ -use crate::collect::{error::Error, resources::traits::Topologer, rest_wrapper::RestClient}; +use crate::collect::{error::Error, rest_wrapper::RestClient}; use chrono::Local; +#[cfg(debug_assertions)] +use crate::collect::resources::traits::Topologer; + /// DumpConfig helps to create new instance of Dumper #[derive(Debug)] pub(crate) struct DumpConfig { @@ -20,7 +23,8 @@ pub(crate) struct DumpConfig { pub(crate) kube_config_path: Option, /// Specifies the timeout value to interact with other systems pub(crate) timeout: humantime::Duration, - /// Topologer implements functionality to build topological infotmation of system + #[cfg(debug_assertions)] + /// Topologer implements functionality to build topological information of system pub(crate) topologer: Option>, pub(crate) output_format: OutputFormat, } diff --git a/k8s/supportability/src/collect/k8s_resources/client.rs b/k8s/supportability/src/collect/k8s_resources/client.rs index 9234b3f2c..968c50d26 100644 --- a/k8s/supportability/src/collect/k8s_resources/client.rs +++ b/k8s/supportability/src/collect/k8s_resources/client.rs @@ -1,13 +1,24 @@ use crate::collect::k8s_resources::common::KUBERNETES_HOST_LABEL_KEY; +use k8s_operators::diskpool::crd::DiskPool; + use k8s_openapi::api::{ apps::v1::{DaemonSet, Deployment, StatefulSet}, core::v1::{Event, Node, Pod}, }; -use k8s_operators::diskpool::crd::DiskPool; -use kube::{api::ListParams, Api, Client, Resource}; - +use kube::{ + api::{DynamicObject, ListParams}, + discovery::{verbs, Scope}, + Api, Client, Discovery, Resource, +}; use std::{collections::HashMap, convert::TryFrom}; +const SNAPSHOT_GROUP: &str = "snapshot.storage.k8s.io"; +const SNAPSHOT_VERSION: &str = "v1"; +const VOLUME_SNAPSHOT_CLASS: &str = "VolumeSnapshotClass"; +const VOLUME_SNAPSHOT_CONTENT: &str = "VolumeSnapshotContent"; +const DRIVER: &str = "driver"; +const SPEC: &str = "spec"; + /// K8sResourceError holds errors that can obtain while fetching /// information of Kubernetes Objects #[allow(clippy::enum_variant_names)] @@ -87,6 +98,43 @@ impl ClientSet { self.client.clone() } + /// Get a new api for a `dynamic_object` for the provided GVK. 
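+    ///
+    /// A usage sketch (hypothetical caller; the GVK values are the snapshot
+    /// constants defined above, and errors bubble up as `K8sResourceError`):
+    ///
+    /// ```ignore
+    /// let api = client_set
+    ///     .dynamic_object_api(None, SNAPSHOT_GROUP, SNAPSHOT_VERSION, VOLUME_SNAPSHOT_CLASS)
+    ///     .await?;
+    /// let objects = api.list(&ListParams::default()).await?;
+    /// ```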
+ pub(crate) async fn dynamic_object_api( + &self, + namespace: Option<&str>, + group_name: &str, + version: &str, + kind: &str, + ) -> Result, K8sResourceError> { + let discovery = Discovery::new(self.kube_client()).run().await?; + for group in discovery.groups() { + if group.name() == group_name { + for (ar, caps) in group.recommended_resources() { + if !caps.supports_operation(verbs::LIST) { + continue; + } + if ar.version == version && ar.kind == kind { + let result = match namespace { + None if caps.scope == Scope::Cluster => { + Ok(Api::all_with(self.kube_client(), &ar)) + } + Some(ns) if caps.scope == Scope::Namespaced => { + Ok(Api::namespaced_with(self.kube_client(), ns, &ar)) + } + _ => Err(K8sResourceError::CustomError(format!( + "DynamicObject Api not available for {kind} of {group_name}/{version}" + ))), + }; + return result; + } + } + } + } + Err(K8sResourceError::CustomError(format!( + "DynamicObject Api not available for {kind} of {group_name}/{version}" + ))) + } + /// Fetch node objects from API-server then form and return map of node name to node object pub(crate) async fn get_nodes_map(&self) -> Result, K8sResourceError> { let node_api: Api = Api::all(self.client.clone()); @@ -164,6 +212,115 @@ impl ClientSet { Ok(pools.items) } + /// Fetch list of volume snapshot classes based on the driver if provided. + pub(crate) async fn list_volumesnapshot_classes( + &self, + driver_selector: Option<&str>, + label_selector: Option<&str>, + field_selector: Option<&str>, + ) -> Result, K8sResourceError> { + let list_params = ListParams::default() + .labels(label_selector.unwrap_or_default()) + .fields(field_selector.unwrap_or_default()); + let vsc_api: Api = self + .dynamic_object_api( + None, + SNAPSHOT_GROUP, + SNAPSHOT_VERSION, + VOLUME_SNAPSHOT_CLASS, + ) + .await?; + let vscs = match vsc_api.list(&list_params).await { + Ok(val) => val, + Err(kube_error) => match kube_error { + kube::Error::Api(e) => { + if e.code == 404 { + return Ok(vec![]); + } + return Err(K8sResourceError::ClientError(kube::Error::Api(e))); + } + _ => return Err(K8sResourceError::ClientError(kube_error)), + }, + }; + Ok(vscs + .items + .into_iter() + .filter(|item| match driver_selector { + None => true, + Some(driver_selector) => match item.data.get(DRIVER) { + None => false, + Some(value) => match value.as_str() { + None => false, + Some(driver) => driver == driver_selector, + }, + }, + }) + .collect()) + } + + /// Fetch list of volume snapshot contents based on the driver if provided. 
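+    ///
+    /// Illustrative call (hypothetical caller), filtering for the mayastor
+    /// CSI driver; like the class listing above, a 404 from the API server
+    /// (snapshot CRDs not installed) yields an empty list:
+    ///
+    /// ```ignore
+    /// let contents = client_set
+    ///     .list_volumesnapshotcontents(Some("io.openebs.csi-mayastor"), None, None)
+    ///     .await?;
+    /// ```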
+ pub(crate) async fn list_volumesnapshotcontents( + &self, + driver_selector: Option<&str>, + label_selector: Option<&str>, + field_selector: Option<&str>, + ) -> Result, K8sResourceError> { + let mut list_params = ListParams::default() + .labels(label_selector.unwrap_or_default()) + .fields(field_selector.unwrap_or_default()) + .limit(2); + let vsc_api: Api = self + .dynamic_object_api( + None, + SNAPSHOT_GROUP, + SNAPSHOT_VERSION, + VOLUME_SNAPSHOT_CONTENT, + ) + .await?; + + let mut vscs_filtered: Vec = vec![]; + loop { + let vscs = match vsc_api.list(&list_params).await { + Ok(val) => val, + Err(kube_error) => match kube_error { + kube::Error::Api(e) => { + if e.code == 404 { + return Ok(vec![]); + } + return Err(K8sResourceError::ClientError(kube::Error::Api(e))); + } + _ => return Err(K8sResourceError::ClientError(kube_error)), + }, + }; + vscs_filtered.append( + &mut vscs + .items + .into_iter() + .filter(|item| match driver_selector { + None => true, + Some(driver_selector) => match item.data.get(SPEC) { + None => false, + Some(value) => match value.get(DRIVER) { + None => false, + Some(value) => match value.as_str() { + None => false, + Some(driver) => driver == driver_selector, + }, + }, + }, + }) + .collect(), + ); + match vscs.metadata.continue_ { + Some(token) if !token.is_empty() => { + list_params = list_params.continue_token(token.as_str()) + } + _ => break, + }; + } + Ok(vscs_filtered) + } + /// Fetch list of k8s events associated to given label_selector & field_selector pub(crate) async fn get_events( &self, @@ -183,8 +340,10 @@ impl ClientSet { let mut result = events_api.list(&list_params).await?; events.append(&mut result.items); match result.metadata.continue_ { - None => break, - Some(token) => list_params = list_params.continue_token(token.as_str()), + Some(token) if !token.is_empty() => { + list_params = list_params.continue_token(token.as_str()) + } + _ => break, }; } diff --git a/k8s/supportability/src/collect/k8s_resources/k8s_resource_dump.rs b/k8s/supportability/src/collect/k8s_resources/k8s_resource_dump.rs index 661a453fa..4560ee3b1 100644 --- a/k8s/supportability/src/collect/k8s_resources/k8s_resource_dump.rs +++ b/k8s/supportability/src/collect/k8s_resources/k8s_resource_dump.rs @@ -12,7 +12,15 @@ use k8s_openapi::{ use k8s_operators::diskpool::crd::DiskPool; use kube::Resource; use serde::Serialize; -use std::{collections::HashSet, fs::File, io::Write, iter::FromIterator, path::PathBuf}; +use std::{ + collections::HashSet, + fs::File, + io::Write, + iter::FromIterator, + path::{Path, PathBuf}, +}; + +const MAYASTOR_CSI_DRIVER: &str = "io.openebs.csi-mayastor"; /// K8s resource dumper client #[derive(Clone)] @@ -109,124 +117,56 @@ impl K8sResourceDumperClient { // Create the root dir path let mut root_dir = PathBuf::from(root_path); root_dir.push("k8s_resources"); - create_directory_if_not_exist(root_dir.clone())?; + create_directory_if_not_exist(root_dir.to_path_buf())?; // Create the configurations path - let mut configurations_path = root_dir.clone(); + let mut configurations_path = root_dir.to_path_buf(); configurations_path.push("configurations"); // Create the configurations directory create_directory_if_not_exist(configurations_path.clone())?; let mut errors = Vec::new(); + + // Fetch all events in provided NAMESPACE + if let Err(error) = get_k8s_events(&self.k8s_client, &root_dir).await { + errors.push(error) + } + // Fetch all Daemonsets in provided NAMESPACE - log("\t Collecting daemonsets configuration".to_string()); - match 
self.k8s_client.get_daemonsets("", "").await { - Ok(daemonsets) => { - // Create all Daemonsets configurations - let _ = create_app_configurations( - daemonsets.into_iter().map(DaemonSet).collect(), - configurations_path.clone(), - ) - .map_err(|e| errors.push(e)); - } - Err(e) => { - errors.push(K8sResourceDumperError::K8sResourceError(e)); - } + if let Err(error) = get_k8s_daemonsets(&self.k8s_client, &configurations_path).await { + errors.push(error) } // Fetch all Deployments in provided NAMESPACE - log("\t Collecting deployments configuration".to_string()); - match self.k8s_client.get_deployments("", "").await { - Ok(deploys) => { - // Create all Daemonsets configurations - let _ = create_app_configurations( - deploys.into_iter().map(Deployment).collect(), - configurations_path.clone(), - ) - .map_err(|e| errors.push(e)); - } - Err(e) => { - errors.push(K8sResourceDumperError::K8sResourceError(e)); - } + if let Err(error) = get_k8s_deployments(&self.k8s_client, &configurations_path).await { + errors.push(error) } // Fetch all StatefulSets in provided NAMESPACE - log("\t Collecting statefulsets configuration".to_string()); - match self.k8s_client.get_statefulsets("", "").await { - Ok(statefulsets) => { - // Create all Daemonsets configurations - let _ = create_app_configurations( - statefulsets.into_iter().map(StatefulSet).collect(), - configurations_path.clone(), - ) - .map_err(|e| errors.push(e)); - } - Err(e) => { - errors.push(K8sResourceDumperError::K8sResourceError(e)); - } + if let Err(error) = get_k8s_statefulsets(&self.k8s_client, &configurations_path).await { + errors.push(error) } - // Fetch all events in provided NAMESPACE - log("\t Collecting Kubernetes events".to_string()); - match self.k8s_client.get_events("", "").await { - Ok(mut events) => { - // Sort the events based on event_time - events.sort_unstable_by_key(event_time); - // NOTE: Unmarshalling object recevied from K8s API-server will not fail - let _ = create_file_and_write( - root_dir.clone(), - "k8s_events.json".to_string(), - serde_json::to_string_pretty(&events)?, - ) - .map_err(|e| errors.push(K8sResourceDumperError::IOError(e))); - } - Err(e) => { - errors.push(K8sResourceDumperError::K8sResourceError(e)); - } + // Fetch all DiskPools in provided NAMESPACE + if let Err(error) = get_k8s_diskpools(&self.k8s_client, &root_dir, required_pools).await { + errors.push(error) } - // Fetch all DiskPools in provided NAMESPACE - log("\t Collecting Kubernetes disk pool resources".to_string()); - match self.k8s_client.list_pools(None, None).await { - Ok(disk_pools) => { - let filtered_pools = match required_pools { - Some(p_names) => { - let names: HashSet = HashSet::from_iter(p_names); - disk_pools - .into_iter() - .filter(|p| names.contains(p.meta().name.as_ref().unwrap())) - .collect::>() - } - None => disk_pools, - }; - // NOTE: Unmarshalling object recevied from K8s API-server will not fail - let _ = create_file_and_write( - root_dir.clone(), - "k8s_disk_pools.yaml".to_string(), - serde_yaml::to_string(&filtered_pools)?, - ) - .map_err(|e| errors.push(K8sResourceDumperError::IOError(e))); - } - Err(e) => { - errors.push(K8sResourceDumperError::K8sResourceError(e)); - } + // Fetch all VolumeSnapshotClasses for mayastor csi driver + if let Err(error) = get_k8s_vs_classes(&self.k8s_client, &root_dir).await { + errors.push(error) + } + + // Fetch all VolumeSnapshotContents for mayastor csi driver + if let Err(error) = get_k8s_vsnapshot_contents(&self.k8s_client, &root_dir).await { + errors.push(error) } // Fetch all 
Pods in provided NAMESPACE - log("\t Collecting Kuberbetes pod resources".to_string()); - match self.k8s_client.get_pods("", "").await { - Ok(pods) => { - let _ = create_file_and_write( - root_dir.clone(), - "pods.yaml".to_string(), - serde_yaml::to_string(&pods)?, - ) - .map_err(|e| errors.push(K8sResourceDumperError::IOError(e))); - } - Err(e) => { - errors.push(K8sResourceDumperError::K8sResourceError(e)); - } + if let Err(error) = get_k8s_pod_configurations(&self.k8s_client, &root_dir).await { + errors.push(error) } + if !errors.is_empty() { return Err(K8sResourceDumperError::MultipleErrors(errors)); } @@ -297,3 +237,181 @@ fn event_time(event: &Event) -> MicroTime { } event.event_time.as_ref().unwrap().clone() } + +async fn get_k8s_daemonsets( + k8s_client: &ClientSet, + configurations_path: &Path, +) -> Result<(), K8sResourceDumperError> { + // Fetch all Daemonsets in provided NAMESPACE + log("\t Collecting daemonsets configuration".to_string()); + match k8s_client.get_daemonsets("", "").await { + Ok(daemonsets) => { + // Create all Daemonsets configurations + create_app_configurations( + daemonsets.into_iter().map(DaemonSet).collect(), + configurations_path.to_path_buf(), + )?; + Ok(()) + } + Err(error) => Err(K8sResourceDumperError::K8sResourceError(error)), + } +} + +async fn get_k8s_deployments( + k8s_client: &ClientSet, + configurations_path: &Path, +) -> Result<(), K8sResourceDumperError> { + // Fetch all Deployments in provided NAMESPACE + log("\t Collecting deployments configuration".to_string()); + match k8s_client.get_deployments("", "").await { + Ok(deploys) => { + // Create all Daemonsets configurations + create_app_configurations( + deploys.into_iter().map(Deployment).collect(), + configurations_path.to_path_buf(), + )?; + Ok(()) + } + Err(error) => Err(K8sResourceDumperError::K8sResourceError(error)), + } +} + +async fn get_k8s_statefulsets( + k8s_client: &ClientSet, + configurations_path: &Path, +) -> Result<(), K8sResourceDumperError> { + // Fetch all StatefulSets in provided NAMESPACE + log("\t Collecting statefulsets configuration".to_string()); + match k8s_client.get_statefulsets("", "").await { + Ok(statefulsets) => { + // Create all Daemonsets configurations + create_app_configurations( + statefulsets.into_iter().map(StatefulSet).collect(), + configurations_path.to_path_buf(), + )?; + Ok(()) + } + Err(error) => Err(K8sResourceDumperError::K8sResourceError(error)), + } +} + +async fn get_k8s_diskpools( + k8s_client: &ClientSet, + root_dir: &Path, + required_pools: Option>, +) -> Result<(), K8sResourceDumperError> { + // Fetch all DiskPools in provided NAMESPACE + log("\t Collecting Kubernetes disk pool resources".to_string()); + match k8s_client.list_pools(None, None).await { + Ok(disk_pools) => { + let filtered_pools = match required_pools { + Some(p_names) => { + let names: HashSet = HashSet::from_iter(p_names); + disk_pools + .into_iter() + .filter(|p| names.contains(p.meta().name.as_ref().unwrap())) + .collect::>() + } + None => disk_pools, + }; + // NOTE: Unmarshalling object recevied from K8s API-server will not fail + create_file_and_write( + root_dir.to_path_buf(), + "k8s_disk_pools.yaml".to_string(), + serde_yaml::to_string(&filtered_pools)?, + ) + .map_err(K8sResourceDumperError::IOError)?; + Ok(()) + } + Err(error) => Err(K8sResourceDumperError::K8sResourceError(error)), + } +} + +async fn get_k8s_vs_classes( + k8s_client: &ClientSet, + root_dir: &Path, +) -> Result<(), K8sResourceDumperError> { + log("\t Collecting Kubernetes VolumeSnapshotClass 
resources".to_string()); + match k8s_client + .list_volumesnapshot_classes(Some(MAYASTOR_CSI_DRIVER), None, None) + .await + { + Ok(vscs) => { + // NOTE: Unmarshalling object recevied from K8s API-server will not fail + create_file_and_write( + root_dir.to_path_buf(), + "volume_snapshot_classes.yaml".to_string(), + serde_yaml::to_string(&vscs)?, + ) + .map_err(K8sResourceDumperError::IOError)?; + Ok(()) + } + Err(error) => Err(K8sResourceDumperError::K8sResourceError(error)), + } +} + +async fn get_k8s_vsnapshot_contents( + k8s_client: &ClientSet, + root_dir: &Path, +) -> Result<(), K8sResourceDumperError> { + log("\t Collecting Kubernetes VolumeSnapshotContents resources".to_string()); + match k8s_client + .list_volumesnapshotcontents(Some(MAYASTOR_CSI_DRIVER), None, None) + .await + { + Ok(vscs) => { + // NOTE: Unmarshalling object recevied from K8s API-server will not fail + create_file_and_write( + root_dir.to_path_buf(), + "volume_snapshot_contents.yaml".to_string(), + serde_yaml::to_string(&vscs)?, + ) + .map_err(K8sResourceDumperError::IOError)?; + Ok(()) + } + Err(error) => Err(K8sResourceDumperError::K8sResourceError(error)), + } +} + +async fn get_k8s_pod_configurations( + k8s_client: &ClientSet, + root_dir: &Path, +) -> Result<(), K8sResourceDumperError> { + // Fetch all Pods in provided NAMESPACE + log("\t Collecting Kuberbetes pod resources".to_string()); + match k8s_client.get_pods("", "").await { + Ok(pods) => { + create_file_and_write( + root_dir.to_path_buf(), + "pods.yaml".to_string(), + serde_yaml::to_string(&pods)?, + ) + .map_err(K8sResourceDumperError::IOError)?; + Ok(()) + } + Err(error) => Err(K8sResourceDumperError::K8sResourceError(error)), + } +} + +async fn get_k8s_events( + k8s_client: &ClientSet, + root_dir: &Path, +) -> Result<(), K8sResourceDumperError> { + // Fetch all events in provided NAMESPACE + log("\t Collecting Kubernetes events".to_string()); + match k8s_client.get_events("", "").await { + Ok(mut events) => { + // Sort the events based on event_time + events.sort_unstable_by_key(event_time); + // NOTE: Unmarshalling object recevied from K8s API-server will not fail + create_file_and_write( + root_dir.to_path_buf(), + "k8s_events.json".to_string(), + serde_json::to_string_pretty(&events)?, + ) + .map_err(K8sResourceDumperError::IOError)?; + Ok(()) + } + Err(error) => Err(K8sResourceDumperError::K8sResourceError(error)), + } +} diff --git a/k8s/supportability/src/collect/resource_dump.rs b/k8s/supportability/src/collect/resource_dump.rs index a54b15412..24631506c 100644 --- a/k8s/supportability/src/collect/resource_dump.rs +++ b/k8s/supportability/src/collect/resource_dump.rs @@ -1,28 +1,40 @@ use crate::{ collect::{ archive, common, - common::{DumpConfig, Stringer}, - constants::MAYASTOR_SERVICE, + common::DumpConfig, error::Error, - k8s_resources::k8s_resource_dump::K8sResourceDumperClient, - logs::{LogCollection, LogResource, Logger}, persistent_store::{etcd::EtcdStore, EtcdError}, - resources::traits::Topologer, - utils::{flush_tool_log_file, init_tool_log_file, write_to_log_file}, + utils::init_tool_log_file, }, log, OutputFormat, }; -use futures::future; + use std::{path::PathBuf, process}; +#[cfg(debug_assertions)] +use crate::collect::{ + common::Stringer, + constants::MAYASTOR_SERVICE, + k8s_resources::k8s_resource_dump::K8sResourceDumperClient, + logs::LogCollection, + logs::{LogResource, Logger}, + resources::traits::Topologer, + utils::{flush_tool_log_file, write_to_log_file}, +}; +#[cfg(debug_assertions)] +use futures::future; + /// 
Dumper interacts with various services to collect information like mayastor resource(s), /// mayastor service logs and state of mayastor artifacts and mayastor specific artifacts from /// etcd pub(crate) struct ResourceDumper { + #[cfg(debug_assertions)] topologer: Option>, archive: archive::Archive, dir_path: String, + #[cfg(debug_assertions)] logger: Box, + #[cfg(debug_assertions)] k8s_resource_dumper: K8sResourceDumperClient, etcd_dumper: Option, output_format: OutputFormat, @@ -69,6 +81,7 @@ impl ResourceDumper { } }; + #[cfg(debug_assertions)] let logger = match LogCollection::new_logger( config.kube_config_path.clone(), config.namespace.clone(), @@ -87,6 +100,7 @@ impl ResourceDumper { } }; + #[cfg(debug_assertions)] let k8s_resource_dumper = match K8sResourceDumperClient::new( config.kube_config_path.clone(), config.namespace.clone(), @@ -117,16 +131,20 @@ impl ResourceDumper { }; ResourceDumper { + #[cfg(debug_assertions)] topologer: config.topologer, archive, dir_path: new_dir, + #[cfg(debug_assertions)] logger, + #[cfg(debug_assertions)] k8s_resource_dumper, etcd_dumper, output_format: config.output_format, } } + #[cfg(debug_assertions)] /// Dumps information associated to given resource(s) pub(crate) async fn dump_info(&mut self, folder_path: String) -> Result<(), Error> { let mut errors = Vec::new(); @@ -268,6 +286,7 @@ impl ResourceDumper { Ok(()) } + #[cfg(debug_assertions)] /// Copies the temporary directory content into archive and delete temporary directory pub fn fill_archive_and_delete_tmp(&mut self) -> Result<(), Error> { // Log which is visible in archive system log file diff --git a/k8s/supportability/src/collect/resources/mod.rs b/k8s/supportability/src/collect/resources/mod.rs index 79733a928..6f7190989 100644 --- a/k8s/supportability/src/collect/resources/mod.rs +++ b/k8s/supportability/src/collect/resources/mod.rs @@ -5,6 +5,7 @@ pub mod error; pub mod node; pub mod pool; pub mod replica; +pub mod snapshot; pub mod traits; pub mod utils; pub mod volume; diff --git a/k8s/supportability/src/collect/resources/node.rs b/k8s/supportability/src/collect/resources/node.rs index c845fa972..020fd9dc2 100644 --- a/k8s/supportability/src/collect/resources/node.rs +++ b/k8s/supportability/src/collect/resources/node.rs @@ -1,15 +1,11 @@ use crate::{ collect::{ - logs::create_directory_if_not_exist, - resources, - resources::{traits, utils}, - rest_wrapper::RestClient, + logs::create_directory_if_not_exist, resources, resources::traits, rest_wrapper::RestClient, }, log, }; use async_trait::async_trait; use openapi::models::{BlockDevice, Node}; -use prettytable::Row; use resources::ResourceError; use serde::{Deserialize, Serialize}; use std::{ @@ -130,26 +126,6 @@ impl Topologer for NodeTopology { } } -/// TablePrinter holds methods to display node information in Tabular Manner -impl traits::TablePrinter for Node { - fn get_header_row(&self) -> Row { - utils::NODE_HEADERS.clone() - } - - fn create_rows(&self) -> Vec { - let mut row = vec![row![self.id]]; - let state = self.state.clone(); - if let Some(node_state) = state { - row = vec![row![node_state.id, node_state.status]]; - } - row - } - - fn get_resource_id(&self, row_data: &Row) -> Result { - Ok(row_data.get_cell(1).unwrap().get_content()) - } -} - // Wrapper around mayastor REST client #[derive(Debug)] pub(crate) struct NodeClientWrapper { @@ -205,16 +181,6 @@ impl NodeClientWrapper { impl Resourcer for NodeClientWrapper { type ID = String; - async fn read_resource_id(&self) -> Result { - let nodes = 
self.list_nodes().await?; - if nodes.is_empty() { - println!("No Node resources, Are daemonset pods in Running State?!!"); - return Err(ResourceError::CustomError("No Node resources".to_string())); - } - let node_id = utils::print_table_and_get_id(nodes)?; - Ok(node_id) - } - async fn get_topologer( &self, id: Option, diff --git a/k8s/supportability/src/collect/resources/pool.rs b/k8s/supportability/src/collect/resources/pool.rs index bbae2ed09..1eda2cc5d 100644 --- a/k8s/supportability/src/collect/resources/pool.rs +++ b/k8s/supportability/src/collect/resources/pool.rs @@ -1,15 +1,11 @@ use crate::{ collect::{ - logs::create_directory_if_not_exist, - resources, - resources::{traits, utils}, - rest_wrapper::RestClient, + logs::create_directory_if_not_exist, resources, resources::traits, rest_wrapper::RestClient, }, log, }; use async_trait::async_trait; use openapi::models::{BlockDevice, Node, Pool}; -use prettytable::Row; use resources::ResourceError; use serde::{Deserialize, Serialize}; use std::{ @@ -122,32 +118,6 @@ impl Topologer for PoolTopology { } } -// TablePrinter holds methods to display pool information in tabular manner -impl traits::TablePrinter for Pool { - fn get_header_row(&self) -> Row { - utils::POOL_HEADERS.clone() - } - - fn create_rows(&self) -> Vec { - let mut pool_node: String = String::new(); - let mut disks: String = String::new(); - // TODO: Is it Ok to assign pool status? - let mut pool_status = openapi::models::PoolStatus::Unknown; - if let Some(pool_spec) = &self.spec { - pool_node = pool_spec.node.clone(); - disks = pool_spec.disks.join(","); - } - if let Some(pool_state) = &self.state { - pool_status = pool_state.status; - } - vec![row![self.id, disks, pool_node, pool_status]] - } - - fn get_resource_id(&self, row_data: &Row) -> Result { - Ok(row_data.get_cell(1).unwrap().get_content()) - } -} - /// Wrapper around mayastor REST client which interns used to interact with REST client #[derive(Debug)] pub struct PoolClientWrapper { @@ -229,16 +199,6 @@ fn is_it_pool_device(pool_spec: openapi::models::PoolSpec, device: &BlockDevice) impl Resourcer for PoolClientWrapper { type ID = String; - async fn read_resource_id(&self) -> Result { - let pools = self.list_pools().await?; - if pools.is_empty() { - log("No Pool resources, Are Pools created?!!".to_string()); - return Err(ResourceError::CustomError("No Pool resources".to_string())); - } - let pool_id = utils::print_table_and_get_id(pools)?; - Ok(pool_id) - } - async fn get_topologer( &self, id: Option, diff --git a/k8s/supportability/src/collect/resources/snapshot.rs b/k8s/supportability/src/collect/resources/snapshot.rs new file mode 100644 index 000000000..a4d5688c2 --- /dev/null +++ b/k8s/supportability/src/collect/resources/snapshot.rs @@ -0,0 +1,130 @@ +use crate::collect::{ + logs::create_directory_if_not_exist, + resources::{ + traits::{ResourceInformation, Topologer}, + utils, ResourceError, Resourcer, + }, + rest_wrapper::RestClient, +}; +use async_trait::async_trait; +use openapi::models::VolumeSnapshot; +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashSet, + fs::File, + io::Write, + path::{Path, PathBuf}, +}; + +/// Holds topological information of volume snapshot resource. +#[derive(Debug, Serialize, Deserialize, Clone)] +pub(crate) struct VolumeSnapshotTopology { + snapshot: VolumeSnapshot, +} + +/// Implements functionality to inspect topological information of snapshot resource. 
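+///
+/// Sketch of how the system dumper drives this impl (see `system_dump.rs`
+/// below; the directory is the temporary dump directory):
+///
+/// ```ignore
+/// let topologer = VolumeSnapshotClientWrapper::new(rest_client)
+///     .get_topologer(None)
+///     .await?;
+/// topologer.dump_topology_info(format!("{dir_path}/topology/snapshot"))?;
+/// ```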
+impl Topologer for VolumeSnapshotTopology {
+    fn get_printable_topology(&self) -> Result<(String, String), ResourceError> {
+        let topology_as_pretty = serde_json::to_string_pretty(self)?;
+        let file_path = format!(
+            "snapshot-{}-topology.json",
+            self.snapshot.definition.spec.uuid
+        );
+        Ok((file_path, topology_as_pretty))
+    }
+
+    fn dump_topology_info(&self, dir_path: String) -> Result<(), ResourceError> {
+        create_directory_if_not_exist(PathBuf::from(dir_path.clone()))?;
+        let file_path = Path::new(&dir_path).join(format!(
+            "snapshot-{}-topology.json",
+            self.snapshot.definition.spec.uuid
+        ));
+        let mut topo_file = File::create(file_path)?;
+        let topology_as_pretty = serde_json::to_string_pretty(self)?;
+        topo_file.write_all(topology_as_pretty.as_bytes())?;
+        topo_file.flush()?;
+        Ok(())
+    }
+
+    fn get_unhealthy_resource_info(&self) -> HashSet<ResourceInformation> {
+        // Not needed for snapshot topology.
+        unimplemented!()
+    }
+
+    fn get_all_resource_info(&self) -> HashSet<ResourceInformation> {
+        // Not needed for snapshot topology.
+        unimplemented!()
+    }
+
+    fn get_k8s_resource_names(&self) -> Vec<String> {
+        // Not needed for snapshot topology.
+        unimplemented!()
+    }
+}
+
+/// Wrapper around mayastor REST client.
+#[derive(Debug)]
+pub(crate) struct VolumeSnapshotClientWrapper {
+    rest_client: RestClient,
+}
+
+impl VolumeSnapshotClientWrapper {
+    /// Builds new instance of VolumeSnapshotClientWrapper
+    pub(crate) fn new(client: RestClient) -> Self {
+        VolumeSnapshotClientWrapper {
+            rest_client: client,
+        }
+    }
+
+    async fn list_snapshots(&self) -> Result<Vec<VolumeSnapshot>, ResourceError> {
+        let mut volume_snapshots: Vec<VolumeSnapshot> = Vec::new();
+        let mut next_token: Option<isize> = Some(0);
+        let max_entries: isize = utils::MAX_RESOURCE_ENTRIES;
+        loop {
+            let snapshot_api_resp = self
+                .rest_client
+                .snapshots_api()
+                .get_volumes_snapshots(max_entries, None, None, next_token)
+                .await?
+                .into_body();
+            volume_snapshots.extend(snapshot_api_resp.entries);
+            if snapshot_api_resp.next_token.is_none() {
+                break;
+            }
+            next_token = snapshot_api_resp.next_token;
+        }
+        Ok(volume_snapshots)
+    }
+
+    async fn get_snapshot(
+        &self,
+        id: openapi::apis::Uuid,
+    ) -> Result<VolumeSnapshot, ResourceError> {
+        let snapshot = self
+            .rest_client
+            .snapshots_api()
+            .get_volumes_snapshot(&id)
+            .await?
+            .into_body();
+        Ok(snapshot)
+    }
+}
+
+#[async_trait(?Send)]
+impl Resourcer for VolumeSnapshotClientWrapper {
+    type ID = openapi::apis::Uuid;
+
+    async fn get_topologer(
+        &self,
+        id: Option<Self::ID>,
+    ) -> Result<Box<dyn Topologer>, ResourceError> {
+        if let Some(snapshot_id) = id {
+            let snapshot = self.get_snapshot(snapshot_id).await?;
+            return Ok(Box::new(VolumeSnapshotTopology { snapshot }));
+        }
+        let snapshots_topology: Vec<VolumeSnapshotTopology> = self
+            .list_snapshots()
+            .await?
+ .into_iter() + .map(|snapshot| VolumeSnapshotTopology { snapshot }) + .collect(); + Ok(Box::new(snapshots_topology)) + } +} diff --git a/k8s/supportability/src/collect/resources/traits.rs b/k8s/supportability/src/collect/resources/traits.rs index 48ae71252..c4f920573 100644 --- a/k8s/supportability/src/collect/resources/traits.rs +++ b/k8s/supportability/src/collect/resources/traits.rs @@ -2,7 +2,6 @@ use crate::collect::{constants::DATA_PLANE_CONTAINER_NAME, resources::error::Res use async_trait::async_trait; use downcast_rs::{impl_downcast, Downcast}; use lazy_static::lazy_static; -use prettytable::Row; use std::{ collections::{HashMap, HashSet}, fmt::Debug, @@ -72,13 +71,6 @@ impl ResourceInformation { } } -/// Implements functionality for displaying information in tabular manner and reading inputs -pub(crate) trait TablePrinter { - fn get_header_row(&self) -> Row; - fn create_rows(&self) -> Vec; - fn get_resource_id(&self, row_data: &Row) -> Result; -} - /// Implements functionality to inspect topology information pub(crate) trait Topologer: Downcast + Debug { fn get_printable_topology(&self) -> Result<(String, String), ResourceError>; @@ -93,9 +85,6 @@ impl_downcast!(Topologer); #[async_trait(?Send)] pub(crate) trait Resourcer { type ID; - async fn read_resource_id(&self) -> Result { - panic!("read_resource_id is not yet implemented"); - } async fn get_topologer( &self, _id: Option, diff --git a/k8s/supportability/src/collect/resources/utils.rs b/k8s/supportability/src/collect/resources/utils.rs index dfb0046b4..8b96e7258 100644 --- a/k8s/supportability/src/collect/resources/utils.rs +++ b/k8s/supportability/src/collect/resources/utils.rs @@ -1,90 +1,13 @@ use crate::collect::resources::{ - traits::{ResourceInformation, TablePrinter, Topologer}, + traits::{ResourceInformation, Topologer}, ResourceError, }; -use lazy_static::lazy_static; -use prettytable::{format, Row, Table}; -use serde::{ser, Serialize}; -use std::{collections::HashSet, io}; +use serde::Serialize; +use std::collections::HashSet; /// Defines maximum entries REST service can fetch at one network call pub(crate) const MAX_RESOURCE_ENTRIES: isize = 200; -// Constants to store the table headers of the Tabular output formats. -lazy_static! 
{ - /// Represents list of Volume table headers - pub(crate) static ref VOLUME_HEADERS: Row = row!["INDEX", "ID", "STATUS", "SIZE",]; - /// Represents list of pool table headers - pub(crate) static ref POOL_HEADERS: Row = row!["INDEX", "ID", "DISKS", "NODE", "STATUS",]; - /// Represents list of node table headers - pub(crate) static ref NODE_HEADERS: Row = row!["INDEX", "ID", "STATUS"]; -} - -/// Prints list of resources in tabular format and read input based on index -pub(crate) fn print_table_and_get_id(obj: T) -> Result -where - T: TablePrinter, - T: ser::Serialize, -{ - let rows: Vec = obj.create_rows(); - let header: Row = obj.get_header_row(); - let mut table = Table::new(); - - table.set_format(*format::consts::FORMAT_CLEAN); - table.set_titles(header); - for row in rows { - table.add_row(row); - } - table.printstd(); - - println!("Please enter index of resource: "); - let mut index: usize; - let mut input_line = String::new(); - loop { - input_line.clear(); - let _no_of_chars = io::stdin().read_line(&mut input_line)?; - let trimmed_input = input_line.trim().trim_end_matches('\n'); - if trimmed_input.is_empty() { - continue; - } - index = trimmed_input.parse::().unwrap(); - if index > table.len() { - println!("Please enter number in range of (1, {})", table.len()); - continue; - } - break; - } - index -= 1; - let row_data = table.get_row(index).ok_or_else(|| { - ResourceError::CustomError("Unable to get resource information".to_string()) - })?; - obj.get_resource_id(row_data) -} - -impl TablePrinter for Vec -where - T: TablePrinter, -{ - fn create_rows(&self) -> Vec { - self.iter() - .enumerate() - .flat_map(|(index, r)| -> Vec { - let mut row_data: Row = r.create_rows()[0].clone(); - row_data.insert_cell(0, cell![index + 1]); - vec![row_data] - }) - .collect() - } - - fn get_header_row(&self) -> Row { - self.get(0).map(|obj| obj.get_header_row()).unwrap() - } - - fn get_resource_id(&self, row_data: &Row) -> Result { - self.get(0).unwrap().get_resource_id(row_data) - } -} - impl Topologer for Vec where T: Topologer + Serialize, diff --git a/k8s/supportability/src/collect/resources/volume.rs b/k8s/supportability/src/collect/resources/volume.rs index 53efb6e14..ac1c309da 100644 --- a/k8s/supportability/src/collect/resources/volume.rs +++ b/k8s/supportability/src/collect/resources/volume.rs @@ -11,8 +11,7 @@ use crate::{ log, }; use async_trait::async_trait; -use openapi::models::{Nexus, Volume}; -use prettytable::Row; +use openapi::models::{Nexus, RebuildHistory, Volume}; use resources::ResourceError; use serde::{Deserialize, Serialize}; use std::{ @@ -32,6 +31,7 @@ pub(crate) struct VolumeTopology { volume: Volume, target: Option, replicas_topology: Vec, + rebuild_history: Option, } /// Implements functionality to inspect topological information of volume resource @@ -93,22 +93,6 @@ impl Topologer for VolumeTopology { } } -// TablePrinter implements functionality to list volume resources in tabular format -impl traits::TablePrinter for openapi::models::Volume { - fn get_header_row(&self) -> Row { - utils::VOLUME_HEADERS.clone() - } - - fn create_rows(&self) -> Vec { - let state = self.state.clone(); - vec![row![state.uuid, state.status, state.size]] - } - - fn get_resource_id(&self, row_data: &Row) -> Result { - Ok(row_data.get_cell(1).unwrap().get_content()) - } -} - // Wrapper around mayastor REST client #[derive(Debug)] pub(crate) struct VolumeClientWrapper { @@ -162,25 +146,25 @@ impl VolumeClientWrapper { let topology = self.replica_client.get_replica_topology(id).await?; 
Ok(topology) } + + async fn get_rebuild_history( + &self, + id: openapi::apis::Uuid, + ) -> Result { + let rebuild_history = self + .rest_client + .volumes_api() + .get_rebuild_history(&id) + .await? + .into_body(); + Ok(rebuild_history) + } } #[async_trait(?Send)] impl Resourcer for VolumeClientWrapper { type ID = openapi::apis::Uuid; - async fn read_resource_id(&self) -> Result { - let volumes = self.list_volumes().await?; - if volumes.is_empty() { - log("No Volume resources, Are Volumes created?!!".to_string()); - return Err(ResourceError::CustomError( - "Volume resources doesn't exist".to_string(), - )); - } - let uuid_str = utils::print_table_and_get_id(volumes)?; - let volume_uuid = openapi::apis::Uuid::parse_str(uuid_str.as_str())?; - Ok(volume_uuid) - } - async fn get_topologer( &self, id: Option, @@ -206,11 +190,21 @@ impl Resourcer for VolumeClientWrapper { replicas_topology.push(topology); } } + let rebuild_history = match self.get_rebuild_history(volume_id).await { + Ok(rebuild_history) => Some(rebuild_history), + Err(error) => { + log(format!( + "Could not fetch rebuild history for {volume_id}, error: {error:?}" + )); + None + } + }; return Ok(Box::new(VolumeTopology { volume: volume.clone(), target: volume.state.target, replicas_topology, + rebuild_history, })); } @@ -238,10 +232,21 @@ impl Resourcer for VolumeClientWrapper { replicas_topology.push(topology); } } + let rebuild_history = match self.get_rebuild_history(volume.spec.uuid).await { + Ok(rebuild_history) => Some(rebuild_history), + Err(error) => { + log(format!( + "Could not fetch rebuild history for {}, error: {error:?}", + volume.spec.uuid + )); + None + } + }; volumes_topology.push(VolumeTopology { volume: volume.clone(), target: volume.state.target.clone(), replicas_topology, + rebuild_history, }) } Ok(Box::new(volumes_topology)) diff --git a/k8s/supportability/src/collect/system_dump.rs b/k8s/supportability/src/collect/system_dump.rs index fba9e666b..bebd3fafd 100644 --- a/k8s/supportability/src/collect/system_dump.rs +++ b/k8s/supportability/src/collect/system_dump.rs @@ -8,8 +8,9 @@ use crate::{ logs::{LogCollection, LogError, LogResource, Logger}, persistent_store::etcd::EtcdStore, resources::{ - node::NodeClientWrapper, pool::PoolClientWrapper, traits::Topologer, - volume::VolumeClientWrapper, Resourcer, + node::NodeClientWrapper, pool::PoolClientWrapper, + snapshot::VolumeSnapshotClientWrapper, traits::Topologer, volume::VolumeClientWrapper, + Resourcer, }, rest_wrapper::RestClient, utils::{flush_tool_log_file, init_tool_log_file, write_to_log_file}, @@ -28,6 +29,7 @@ pub(crate) struct SystemDumper { logger: Box, k8s_resource_dumper: K8sResourceDumperClient, etcd_dumper: Option, + disable_log_collection: bool, } impl SystemDumper { @@ -35,7 +37,10 @@ impl SystemDumper { /// 1.1 Create new archive in given directory and create temporary directory /// in given directory to generate dump files /// 1.2 Instantiate all required objects to interact with various other modules - pub(crate) async fn get_or_panic_system_dumper(config: DumpConfig) -> Self { + pub(crate) async fn get_or_panic_system_dumper( + config: DumpConfig, + disable_log_collection: bool, + ) -> Self { // Creates a temporary directory inside user provided directory, to store // artifacts. If creation is failed then we can't continue the process. 
let new_dir = match common::create_and_get_tmp_directory(config.output_directory.clone()) { @@ -114,6 +119,7 @@ impl SystemDumper { logger, k8s_resource_dumper, etcd_dumper, + disable_log_collection, } } @@ -178,6 +184,22 @@ impl SystemDumper { Err(e) => errors.push(Error::ResourceError(e)), }; + match VolumeSnapshotClientWrapper::new(self.rest_client.clone()) + .get_topologer(None) + .await + { + Ok(topologer) => { + log("\t Collecting snapshot topology information".to_string()); + let _ = topologer + .dump_topology_info(format!("{}/topology/snapshot", self.dir_path.clone())) + .map_err(|e| { + errors.push(Error::ResourceError(e)); + log("\t Failed to dump snapshot topology information".to_string()); + }); + } + Err(e) => errors.push(Error::ResourceError(e)), + }; + // Dump information of all pools topologies exist in the system match PoolClientWrapper::new(self.rest_client.clone()) .get_topologer(None) @@ -216,9 +238,11 @@ impl SystemDumper { }; log("Completed collection of topology information".to_string()); - if let Err(error) = self.collect_and_dump_loki_logs(node_topologer).await { - log("Error occurred while collecting logs".to_string()); - errors.push(Error::LogCollectionError(error)); + if !self.disable_log_collection { + if let Err(error) = self.collect_and_dump_loki_logs(node_topologer).await { + log("Error occurred while collecting logs".to_string()); + errors.push(Error::LogCollectionError(error)); + } } log("Collecting Kubernetes resources specific to mayastor service".to_string()); diff --git a/k8s/supportability/src/lib.rs b/k8s/supportability/src/lib.rs index 8472dcd1f..1cc98abe3 100644 --- a/k8s/supportability/src/lib.rs +++ b/k8s/supportability/src/lib.rs @@ -1,6 +1,3 @@ -#[macro_use] -extern crate prettytable; - pub mod collect; pub mod operations; @@ -8,14 +5,14 @@ use collect::{ common::DumpConfig, error::Error, resource_dump::ResourceDumper, - resources::{ - node::NodeClientWrapper, pool::PoolClientWrapper, traits::Topologer, - volume::VolumeClientWrapper, Resourcer, - }, + resources::{node::NodeClientWrapper, Resourcer}, rest_wrapper, }; use operations::{Operations, Resource}; +#[cfg(debug_assertions)] +use collect::resources::{pool::PoolClientWrapper, traits::Topologer, volume::VolumeClientWrapper}; + use crate::collect::{common::OutputFormat, utils::log}; use std::path::PathBuf; @@ -103,6 +100,7 @@ impl SupportArgs { resource: Resource, ) -> Result<(), Error> { let cli_args = self; + #[cfg(debug_assertions)] let topologer: Box; let mut config = DumpConfig { rest_client: rest_client.clone(), @@ -113,6 +111,7 @@ impl SupportArgs { since: cli_args.since, kube_config_path, timeout: cli_args.timeout, + #[cfg(debug_assertions)] topologer: None, output_format: OutputFormat::Tar, }; @@ -120,7 +119,8 @@ impl SupportArgs { match resource { Resource::Loki => { let mut system_dumper = - collect::system_dump::SystemDumper::get_or_panic_system_dumper(config).await; + collect::system_dump::SystemDumper::get_or_panic_system_dumper(config, true) + .await; let node_topologer = NodeClientWrapper::new(system_dumper.rest_client()) .get_topologer(None) .await @@ -135,9 +135,13 @@ impl SupportArgs { errors.push(e); } } - Resource::System => { + Resource::System(args) => { let mut system_dumper = - collect::system_dump::SystemDumper::get_or_panic_system_dumper(config).await; + collect::system_dump::SystemDumper::get_or_panic_system_dumper( + config, + args.disable_log_collection, + ) + .await; if let Err(e) = system_dumper.dump_system().await { // NOTE: We also need to log error content 
into Supportability log file log(format!("Failed to dump system state, error: {e:?}")); @@ -148,6 +152,7 @@ impl SupportArgs { errors.push(e); } } + #[cfg(debug_assertions)] Resource::Volumes => { let volume_client = VolumeClientWrapper::new(rest_client); topologer = volume_client.get_topologer(None).await?; @@ -162,6 +167,7 @@ impl SupportArgs { errors.push(e); } } + #[cfg(debug_assertions)] Resource::Volume { id } => { let volume_client = VolumeClientWrapper::new(rest_client); topologer = volume_client.get_topologer(Some(id)).await?; @@ -178,6 +184,7 @@ impl SupportArgs { errors.push(e); } } + #[cfg(debug_assertions)] Resource::Pools => { let pool_client = PoolClientWrapper::new(rest_client); topologer = pool_client.get_topologer(None).await?; @@ -192,6 +199,7 @@ impl SupportArgs { errors.push(e); } } + #[cfg(debug_assertions)] Resource::Pool { id } => { let pool_client = PoolClientWrapper::new(rest_client); topologer = pool_client.get_topologer(Some(id.to_string())).await?; @@ -208,6 +216,7 @@ impl SupportArgs { errors.push(e); } } + #[cfg(debug_assertions)] Resource::Nodes => { let node_client = NodeClientWrapper { rest_client }; topologer = node_client.get_topologer(None).await?; @@ -222,6 +231,7 @@ impl SupportArgs { errors.push(e); } } + #[cfg(debug_assertions)] Resource::Node { id } => { let node_client = NodeClientWrapper { rest_client }; topologer = node_client.get_topologer(Some(id.to_string())).await?; diff --git a/k8s/supportability/src/operations.rs b/k8s/supportability/src/operations.rs index 9a460d1c9..1ef56b931 100644 --- a/k8s/supportability/src/operations.rs +++ b/k8s/supportability/src/operations.rs @@ -1,9 +1,12 @@ +#[cfg(debug_assertions)] /// Represents type of VolumeID pub(crate) type VolumeID = openapi::apis::Uuid; +#[cfg(debug_assertions)] /// Represents type of PoolID pub(crate) type PoolID = String; +#[cfg(debug_assertions)] /// Represents type of NodeID pub(crate) type NodeID = String; @@ -15,34 +18,47 @@ pub(crate) enum Operations { Dump(Resource), } +#[derive(Debug, Clone, clap::Args)] +pub(crate) struct SystemDumpArgs { + /// Set this to disable log collection + #[clap(global = true, long)] + pub(crate) disable_log_collection: bool, +} + /// Resources on which operation can be performed #[derive(clap::Subcommand, Clone, Debug)] pub(crate) enum Resource { /// Collects entire system information - System, + System(SystemDumpArgs), + #[cfg(debug_assertions)] /// Collects information about all volumes and its descendants (replicas/pools/nodes) #[clap(name = "volumes")] Volumes, + #[cfg(debug_assertions)] /// Collects information about particular volume and its descendants matching /// to given volume ID #[clap(name = "volume")] Volume { id: VolumeID }, + #[cfg(debug_assertions)] /// Collects information about all pools and its descendants (nodes) #[clap(name = "pools")] Pools, + #[cfg(debug_assertions)] /// Collects information about particular pool and its descendants matching /// to given pool ID #[clap(name = "pool")] Pool { id: PoolID }, + #[cfg(debug_assertions)] /// Collects information about all nodes #[clap(name = "nodes")] Nodes, + #[cfg(debug_assertions)] /// Collects information about particular node matching to given node ID #[clap(name = "node")] Node { id: NodeID },
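
The patch gates every resource-level subcommand behind `cfg(debug_assertions)`. A minimal, self-contained sketch of that pattern (hypothetical crate, not part of the patch): variants compiled out of release builds never appear in clap's help or parser.

```rust
use clap::Parser;

/// Hypothetical CLI mirroring the gating used in `operations.rs` above.
#[derive(Parser, Debug)]
struct Cli {
    #[clap(subcommand)]
    resource: Resource,
}

#[derive(clap::Subcommand, Clone, Debug)]
enum Resource {
    /// Always available, like `dump system`.
    System,
    /// Debug-only: this variant does not exist in release builds,
    /// so `--help` never advertises it there.
    #[cfg(debug_assertions)]
    Volumes,
}

fn main() {
    // In a release build, `volumes` is rejected as an unknown subcommand.
    let cli = Cli::parse();
    println!("{:?}", cli.resource);
}
```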