From 62e1cebac60ec2f5bd4e15615696aec126be9099 Mon Sep 17 00:00:00 2001
From: mulhern
Date: Thu, 22 Aug 2024 17:21:31 -0400
Subject: [PATCH] Use types to help track different sizes

Assign types to the four summary sizes from BlockDevMgr

Signed-off-by: mulhern
---
 .../strat_engine/backstore/backstore/v1.rs |  10 +-
 .../strat_engine/backstore/backstore/v2.rs |  16 +-
 .../strat_engine/backstore/blockdevmgr.rs  | 155 ++++++++++++++----
 .../strat_engine/backstore/cache_tier.rs   |  78 +++++----
 .../strat_engine/backstore/data_tier.rs    |  36 ++--
 src/engine/strat_engine/pool/v1.rs         |   8 +-
 src/engine/strat_engine/pool/v2.rs         |   2 +-
 7 files changed, 215 insertions(+), 90 deletions(-)

diff --git a/src/engine/strat_engine/backstore/backstore/v1.rs b/src/engine/strat_engine/backstore/backstore/v1.rs
index 07066c85c5..e125a8b508 100644
--- a/src/engine/strat_engine/backstore/backstore/v1.rs
+++ b/src/engine/strat_engine/backstore/backstore/v1.rs
@@ -19,7 +19,7 @@ use crate::{
             backstore::{
                 backstore::InternalBackstore,
                 blockdev::{v1::StratBlockDev, InternalBlockDev},
-                blockdevmgr::BlockDevMgr,
+                blockdevmgr::{BlockDevMgr, BlockDevMgrMetaSize, BlockDevMgrSize},
                 cache_tier::CacheTier,
                 data_tier::DataTier,
                 devices::UnownedDevices,
@@ -125,11 +125,11 @@ impl InternalBackstore for Backstore {
     }

     fn datatier_usable_size(&self) -> Sectors {
-        self.data_tier.usable_size()
+        self.data_tier.usable_size().sectors()
     }

     fn available_in_backstore(&self) -> Sectors {
-        self.data_tier.usable_size() - self.next
+        self.data_tier.usable_size().sectors() - self.next
     }

     fn alloc(
@@ -513,7 +513,7 @@ impl Backstore {
     }

     /// The current size of all the blockdevs in the data tier.
-    pub fn datatier_size(&self) -> Sectors {
+    pub fn datatier_size(&self) -> BlockDevMgrSize {
         self.data_tier.size()
     }

@@ -605,7 +605,7 @@ impl Backstore {

     /// The number of sectors in the backstore given up to Stratis metadata
     /// on devices in the data tier.
-    pub fn datatier_metadata_size(&self) -> Sectors {
+    pub fn datatier_metadata_size(&self) -> BlockDevMgrMetaSize {
         self.data_tier.metadata_size()
     }

diff --git a/src/engine/strat_engine/backstore/backstore/v2.rs b/src/engine/strat_engine/backstore/backstore/v2.rs
index 5d4d6f99eb..c7ec5dfd3a 100644
--- a/src/engine/strat_engine/backstore/backstore/v2.rs
+++ b/src/engine/strat_engine/backstore/backstore/v2.rs
@@ -19,9 +19,13 @@ use crate::{
     engine::{
         strat_engine::{
             backstore::{
-                backstore::InternalBackstore, blockdev::v2::StratBlockDev,
-                blockdevmgr::BlockDevMgr, cache_tier::CacheTier, data_tier::DataTier,
-                devices::UnownedDevices, shared::BlockSizeSummary,
+                backstore::InternalBackstore,
+                blockdev::v2::StratBlockDev,
+                blockdevmgr::{BlockDevMgr, BlockDevMgrSize},
+                cache_tier::CacheTier,
+                data_tier::DataTier,
+                devices::UnownedDevices,
+                shared::BlockSizeSummary,
             },
             crypt::{crypt_metadata_size, handle::v2::CryptHandle, interpret_clevis_config},
             dm::{get_dm, list_of_backstore_devices, remove_optional_devices, DEVICEMAPPER_PATH},
@@ -170,7 +174,7 @@ impl InternalBackstore for Backstore {
     }

     fn datatier_usable_size(&self) -> Sectors {
-        self.datatier_size() - self.datatier_metadata_size()
+        self.datatier_size().sectors() - self.datatier_metadata_size()
     }

     fn available_in_backstore(&self) -> Sectors {
@@ -750,7 +754,7 @@ impl Backstore {
     }

     /// The current size of all the blockdevs in the data tier.
-    pub fn datatier_size(&self) -> Sectors {
+    pub fn datatier_size(&self) -> BlockDevMgrSize {
         self.data_tier.size()
     }

@@ -857,7 +861,7 @@ impl Backstore {

     /// Metadata size on the data tier, including crypt metadata space.
     pub fn datatier_metadata_size(&self) -> Sectors {
-        self.datatier_crypt_meta_size() + self.data_tier.metadata_size()
+        self.datatier_crypt_meta_size() + self.data_tier.metadata_size().sectors()
     }

     /// Write the given data to the data tier's devices.
diff --git a/src/engine/strat_engine/backstore/blockdevmgr.rs b/src/engine/strat_engine/backstore/blockdevmgr.rs
index 04525081c3..55cc26a6b8 100644
--- a/src/engine/strat_engine/backstore/blockdevmgr.rs
+++ b/src/engine/strat_engine/backstore/blockdevmgr.rs
@@ -68,6 +68,87 @@ impl TimeStamp {
     }
 }

+/// Type for the block dev mgr size.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct BlockDevMgrSize(pub Sectors);
+
+impl BlockDevMgrSize {
+    pub fn sectors(self) -> Sectors {
+        self.0
+    }
+}
+
+impl<B> From<&[B]> for BlockDevMgrSize
+where
+    B: InternalBlockDev,
+{
+    fn from(blockdevs: &[B]) -> Self {
+        BlockDevMgrSize(blockdevs.iter().map(|b| b.total_size().sectors()).sum())
+    }
+}
+
+/// All metadata allocated to individual block devs in the backstore.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct BlockDevMgrMetaSize(pub Sectors);
+
+impl BlockDevMgrMetaSize {
+    pub fn sectors(self) -> Sectors {
+        self.0
+    }
+}
+
+impl<B> From<&[B]> for BlockDevMgrMetaSize
+where
+    B: InternalBlockDev,
+{
+    fn from(blockdevs: &[B]) -> Self {
+        BlockDevMgrMetaSize(blockdevs.iter().map(|bd| bd.metadata_size()).sum())
+    }
+}
+
+/// All space that has not been allocated for any purpose.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct BlockDevMgrAvailSpace(pub Sectors);
+
+impl BlockDevMgrAvailSpace {
+    pub fn sectors(self) -> Sectors {
+        self.0
+    }
+}
+
+impl<B> From<&[B]> for BlockDevMgrAvailSpace
+where
+    B: InternalBlockDev,
+{
+    fn from(blockdevs: &[B]) -> Self {
+        BlockDevMgrAvailSpace(blockdevs.iter().map(|bd| bd.available()).sum())
+    }
+}
+
+/// All space that has not been allocated to metadata on the devices.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct BlockDevMgrUsableSpace(pub Sectors);
+
+impl BlockDevMgrUsableSpace {
+    pub fn sectors(self) -> Sectors {
+        self.0
+    }
+}
+
+impl<B> From<&[B]> for BlockDevMgrUsableSpace
+where
+    B: InternalBlockDev,
+{
+    fn from(blockdevs: &[B]) -> Self {
+        BlockDevMgrUsableSpace(
+            blockdevs
+                .iter()
+                .map(|b| b.total_size().sectors() - b.metadata_size())
+                .sum(),
+        )
+    }
+}
+
 #[derive(Debug)]
 pub struct BlockDevMgr<B> {
     /// All the block devices that belong to this block dev manager.
@@ -77,6 +158,10 @@ pub struct BlockDevMgr<B> {
     last_update_time: TimeStamp,
 }

+pub struct Sizer<'a, B> {
+    block_devs: &'a [B],
+}
+
 impl BlockDevMgr {
     /// Initialize a new StratBlockDevMgr with specified pool and devices.
     pub fn initialize(
@@ -249,6 +334,27 @@ impl BlockDevMgr {
     }
 }

+impl<B> Sizer<'_, B>
+where
+    B: InternalBlockDev,
+{
+    pub fn avail_space(&self) -> BlockDevMgrAvailSpace {
+        self.block_devs.into()
+    }
+
+    pub fn size(&self) -> BlockDevMgrSize {
+        self.block_devs.into()
+    }
+
+    pub fn metadata_size(&self) -> BlockDevMgrMetaSize {
+        self.block_devs.into()
+    }
+
+    pub fn usable_size(&self) -> BlockDevMgrUsableSpace {
+        self.block_devs.into()
+    }
+}
+
 impl<B> BlockDevMgr<B>
 where
     B: InternalBlockDev,
@@ -346,7 +452,7 @@ where
     /// nothing.
     pub fn alloc(&mut self, sizes: &[Sectors]) -> Option<Vec<Vec<BlkDevSegment>>> {
         let total_needed: Sectors = sizes.iter().cloned().sum();
-        if self.avail_space() < total_needed {
+        if self.sizer().avail_space().sectors() < total_needed {
             return None;
         }

@@ -444,27 +550,10 @@ where
         })
     }

-    // SIZE methods
-
-    /// The number of sectors not allocated for any purpose.
-    pub fn avail_space(&self) -> Sectors {
-        self.block_devs.iter().map(|bd| bd.available()).sum()
-    }
-
-    /// The current size of all the blockdevs.
-    /// self.size() > self.avail_space() because some sectors are certainly
-    /// allocated for Stratis metadata
-    pub fn size(&self) -> Sectors {
-        self.block_devs
-            .iter()
-            .map(|b| b.total_size().sectors())
-            .sum()
-    }
-
-    /// The number of sectors given over to Stratis metadata
-    /// self.allocated_size() - self.metadata_size() >= self.avail_space()
-    pub fn metadata_size(&self) -> Sectors {
-        self.block_devs.iter().map(|bd| bd.metadata_size()).sum()
+    pub fn sizer(&self) -> Sizer<'_, B> {
+        Sizer {
+            block_devs: &self.block_devs,
+        }
     }

     pub fn grow(&mut self, dev: DevUuid) -> StratisResult<bool> {
@@ -546,13 +635,18 @@ mod tests {
             None,
         )
         .unwrap();
-        assert_eq!(mgr.avail_space() + mgr.metadata_size(), mgr.size());
+        assert_eq!(
+            mgr.sizer().avail_space().sectors() + mgr.sizer().metadata_size().sectors(),
+            mgr.sizer().size().sectors()
+        );

         let allocated = Sectors(2);
         mgr.alloc(&[allocated]).unwrap();
         assert_eq!(
-            mgr.avail_space() + allocated + mgr.metadata_size(),
-            mgr.size()
+            mgr.sizer().avail_space().sectors()
+                + allocated
+                + mgr.sizer().metadata_size().sectors(),
+            mgr.sizer().size().sectors()
         );
     }

@@ -764,13 +858,18 @@ mod tests {
             MDADataSize::default(),
         )
         .unwrap();
-        assert_eq!(mgr.avail_space() + mgr.metadata_size(), mgr.size());
+        assert_eq!(
+            mgr.sizer().avail_space().sectors() + mgr.sizer().metadata_size().sectors(),
+            mgr.sizer().size().sectors()
+        );

         let allocated = Sectors(2);
         mgr.alloc(&[allocated]).unwrap();
         assert_eq!(
-            mgr.avail_space() + allocated + mgr.metadata_size(),
-            mgr.size()
+            mgr.sizer().avail_space().sectors()
+                + allocated
+                + mgr.sizer().metadata_size().sectors(),
+            mgr.sizer().size().sectors()
         );
     }

diff --git a/src/engine/strat_engine/backstore/cache_tier.rs b/src/engine/strat_engine/backstore/cache_tier.rs
index c520ebc030..493f9c2cf3 100644
--- a/src/engine/strat_engine/backstore/cache_tier.rs
+++ b/src/engine/strat_engine/backstore/cache_tier.rs
@@ -14,7 +14,7 @@ use crate::{
         strat_engine::{
             backstore::{
                 blockdev::{v1, v2, InternalBlockDev},
-                blockdevmgr::BlockDevMgr,
+                blockdevmgr::{BlockDevMgr, BlockDevMgrAvailSpace},
                 devices::UnownedDevices,
                 shared::{metadata_to_segment, AllocatedAbove, BlkDevSegment, BlockDevPartition},
             },
@@ -75,11 +75,11 @@ impl CacheTier {
             .block_mgr
             .add(pool_name, pool_uuid, devices, sector_size)?;

-        let avail_space = self.block_mgr.avail_space();
+        let avail_space = self.block_mgr.sizer().avail_space();

         // FIXME: This check will become unnecessary when cache metadata device
         // can be increased dynamically.
-        if avail_space + self.cache_segments.size() > MAX_CACHE_SIZE {
+        if avail_space.sectors() + self.cache_segments.size() > MAX_CACHE_SIZE {
             self.block_mgr.remove_blockdevs(&uuids)?;
             return Err(StratisError::Msg(format!(
                 "The size of the cache sub-device may not exceed {MAX_CACHE_SIZE}"
@@ -88,7 +88,7 @@ impl CacheTier {

         let segments = self
             .block_mgr
-            .alloc(&[avail_space])
+            .alloc(&[avail_space.sectors()])
             .expect("asked for exactly the space available, must get")
             .iter()
             .flat_map(|s| s.iter())
@@ -151,11 +151,11 @@ impl CacheTier {
     ) -> StratisResult<(Vec<DevUuid>, (bool, bool))> {
         let uuids = self.block_mgr.add(pool_uuid, devices)?;

-        let avail_space = self.block_mgr.avail_space();
+        let avail_space = self.block_mgr.sizer().avail_space();

         // FIXME: This check will become unnecessary when cache metadata device
         // can be increased dynamically.
-        if avail_space + self.cache_segments.size() > MAX_CACHE_SIZE {
+        if avail_space.sectors() + self.cache_segments.size() > MAX_CACHE_SIZE {
             self.block_mgr.remove_blockdevs(&uuids)?;
             return Err(StratisError::Msg(format!(
                 "The size of the cache sub-device may not exceed {MAX_CACHE_SIZE}"
@@ -164,7 +164,7 @@ impl CacheTier {

         let segments = self
             .block_mgr
-            .alloc(&[avail_space])
+            .alloc(&[avail_space.sectors()])
             .expect("asked for exactly the space available, must get")
             .iter()
             .flat_map(|s| s.iter())
@@ -215,10 +215,10 @@ where
         block_mgr: BlockDevMgr<B>,
         cache_tier_save: &CacheTierSave,
     ) -> BDARecordResult<CacheTier<B>> {
-        if block_mgr.avail_space() != Sectors(0) {
+        if block_mgr.sizer().avail_space() != BlockDevMgrAvailSpace(Sectors(0)) {
             let err_msg = format!(
                 "{} unallocated to device; probable metadata corruption",
-                block_mgr.avail_space()
+                block_mgr.sizer().avail_space().sectors()
             );
             return Err((StratisError::Msg(err_msg), block_mgr.into_bdas()));
         }
@@ -260,19 +260,21 @@ where
     ///
     /// WARNING: metadata changing event
     pub fn new(mut block_mgr: BlockDevMgr<B>) -> StratisResult<CacheTier<B>> {
-        let avail_space = block_mgr.avail_space();
+        let avail_space = block_mgr.sizer().avail_space();

         // FIXME: Come up with a better way to choose metadata device size
         let meta_space = Sectors(IEC::Mi);

         assert!(
-            meta_space < avail_space,
+            meta_space < avail_space.sectors(),
             "every block device must be at least one GiB"
         );

+        let cache_data_space = avail_space.sectors() - meta_space;
+
         // FIXME: This check will become unnecessary when cache metadata device
         // can be increased dynamically.
-        if avail_space - meta_space > MAX_CACHE_SIZE {
+        if cache_data_space > MAX_CACHE_SIZE {
             block_mgr.destroy_all()?;
             return Err(StratisError::Msg(format!(
                 "The size of the cache sub-device may not exceed {MAX_CACHE_SIZE}"
@@ -280,7 +282,7 @@ where
         }

         let mut segments = block_mgr
-            .alloc(&[meta_space, avail_space - meta_space])
+            .alloc(&[meta_space, cache_data_space])
             .expect("asked for exactly the space available, must get");
         let cache_segments = AllocatedAbove {
             inner: segments.pop().expect("segments.len() == 2"),
@@ -410,12 +412,15 @@ mod tests {
         // the tier.
         let cache_metadata_size = cache_tier.meta_segments.size();

-        let metadata_size = cache_tier.block_mgr.metadata_size();
-        let size = cache_tier.block_mgr.size();
+        let metadata_size = cache_tier.block_mgr.sizer().metadata_size();
+        let size = cache_tier.block_mgr.sizer().size();

-        assert_eq!(cache_tier.block_mgr.avail_space(), Sectors(0));
         assert_eq!(
-            size - metadata_size,
+            cache_tier.block_mgr.sizer().avail_space(),
+            BlockDevMgrAvailSpace(Sectors(0))
+        );
+        assert_eq!(
+            size.sectors() - metadata_size.sectors(),
             cache_tier.cache_segments.size() + cache_metadata_size
         );

@@ -427,12 +432,18 @@ mod tests {
         assert!(cache);
         assert!(!meta);

-        assert_eq!(cache_tier.block_mgr.avail_space(), Sectors(0));
-        assert!(cache_tier.block_mgr.size() > size);
-        assert!(cache_tier.block_mgr.metadata_size() > metadata_size);
+        assert_eq!(
+            cache_tier.block_mgr.sizer().avail_space(),
+            BlockDevMgrAvailSpace(Sectors(0))
+        );
+        assert!(cache_tier.block_mgr.sizer().size().sectors() > size.sectors());
+        assert!(
+            cache_tier.block_mgr.sizer().metadata_size().sectors() > metadata_size.sectors()
+        );

         assert_eq!(
-            cache_tier.block_mgr.size() - cache_tier.block_mgr.metadata_size(),
+            cache_tier.block_mgr.sizer().size().sectors()
+                - cache_tier.block_mgr.sizer().metadata_size().sectors(),
             cache_tier.cache_segments.size() + cache_metadata_size
         );

@@ -483,12 +494,15 @@ mod tests {
         // the tier.
         let cache_metadata_size = cache_tier.meta_segments.size();

-        let metadata_size = cache_tier.block_mgr.metadata_size();
-        let size = cache_tier.block_mgr.size();
+        let metadata_size = cache_tier.block_mgr.sizer().metadata_size();
+        let size = cache_tier.block_mgr.sizer().size();

-        assert_eq!(cache_tier.block_mgr.avail_space(), Sectors(0));
         assert_eq!(
-            size - metadata_size,
+            cache_tier.block_mgr.sizer().avail_space(),
+            BlockDevMgrAvailSpace(Sectors(0))
+        );
+        assert_eq!(
+            size.sectors() - metadata_size.sectors(),
             cache_tier.cache_segments.size() + cache_metadata_size
         );

@@ -498,12 +512,18 @@ mod tests {
         assert!(cache);
         assert!(!meta);

-        assert_eq!(cache_tier.block_mgr.avail_space(), Sectors(0));
-        assert!(cache_tier.block_mgr.size() > size);
-        assert!(cache_tier.block_mgr.metadata_size() > metadata_size);
+        assert_eq!(
+            cache_tier.block_mgr.sizer().avail_space(),
+            BlockDevMgrAvailSpace(Sectors(0))
+        );
+        assert!(cache_tier.block_mgr.sizer().size().sectors() > size.sectors());
+        assert!(
+            cache_tier.block_mgr.sizer().metadata_size().sectors() > metadata_size.sectors()
+        );

         assert_eq!(
-            cache_tier.block_mgr.size() - cache_tier.block_mgr.metadata_size(),
+            cache_tier.block_mgr.sizer().size().sectors()
+                - cache_tier.block_mgr.sizer().metadata_size().sectors(),
             cache_tier.cache_segments.size() + cache_metadata_size
         );

diff --git a/src/engine/strat_engine/backstore/data_tier.rs b/src/engine/strat_engine/backstore/data_tier.rs
index 2fb17c414f..3dcfbe63ef 100644
--- a/src/engine/strat_engine/backstore/data_tier.rs
+++ b/src/engine/strat_engine/backstore/data_tier.rs
@@ -18,7 +18,9 @@ use crate::{
                     v2::{self, integrity_meta_space, raid_meta_space},
                     InternalBlockDev,
                 },
-                blockdevmgr::BlockDevMgr,
+                blockdevmgr::{
+                    BlockDevMgr, BlockDevMgrMetaSize, BlockDevMgrSize, BlockDevMgrUsableSpace,
+                },
                 devices::UnownedDevices,
                 shared::{metadata_to_segment, AllocatedAbove, BlkDevSegment, BlockDevPartition},
             },
@@ -238,18 +240,18 @@ where
     }

     /// The total size of all the blockdevs combined
-    pub fn size(&self) -> Sectors {
-        self.block_mgr.size()
+    pub fn size(&self) -> BlockDevMgrSize {
+        self.block_mgr.sizer().size()
     }

     /// The number of sectors used for metadata by all the blockdevs
-    pub fn metadata_size(&self) -> Sectors {
-        self.block_mgr.metadata_size()
+    pub fn metadata_size(&self) -> BlockDevMgrMetaSize {
+        self.block_mgr.sizer().metadata_size()
     }

     /// The total usable size of all the blockdevs combined
-    pub fn usable_size(&self) -> Sectors {
-        self.size() - self.metadata_size()
+    pub fn usable_size(&self) -> BlockDevMgrUsableSpace {
+        self.block_mgr.sizer().usable_size()
     }

     /// Destroy the store. Wipe its blockdevs.
@@ -365,11 +367,11 @@ mod tests {
         // A data_tier w/ some devices but nothing allocated
         let size = data_tier.size();
         assert_eq!(data_tier.allocated(), Sectors(0));
-        assert!(size != Sectors(0));
+        assert!(size != BlockDevMgrSize(Sectors(0)));

         let last_request_amount = size;

-        let request_amount = data_tier.block_mgr.avail_space() / 2usize;
+        let request_amount = data_tier.block_mgr.sizer().avail_space().sectors() / 2usize;
         assert!(request_amount != Sectors(0));
         assert!(data_tier.alloc(&[request_amount]));

@@ -385,17 +387,17 @@ mod tests {
         data_tier.invariant();

         // A data tier w/ additional blockdevs added
-        assert!(data_tier.size() > size);
+        assert!(data_tier.size().sectors() > size.sectors());
         assert_eq!(data_tier.allocated(), allocated);
         assert_eq!(paths.len(), data_tier.blockdevs().len());

         let size = data_tier.size();

         // Allocate enough to get into the newly added block devices
-        assert!(data_tier.alloc(&[last_request_amount]));
+        assert!(data_tier.alloc(&[last_request_amount.sectors()]));
         data_tier.invariant();

-        assert!(data_tier.allocated() >= request_amount + last_request_amount);
+        assert!(data_tier.allocated() >= request_amount + last_request_amount.sectors());
         assert_eq!(data_tier.size(), size);

         data_tier.destroy().unwrap();
@@ -447,11 +449,11 @@ mod tests {
         // A data_tier w/ some devices but nothing allocated
         let size = data_tier.size();
         assert_eq!(data_tier.allocated(), Sectors(0));
-        assert!(size != Sectors(0));
+        assert!(size != BlockDevMgrSize(Sectors(0)));

         let last_request_amount = size;

-        let request_amount = data_tier.block_mgr.avail_space() / 2usize;
+        let request_amount = data_tier.block_mgr.sizer().avail_space().sectors() / 2usize;
         assert!(request_amount != Sectors(0));
         assert!(data_tier.alloc(&[request_amount]));

@@ -467,17 +469,17 @@ mod tests {
         data_tier.invariant();

         // A data tier w/ additional blockdevs added
-        assert!(data_tier.size() > size);
+        assert!(data_tier.size().sectors() > size.sectors());
         assert_eq!(data_tier.allocated(), allocated);
         assert_eq!(paths.len(), data_tier.blockdevs().len());

         let size = data_tier.size();

         // Allocate enough to get into the newly added block devices
-        assert!(data_tier.alloc(&[last_request_amount]));
+        assert!(data_tier.alloc(&[last_request_amount.sectors()]));
         data_tier.invariant();

-        assert!(data_tier.allocated() >= request_amount + last_request_amount);
+        assert!(data_tier.allocated() >= request_amount + last_request_amount.sectors());
         assert_eq!(data_tier.size(), size);

         data_tier.destroy().unwrap();
diff --git a/src/engine/strat_engine/pool/v1.rs b/src/engine/strat_engine/pool/v1.rs
index d90e268d2c..03a5eb8f46 100644
--- a/src/engine/strat_engine/pool/v1.rs
+++ b/src/engine/strat_engine/pool/v1.rs
@@ -235,7 +235,7 @@ impl StratPool {
             }
         };

-        let metadata_size = backstore.datatier_metadata_size();
+        let metadata_size = backstore.datatier_metadata_size().sectors();
         let mut pool = StratPool {
             backstore,
             thin_pool: thinpool,
@@ -299,7 +299,7 @@ impl StratPool {
         let mut needs_save = metadata.thinpool_dev.fs_limit.is_none()
             || metadata.thinpool_dev.feature_args.is_none();

-        let metadata_size = backstore.datatier_metadata_size();
+        let metadata_size = backstore.datatier_metadata_size().sectors();
         let mut pool = StratPool {
             backstore,
             thin_pool: thinpool,
@@ -1110,7 +1110,7 @@ impl Pool for StratPool {
     }

     fn total_physical_size(&self) -> Sectors {
-        self.backstore.datatier_size()
+        self.backstore.datatier_size().sectors()
     }

     fn total_allocated_size(&self) -> Sectors {
@@ -1343,7 +1343,7 @@ impl<'a> DumpState<'a> for StratPool {
     }

     fn dump(&mut self, _: Self::DumpInput) -> Self::State {
-        self.metadata_size = self.backstore.datatier_metadata_size();
+        self.metadata_size = self.backstore.datatier_metadata_size().sectors();
         StratPoolState {
             metadata_size: self.metadata_size.bytes(),
             out_of_alloc_space: self.thin_pool.out_of_alloc_space(),
diff --git a/src/engine/strat_engine/pool/v2.rs b/src/engine/strat_engine/pool/v2.rs
index 2a45991d76..01fd9503fd 100644
--- a/src/engine/strat_engine/pool/v2.rs
+++ b/src/engine/strat_engine/pool/v2.rs
@@ -1015,7 +1015,7 @@ impl Pool for StratPool {
     }

     fn total_physical_size(&self) -> Sectors {
-        self.backstore.datatier_size()
+        self.backstore.datatier_size().sectors()
     }

     fn total_allocated_size(&self) -> Sectors {
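
Note (illustrative, not part of the patch): the change above wraps each of BlockDevMgr's four summary sizes in its own newtype and routes them through the new Sizer helper, so the compiler distinguishes the quantities and call sites unwrap explicitly via sectors(). A minimal, self-contained sketch of the same pattern follows; the MgrSize and MgrAvailSpace names and the local Sectors type are stand-ins invented for this example, not stratisd API.

    // Minimal sketch of the newtype pattern this patch applies to the
    // BlockDevMgr summary sizes. `Sectors` here is a local stand-in for
    // the devicemapper::Sectors type that stratisd uses.
    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
    struct Sectors(u64);

    /// Total size of all block devices owned by a manager (stand-in type).
    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
    struct MgrSize(Sectors);

    /// Space not yet allocated for any purpose (stand-in type).
    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
    struct MgrAvailSpace(Sectors);

    impl MgrSize {
        /// Unwrap to the underlying sector count, mirroring sectors() in the patch.
        fn sectors(self) -> Sectors {
            self.0
        }
    }

    impl MgrAvailSpace {
        fn sectors(self) -> Sectors {
            self.0
        }
    }

    fn main() {
        let size = MgrSize(Sectors(1000));
        let avail = MgrAvailSpace(Sectors(250));
        // The two quantities now have distinct types, so they cannot be
        // swapped or combined by accident; arithmetic requires an explicit
        // unwrap via sectors().
        let allocated = Sectors(size.sectors().0 - avail.sectors().0);
        assert_eq!(allocated, Sectors(750));
    }

Because each wrapper is a distinct Copy type, passing an available-space value where a total size is expected no longer type-checks, which is the safety the patch is after.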