Skip to content

Commit

Permalink
Add a function for calculating the size limit of log volumes (#621)
Browse files Browse the repository at this point in the history
## Description

Add a function for calculating the size limit of log volumes.
  • Loading branch information
siegfriedweber committed Jul 13, 2023
1 parent 5d2ca16 commit 2c5da0b
Show file tree
Hide file tree
Showing 3 changed files with 127 additions and 5 deletions.
6 changes: 6 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file.

## [Unreleased]

### Added

- Add a function for calculating the size limit of log volumes ([#621]).

[#621]: https://github.com/stackabletech/operator-rs/pull/621

## [0.43.0] - 2023-07-06

### Added
Expand Down
21 changes: 21 additions & 0 deletions src/memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ use k8s_openapi::apimachinery::pkg::api::resource::Quantity;
use crate::error::{Error, OperatorResult};
use std::{
fmt::Display,
iter::Sum,
ops::{Add, AddAssign, Div, Mul, Sub, SubAssign},
str::FromStr,
};
Expand Down Expand Up @@ -206,6 +207,14 @@ impl MemoryQuantity {
}
}

/// Rounds the value of this `MemoryQuantity` up to the nearest whole number.
/// The unit is left unchanged.
pub fn ceil(&self) -> Self {
    let rounded_value = self.value.ceil();
    Self {
        value: rounded_value,
        unit: self.unit,
    }
}

/// If the MemoryQuantity value is smaller than 1 (starts with a zero), convert it to a smaller
/// unit until the non fractional part of the value is not zero anymore.
/// This can fail if the quantity is smaller than 1kB.
Expand Down Expand Up @@ -347,6 +356,18 @@ impl Add<MemoryQuantity> for MemoryQuantity {
}
}

impl Sum<MemoryQuantity> for MemoryQuantity {
    /// Sums up memory quantities, starting from a zero value expressed in kibibytes.
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        let zero = MemoryQuantity {
            value: 0.0,
            unit: BinaryMultiple::Kibi,
        };
        iter.fold(zero, |accumulated, quantity| accumulated + quantity)
    }
}

impl AddAssign<MemoryQuantity> for MemoryQuantity {
fn add_assign(&mut self, rhs: MemoryQuantity) {
self.value += rhs.value;
Expand Down
105 changes: 100 additions & 5 deletions src/product_logging/framework.rs
Original file line number Diff line number Diff line change
@@ -1,12 +1,17 @@
//! Log aggregation framework

use std::cmp;

use k8s_openapi::api::core::v1::ResourceRequirements;
use std::{cmp, ops::Mul};

use crate::{
builder::ContainerBuilder, commons::product_image_selection::ResolvedProductImage,
k8s_openapi::api::core::v1::Container, kube::Resource, role_utils::RoleGroupRef,
builder::ContainerBuilder,
commons::product_image_selection::ResolvedProductImage,
k8s_openapi::{
api::core::v1::{Container, ResourceRequirements},
apimachinery::pkg::api::resource::Quantity,
},
kube::Resource,
memory::{BinaryMultiple, MemoryQuantity},
role_utils::RoleGroupRef,
};

use super::spec::{
Expand All @@ -26,6 +31,74 @@ const SHUTDOWN_FILE: &str = "shutdown";
/// File name of the Vector config file
pub const VECTOR_CONFIG_FILE: &str = "vector.toml";

/// Calculate the size limit for the log volume.
///
/// The returned size limit is deliberately much larger than the sum of the given maximum log
/// file sizes, because:
/// - Log file rollover only happens once a file exceeds its maximum size, so depending on the
///   size of the last log entries, a file can end up several kilobytes larger than configured.
/// - The actual disk usage depends on the block size of the file system.
/// - OpenShift sometimes reserves more than twice the amount of blocks than needed. For instance,
///   a ZooKeeper log file with 4,127,151 bytes occupied 4,032 blocks. Then log entries were written
///   and the actual file size increased to 4,132,477 bytes which occupied 8,128 blocks.
///
/// This function is meant to be used for log volumes up to a size of approximately 100 MiB. The
/// overhead might not be acceptable for larger volumes, however this needs to be tested
/// beforehand.
///
/// # Example
///
/// ```
/// use stackable_operator::{
///     builder::{
///         PodBuilder,
///         meta::ObjectMetaBuilder,
///     },
///     memory::{
///         BinaryMultiple,
///         MemoryQuantity,
///     },
/// };
/// # use stackable_operator::product_logging;
///
/// const MAX_INIT_CONTAINER_LOG_FILES_SIZE: MemoryQuantity = MemoryQuantity {
///     value: 1.0,
///     unit: BinaryMultiple::Mebi,
/// };
/// const MAX_MAIN_CONTAINER_LOG_FILES_SIZE: MemoryQuantity = MemoryQuantity {
///     value: 10.0,
///     unit: BinaryMultiple::Mebi,
/// };
///
/// PodBuilder::new()
///     .metadata(ObjectMetaBuilder::default().build())
///     .add_empty_dir_volume(
///         "log",
///         Some(product_logging::framework::calculate_log_volume_size_limit(
///             &[
///                 MAX_INIT_CONTAINER_LOG_FILES_SIZE,
///                 MAX_MAIN_CONTAINER_LOG_FILES_SIZE,
///             ],
///         )),
///     )
///     .build()
///     .unwrap();
/// ```
pub fn calculate_log_volume_size_limit(max_log_files_size: &[MemoryQuantity]) -> Quantity {
    // Total of all given maximum log file sizes, normalized to mebibytes.
    let total_size: MemoryQuantity = max_log_files_size.iter().cloned().sum();

    // According to the reasons mentioned in the function documentation, the multiplier must be
    // greater than 2. Manual tests with ZooKeeper 3.8 in an OpenShift cluster showed that 3 is
    // absolutely sufficient.
    let size_limit = total_size.scale_to(BinaryMultiple::Mebi) * 3.0;

    // Round up to avoid bulky numbers caused by the floating-point arithmetic.
    size_limit.ceil().into()
}

/// Create a Bash command which filters stdout and stderr according to the given log configuration
/// and additionally stores the output in log files
///
Expand Down Expand Up @@ -1048,8 +1121,30 @@ touch {stackable_log_dir}/{VECTOR_LOG_DIR}/{SHUTDOWN_FILE}"
mod tests {
use super::*;
use crate::product_logging::spec::{AppenderConfig, LoggerConfig};
use rstest::rstest;
use std::collections::BTreeMap;

#[rstest]
#[case("0Mi", &[])]
#[case("3Mi", &["1Mi"])]
#[case("5Mi", &["1.5Mi"])]
#[case("1Mi", &["100Ki"])]
#[case("3076Mi", &["1Ki", "1Mi", "1Gi"])]
fn test_calculate_log_volume_size_limit(
    #[case] expected_log_volume_size_limit: &str,
    #[case] max_log_files_sizes: &[&str],
) {
    // Parse the human-readable quantities into MemoryQuantity values.
    let max_log_files_size: Vec<_> = max_log_files_sizes
        .iter()
        .map(|size| MemoryQuantity::try_from(Quantity(size.to_string())).unwrap())
        .collect();

    let log_volume_size_limit = calculate_log_volume_size_limit(&max_log_files_size);

    assert_eq!(
        log_volume_size_limit,
        Quantity(expected_log_volume_size_limit.to_string())
    );
}

#[test]
fn test_create_log4j2_config() {
let log_config = AutomaticContainerLogConfig {
Expand Down

0 comments on commit 2c5da0b

Please sign in to comment.