From 672b468180bf213add86c88c8176ee34a5f93709 Mon Sep 17 00:00:00 2001
From: Razvan-Daniel Mihai <84674+razvan@users.noreply.github.com>
Date: Mon, 21 Aug 2023 17:47:15 +0200
Subject: [PATCH] Remove redundant image policy.

---
 deploy/helm/spark-k8s-operator/crds/crds.yaml |  17 ---
 rust/crd/src/lib.rs                           | 102 +-----------------
 .../src/spark_k8s_controller.rs               |  32 +++---
 3 files changed, 16 insertions(+), 135 deletions(-)

diff --git a/deploy/helm/spark-k8s-operator/crds/crds.yaml b/deploy/helm/spark-k8s-operator/crds/crds.yaml
index 16fcfd04..8c4551ad 100644
--- a/deploy/helm/spark-k8s-operator/crds/crds.yaml
+++ b/deploy/helm/spark-k8s-operator/crds/crds.yaml
@@ -10163,23 +10163,6 @@ spec:
                     nullable: true
                     type: string
                 type: object
-              sparkImagePullPolicy:
-                enum:
-                - Always
-                - IfNotPresent
-                - Never
-                nullable: true
-                type: string
-              sparkImagePullSecrets:
-                items:
-                  description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
-                  properties:
-                    name:
-                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
-                      type: string
-                  type: object
-                nullable: true
-                type: array
               stopped:
                 nullable: true
                 type: boolean
diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs
index 04bb679e..515df0b4 100644
--- a/rust/crd/src/lib.rs
+++ b/rust/crd/src/lib.rs
@@ -36,10 +36,7 @@ use stackable_operator::{
         merge::{Atomic, Merge},
     },
     k8s_openapi::{
-        api::core::v1::{
-            EmptyDirVolumeSource, EnvVar, LocalObjectReference, PodTemplateSpec, Volume,
-            VolumeMount,
-        },
+        api::core::v1::{EmptyDirVolumeSource, EnvVar, PodTemplateSpec, Volume, VolumeMount},
         apimachinery::pkg::api::resource::Quantity,
     },
     kube::{CustomResource, ResourceExt},
@@ -49,7 +46,7 @@ use stackable_operator::{
     role_utils::pod_overrides_schema,
     schemars::{self, JsonSchema},
 };
-use strum::{Display, EnumIter, EnumString};
+use strum::{Display, EnumIter};
 
 #[derive(Snafu, Debug)]
 pub enum Error {
@@ -178,10 +175,6 @@ pub struct SparkApplicationSpec {
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub image: Option<String>,
     pub spark_image: ProductImage,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub spark_image_pull_policy: Option<ImagePullPolicy>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub spark_image_pull_secrets: Option<Vec<LocalObjectReference>>,
     /// Name of the Vector aggregator discovery ConfigMap.
     /// It must contain the key `ADDRESS` with the address of the Vector aggregator.
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -210,13 +203,6 @@ pub struct SparkApplicationSpec {
     pub log_file_directory: Option<LogFileDirectorySpec>,
 }
 
-#[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize, Display, EnumString)]
-pub enum ImagePullPolicy {
-    Always,
-    IfNotPresent,
-    Never,
-}
-
 #[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Eq, Serialize)]
 #[serde(rename_all = "camelCase")]
 pub struct JobDependencies {
@@ -247,14 +233,6 @@ impl SparkApplication {
         self.spec.image.as_deref()
     }
 
-    pub fn spark_image_pull_policy(&self) -> Option<ImagePullPolicy> {
-        self.spec.spark_image_pull_policy.clone()
-    }
-
-    pub fn spark_image_pull_secrets(&self) -> Option<Vec<LocalObjectReference>> {
-        self.spec.spark_image_pull_secrets.clone()
-    }
-
     pub fn version(&self) -> Option<&str> {
         self.spec.version.as_deref()
     }
@@ -1072,11 +1050,9 @@ impl ExecutorConfig {
 
 #[cfg(test)]
 mod tests {
-    use crate::{
-        cores_from_quantity, resources_to_executor_props, ExecutorConfig, ImagePullPolicy,
-    };
+    use crate::DriverConfig;
+    use crate::{cores_from_quantity, resources_to_executor_props, ExecutorConfig};
     use crate::{resources_to_driver_props, SparkApplication};
-    use crate::{DriverConfig, LocalObjectReference};
     use crate::{Quantity, SparkStorageConfig};
     use rstest::rstest;
     use stackable_operator::builder::ObjectMetaBuilder;
@@ -1087,7 +1063,6 @@ mod tests {
     use stackable_operator::k8s_openapi::api::core::v1::PodTemplateSpec;
    use stackable_operator::product_logging::spec::Logging;
     use std::collections::{BTreeMap, HashMap};
-    use std::str::FromStr;
 
     #[test]
     fn test_spark_examples_s3() {
@@ -1258,75 +1233,6 @@ spec:
         assert!(spark_application.spec.image.is_none());
     }
 
-    #[test]
-    fn test_image_actions() {
-        let spark_application = serde_yaml::from_str::<SparkApplication>(
-            r#"
----
-apiVersion: spark.stackable.tech/v1alpha1
-kind: SparkApplication
-metadata:
-  name: spark-pi-local
-  namespace: default
-spec:
-  version: "1.0"
-  sparkImage:
-    productVersion: 3.2.1
-  sparkImagePullPolicy: Always
-  sparkImagePullSecrets:
-    - name: myregistrykey
-  mode: cluster
-  mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///stackable/spark/examples/jars/spark-examples.jar
-  sparkConf:
-    spark.kubernetes.node.selector.node: "2"
-  driver:
-    cores: 1
-    coreLimit: "1200m"
-    memory: "512m"
-  executor:
-    cores: 1
-    instances: 1
-    memory: "512m"
-        "#,
-        )
-        .unwrap();
-
-        assert_eq!(
-            Some(vec![LocalObjectReference {
-                name: Some("myregistrykey".to_string())
-            }]),
-            spark_application.spark_image_pull_secrets()
-        );
-        assert_eq!(
-            Some(ImagePullPolicy::Always),
-            spark_application.spark_image_pull_policy()
-        );
-    }
-
-    #[test]
-    fn test_image_pull_policy_ser() {
-        assert_eq!("Never", ImagePullPolicy::Never.to_string());
-        assert_eq!("Always", ImagePullPolicy::Always.to_string());
-        assert_eq!("IfNotPresent", ImagePullPolicy::IfNotPresent.to_string());
-    }
-
-    #[test]
-    fn test_image_pull_policy_de() {
-        assert_eq!(
-            ImagePullPolicy::Always,
-            ImagePullPolicy::from_str("Always").unwrap()
-        );
-        assert_eq!(
-            ImagePullPolicy::Never,
-            ImagePullPolicy::from_str("Never").unwrap()
-        );
-        assert_eq!(
-            ImagePullPolicy::IfNotPresent,
-            ImagePullPolicy::from_str("IfNotPresent").unwrap()
-        );
-    }
-
     #[test]
     fn test_default_resource_limits() {
         let spark_application = serde_yaml::from_str::<SparkApplication>(
diff --git a/rust/operator-binary/src/spark_k8s_controller.rs b/rust/operator-binary/src/spark_k8s_controller.rs
index 21f0f555..176a9500 100644
--- a/rust/operator-binary/src/spark_k8s_controller.rs
+++ b/rust/operator-binary/src/spark_k8s_controller.rs
@@ -325,7 +325,7 @@ fn init_containers(
     logging: &Logging<SparkContainer>,
     s3conn: &Option<S3ConnectionSpec>,
     s3logdir: &Option<S3LogDir>,
-    spark_image: &str,
+    spark_image: &ResolvedProductImage,
 ) -> Result<Vec<Container>> {
     let mut jcb = ContainerBuilder::new(&SparkContainer::Job.to_string())
         .context(IllegalContainerNameSnafu)?;
@@ -383,14 +383,12 @@ fn init_containers(
         "pip install --target={VOLUME_MOUNT_PATH_REQ} {req}"
     ));
 
-    rcb.image(spark_image)
+    rcb.image(&spark_image.image)
         .command(vec!["/bin/bash".to_string(), "-c".to_string()])
         .args(vec![args.join(" && ")])
         .add_volume_mount(VOLUME_MOUNT_NAME_REQ, VOLUME_MOUNT_PATH_REQ)
-        .add_volume_mount(VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_PATH_LOG);
-    if let Some(image_pull_policy) = spark_application.spark_image_pull_policy() {
-        rcb.image_pull_policy(image_pull_policy.to_string());
-    }
+        .add_volume_mount(VOLUME_MOUNT_NAME_LOG, VOLUME_MOUNT_PATH_LOG)
+        .image_pull_policy(&spark_image.image_pull_policy);
 
     rcb.resources(
         ResourceRequirementsBuilder::new()
@@ -418,7 +416,7 @@ fn init_containers(
             format!("{STACKABLE_MOUNT_PATH_TLS}/{cert_secret}"),
         );
     }
-    tcb.image(spark_image)
+    tcb.image(&spark_image.image)
         .command(vec!["/bin/bash".to_string(), "-c".to_string()])
         .args(vec![args.join(" && ")])
         .add_volume_mount(STACKABLE_TRUST_STORE_NAME, STACKABLE_TRUST_STORE)
@@ -453,7 +451,8 @@ fn pod_template(
     let mut cb = ContainerBuilder::new(&container_name).context(IllegalContainerNameSnafu)?;
     cb.add_volume_mounts(config.volume_mounts.clone())
         .add_env_vars(env.to_vec())
-        .resources(config.resources.clone().into());
+        .resources(config.resources.clone().into())
+        .image_pull_policy(&spark_image.image_pull_policy);
 
     if config.logging.enable_vector_agent {
         cb.add_env_var(
@@ -467,10 +466,6 @@ fn pod_template(
         );
     }
 
-    if let Some(image_pull_policy) = spark_application.spark_image_pull_policy() {
-        cb.image_pull_policy(image_pull_policy.to_string());
-    }
-
     let mut pb = PodBuilder::new();
     pb.metadata(
         ObjectMetaBuilder::new()
@@ -493,7 +488,7 @@ fn pod_template(
         &config.logging,
         s3conn,
         s3logdir,
-        &spark_image.image,
+        spark_image,
     )
     .unwrap();
 
@@ -501,7 +496,7 @@ fn pod_template(
         pb.add_init_container(init_container.clone());
     }
 
-    if let Some(image_pull_secrets) = spark_application.spark_image_pull_secrets() {
+    if let Some(image_pull_secrets) = spark_image.pull_secrets.as_ref() {
         pb.image_pull_secrets(
             image_pull_secrets
                 .iter()
@@ -697,11 +692,8 @@ fn spark_job(
                 ),
             )
             // TODO: move this to the image
-            .add_env_var("SPARK_CONF_DIR", "/stackable/spark/conf");
-
-    if let Some(image_pull_policy) = spark_application.spark_image_pull_policy() {
-        cb.image_pull_policy(image_pull_policy.to_string());
-    }
+            .add_env_var("SPARK_CONF_DIR", "/stackable/spark/conf")
+            .image_pull_policy(&spark_image.image_pull_policy);
 
     let mut volumes = vec![
         VolumeBuilder::new(VOLUME_MOUNT_NAME_CONFIG)
@@ -754,7 +746,7 @@ fn spark_job(
                 restart_policy: Some("Never".to_string()),
                 service_account_name: serviceaccount.metadata.name.clone(),
                 volumes: Some(volumes),
-                image_pull_secrets: spark_application.spark_image_pull_secrets(),
+                image_pull_secrets: spark_image.pull_secrets.clone(),
                 security_context: Some(security_context()),
                 ..PodSpec::default()
             }),
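
Note: with the dedicated spec.sparkImagePullPolicy/spec.sparkImagePullSecrets fields removed, pull policy and
pull secrets are expressed through the sparkImage stanza that ProductImage already provides, and the controller
reads them from the single ResolvedProductImage (image, image_pull_policy, pull_secrets) shown in the hunks above.
A minimal sketch of the replacement spec, assuming the stackable-operator (operator-rs) ProductImage schema in use
exposes pullPolicy and pullSecrets; those two field names follow Stackable's product image selection convention
and do not appear in this patch itself:

---
apiVersion: spark.stackable.tech/v1alpha1
kind: SparkApplication
metadata:
  name: spark-pi-local
  namespace: default
spec:
  sparkImage:
    productVersion: 3.2.1
    pullPolicy: Always       # assumption: provided by ProductImage; replaces spec.sparkImagePullPolicy
    pullSecrets:             # assumption: provided by ProductImage; replaces spec.sparkImagePullSecrets
      - name: myregistrykey
  mode: cluster
  mainClass: org.apache.spark.examples.SparkPi
  mainApplicationFile: local:///stackable/spark/examples/jars/spark-examples.jar

This mirrors the removed test_image_actions fixture: the same policy and secret move from top-level spec fields
into the image stanza, so one resolved image carries all pull configuration instead of threading the
SparkApplication getters through each builder.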