Skip to content

Commit

Permalink
Merge pull request #21 from scarmuega/feat/namespace-aware
Browse files Browse the repository at this point in the history
feat: make control-plane aware of its namespace
  • Loading branch information
Quantumplation authored Nov 8, 2024
2 parents 941591c + 21055a2 commit 277ff0f
Show file tree
Hide file tree
Showing 3 changed files with 39 additions and 11 deletions.
25 changes: 25 additions & 0 deletions src/model/cluster/crd.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,30 @@
use std::collections::BTreeMap;

use k8s_openapi::apimachinery::pkg::api::resource::Quantity;
use kube::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

/// A cpu/memory pair used for one half (either `requests` or `limits`) of a
/// Kubernetes container resource spec.
///
/// Values are kept as strings and wrapped into [`Quantity`] via the `From`
/// impl below — presumably Kubernetes quantity syntax such as "500m" or
/// "512Mi"; TODO confirm against the CRD consumers.
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
pub struct ResourcesInner {
    // CPU quantity string (e.g. "500m") — format not validated here.
    pub cpu: String,
    // Memory quantity string (e.g. "512Mi") — format not validated here.
    pub memory: String,
}
/// Convert a [`ResourcesInner`] into the `BTreeMap<String, Quantity>` shape
/// that the Kubernetes API expects for resource requests/limits, keyed by
/// "cpu" and "memory".
impl From<&ResourcesInner> for BTreeMap<String, Quantity> {
    fn from(value: &ResourcesInner) -> Self {
        let mut quantities = BTreeMap::new();
        quantities.insert("cpu".to_string(), Quantity(value.cpu.clone()));
        quantities.insert("memory".to_string(), Quantity(value.memory.clone()));
        quantities
    }
}

/// Full container resource specification: the `requests` and `limits`
/// halves of a Kubernetes pod resource block, each a cpu/memory pair.
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
pub struct Resources {
    // Minimum resources the scheduler reserves for the container.
    pub requests: ResourcesInner,
    // Upper bound the container is allowed to consume.
    pub limits: ResourcesInner,
}

#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(
kind = "HydraDoomNode",
Expand All @@ -26,6 +49,8 @@ pub struct HydraDoomNodeSpec {
pub seed_input: String,
pub commit_inputs: Vec<String>,
pub start_chain_from: Option<String>,
pub asleep: Option<bool>,
pub resources: Option<Resources>,
}

#[derive(Deserialize, Serialize, Clone, Default, Debug, JsonSchema)]
Expand Down
22 changes: 12 additions & 10 deletions src/model/cluster/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,14 @@ mod node;

pub use crd::*;
pub use node::*;
use tracing::info;

/// Namespace used when `KUBERNETES_NAMESPACE` is not set in the environment.
const DEFAULT_NAMESPACE: &str = "hydra-doom";

/// Resolve the Kubernetes namespace this control plane operates in.
///
/// Reads the `KUBERNETES_NAMESPACE` environment variable, falling back to
/// [`DEFAULT_NAMESPACE`] when it is unset (or not valid unicode).
fn define_namespace() -> String {
    match std::env::var("KUBERNETES_NAMESPACE") {
        Ok(namespace) => namespace,
        Err(_) => DEFAULT_NAMESPACE.to_string(),
    }
}

#[derive(Clone)]
pub struct ClusterState {
store: kube::runtime::reflector::Store<HydraDoomNode>,
Expand All @@ -27,12 +32,16 @@ impl ClusterState {
let admin_key_envelope: KeyEnvelope = serde_json::from_reader(
File::open(admin_key_file).context("unable to open key file")?,
)?;

let admin_sk: SecretKey = admin_key_envelope
.try_into()
.context("Failed to get secret key from file")?;

let namespace = define_namespace();
info!(namespace, "running inside namespace");

let client = kube::Client::try_default().await?;
let nodes: kube::Api<crd::HydraDoomNode> = kube::Api::all(client);
let nodes: kube::Api<crd::HydraDoomNode> = kube::Api::namespaced(client, &namespace);

let (store, writer) = kube::runtime::reflector::store();

Expand All @@ -54,10 +63,6 @@ impl ClusterState {
})
}

pub async fn remote(k8s_api_url: String) -> anyhow::Result<Self> {
todo!()
}

pub fn get_warm_node(&self) -> anyhow::Result<Arc<HydraDoomNode>> {
self.store
.state()
Expand All @@ -69,16 +74,13 @@ impl ClusterState {
}

pub fn get_all_nodes(&self) -> Vec<Arc<crd::HydraDoomNode>> {
println!(
"{:?}",
self.store.state().iter().cloned().collect::<Vec<_>>()
);
self.store.state().iter().cloned().collect()
}

pub fn get_node_by_id(&self, id: &str) -> Option<Arc<HydraDoomNode>> {
let ns = define_namespace();
self.store
.get(&ObjectRef::<HydraDoomNode>::new(id).within(DEFAULT_NAMESPACE))
.get(&ObjectRef::<HydraDoomNode>::new(id).within(&ns))
}
}

Expand Down
3 changes: 2 additions & 1 deletion src/model/cluster/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,7 @@ impl NodeClient {
.tx_builder
.add_player(player_key, utxos, Network::Testnet)
.context("failed to build transaction")?;

debug!("add player tx: {}", hex::encode(&add_player_tx.tx_bytes));

let tx_hash = add_player_tx.tx_hash.0.to_vec();
Expand All @@ -134,7 +135,7 @@ impl NodeClient {
&self.connection.to_websocket_url(),
newtx,
// TODO: make this configurable
Duration::from_secs(10),
Duration::from_secs(30),
)
.await?;

Expand Down

0 comments on commit 277ff0f

Please sign in to comment.