Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Tidy DelWorkload handling in a pre-snapshot context #1403

Merged
merged 9 commits into from
Dec 20, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
62 changes: 55 additions & 7 deletions src/inpod/statemanager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,7 @@ impl WorkloadProxyManagerState {
));
};
if !self.snapshot_received {
debug!("got workload add before snapshot");
self.snapshot_names.insert(poddata.workload_uid.clone());
}
let netns =
Expand Down Expand Up @@ -172,12 +173,20 @@ impl WorkloadProxyManagerState {
"pod delete request, shutting down proxy"
);
if !self.snapshot_received {
// TODO: consider if this is an error. if not, do this instead:
// self.snapshot_names.remove(&workload_uid)
// self.pending_workloads.remove(&workload_uid)
return Err(Error::ProtocolError(
"pod delete received before snapshot".into(),
));
debug!("got workload delete before snapshot");
// Since we insert here on AddWorkload before we get a snapshot,
// make sure we also opportunistically remove here before we
// get a snapshot
//
// Note that even though AddWorkload starts the workload, we do *not* need
// to stop it here, as it should be auto-dropped subsequently during snapshot
// reconcile(), when we actually get the `SnapshotSent` notification.
self.snapshot_names.remove(&workload_uid);
// `reconcile()` will drop this workload later, but if the workload never
// successfully starts, it will remain in the pending queue (from which
// `reconcile()` cannot remove it), so clear it from the pending queue here.
self.pending_workloads.remove(&workload_uid);
return Ok(());
bleggett marked this conversation as resolved.
Show resolved Hide resolved
}
self.del_workload(&workload_uid);
Ok(())
Expand Down Expand Up @@ -536,7 +545,7 @@ mod tests {
}

#[tokio::test]
async fn idemepotency_add_workload_fails_and_then_deleted() {
async fn idempotency_add_workload_fails_and_then_deleted() {
let fixture = fixture!();
let mut state = fixture.state;

Expand Down Expand Up @@ -567,6 +576,45 @@ mod tests {
state.drain().await;
}

#[tokio::test]
async fn del_workload_before_snapshot_removes_from_snapshot_and_pending() {
    // Verifies that a DelWorkload received *before* the snapshot is complete
    // opportunistically removes the workload from both `snapshot_names` and the
    // pending-start queue, so the later `WorkloadSnapshotSent` reconcile does
    // not resurrect it.
    let fixture = fixture!();
    let mut state = fixture.state;

    let ns = new_netns();

    // To make the proxy fail to start, bind to its ports in its netns.
    let _sock = create_proxy_conflict(&ns);

    let data = WorkloadData {
        netns: ns,
        workload_uid: uid(0),
        workload_info: workload_info(),
    };

    // The add fails (port conflict), but pre-snapshot bookkeeping must still
    // record the workload in `snapshot_names` and leave it pending retry.
    let ret = state.process_msg(WorkloadMessage::AddWorkload(data)).await;

    // assert_eq! over `assert!(.. == ..)`: idiomatic, and prints both sides on failure.
    assert_eq!(state.snapshot_names.len(), 1);
    assert!(ret.is_err());
    assert!(state.have_pending());

    // A pre-snapshot delete must undo that bookkeeping rather than error.
    state
        .process_msg(WorkloadMessage::DelWorkload(uid(0)))
        .await
        .unwrap();

    assert!(state.snapshot_names.is_empty());

    // Completing the snapshot must leave no trace of the deleted workload.
    state
        .process_msg(WorkloadMessage::WorkloadSnapshotSent)
        .await
        .unwrap();

    assert!(state.snapshot_names.is_empty());
    assert!(!state.have_pending());
    state.drain().await;
}

#[tokio::test]
async fn add_delete_add_workload_starts_only_one_proxy() {
let fixture = fixture!();
Expand Down