chore: handle this evil workflow_info test (#543)
Zeeshan Lakhani committed Jan 30, 2024
1 parent 483ab19 commit ec49d6c
Showing 3 changed files with 18 additions and 16 deletions.
.config/nextest.toml (2 changes: 1 addition & 1 deletion)
@@ -5,7 +5,7 @@ threads-required = 1
 slow-timeout = { period = "60s", terminate-after = 1 }
 
 [profile.ci]
-retries = { backoff = "exponential", count = 3, delay = "30s", jitter = true, max-delay = "300s" }
+retries = { backoff = "exponential", count = 4, delay = "30s", jitter = true, max-delay = "300s" }
 failure-output = "immediate-final"
 leak-timeout = "800ms"
 slow-timeout = { period = "100s", terminate-after = 2 }
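For context, a minimal sketch of how the bumped `[profile.ci]` retry settings are expected to play out under nextest's exponential backoff, assuming each retry doubles the previous delay and ignoring jitter; the helper below is illustrative and not part of the commit:

    // Illustrative only: nominal retry delays for
    // retries = { backoff = "exponential", count = 4, delay = "30s", max-delay = "300s" },
    // assuming the delay doubles per retry and is capped at max-delay.
    fn backoff_schedule(count: u32, delay_secs: u64, max_delay_secs: u64) -> Vec<u64> {
        (0..count)
            .map(|i| (delay_secs << i).min(max_delay_secs))
            .collect()
    }

    fn main() {
        // With count = 4, delay = 30s, max-delay = 300s this prints [30, 60, 120, 240].
        println!("{:?}", backoff_schedule(4, 30, 300));
    }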
homestar-runtime/tests/cli.rs (5 changes: 3 additions & 2 deletions)
@@ -241,9 +241,9 @@ fn test_workflow_run_integration() -> Result<()> {
 }
 
 #[test]
-#[serial_test::file_serial]
+#[serial_test::parallel]
 #[cfg(not(windows))]
-fn test_daemon_serial() -> Result<()> {
+fn test_daemon_integration() -> Result<()> {
     let proc_info = ProcInfo::new().unwrap();
     let rpc_port = proc_info.rpc_port;
     let metrics_port = proc_info.metrics_port;
@@ -258,6 +258,7 @@ fn test_daemon_serial() -> Result<()> {
         [node.network.rpc]
         host = "127.0.0.1"
         port = {rpc_port}
+        server_timeout = 300
         [node.network.webserver]
         port = {ws_port}
         "#
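As a side note on the attribute swap above, a small sketch of how the serial_test attributes gate execution under that crate's usual semantics: `#[parallel]` tests may overlap with each other but never with a `#[serial]` test. The test names below are made up:

    use serial_test::{parallel, serial};

    #[test]
    #[parallel]
    fn may_run_alongside_other_parallel_tests() {
        // Runs concurrently with other #[parallel] tests,
        // but never while a #[serial] test holds the lock.
    }

    #[test]
    #[serial]
    fn runs_exclusively() {
        // Waits for in-flight #[parallel] tests to finish,
        // then runs alone.
    }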
homestar-runtime/tests/network/dht.rs (27 changes: 14 additions & 13 deletions)
@@ -3,7 +3,8 @@ use crate::{
     utils::{
         check_for_line_with, kill_homestar, listen_addr, multiaddr, retrieve_output,
         wait_for_socket_connection, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME,
-        ED25519MULTIHASH, ED25519MULTIHASH2, ED25519MULTIHASH3, SECP256K1MULTIHASH,
+        ED25519MULTIHASH, ED25519MULTIHASH2, ED25519MULTIHASH3, ED25519MULTIHASH5,
+        SECP256K1MULTIHASH,
     },
 };
 use anyhow::Result;
@@ -567,7 +568,7 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> {
 #[test]
 #[allow(unused_must_use)]
 #[flaky_test::flaky_test]
-#[serial_test::parallel]
+#[serial_test::serial]
 fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> {
     let proc_info1 = ProcInfo::new().unwrap();
     let proc_info2 = ProcInfo::new().unwrap();
@@ -580,15 +581,15 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> {
     let ws_port2 = proc_info2.ws_port;
     let listen_addr1 = listen_addr(proc_info1.listen_port);
     let listen_addr2 = listen_addr(proc_info2.listen_port);
-    let node_addra = multiaddr(proc_info1.listen_port, ED25519MULTIHASH);
-    let node_addrb = multiaddr(proc_info2.listen_port, SECP256K1MULTIHASH);
+    let node_addra = multiaddr(proc_info1.listen_port, ED25519MULTIHASH2);
+    let node_addrb = multiaddr(proc_info2.listen_port, ED25519MULTIHASH5);
     let toml1 = format!(
         r#"
         [node]
         [node.network.keypair_config]
-        existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" }}
+        existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }}
         [node.network.libp2p]
-        idle_connection_timeout = 180
+        idle_connection_timeout = 240
         listen_address = "{listen_addr1}"
         node_addresses = ["{node_addrb}"]
         [node.network.libp2p.dht]
@@ -649,9 +650,9 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> {
         r#"
         [node]
         [node.network.keypair_config]
-        existing = {{ key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" }}
+        existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_5.pem" }}
         [node.network.libp2p]
-        idle_connection_timeout = 180
+        idle_connection_timeout = 240
         listen_address = "{listen_addr2}"
         node_addresses = ["{node_addra}"]
         [node.network.libp2p.dht]
@@ -734,7 +735,7 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> {
     // We want node two to request workflow info directly from node one
     // because of timeouts not because workflow info was missing from the
     // DHT, so we give node one time to put add workflow info to the DHT.
-    tokio::time::sleep(Duration::from_secs(7)).await;
+    tokio::time::sleep(Duration::from_secs(9)).await;
 
     // Run the same workflow run on node two.
     // Node two should be request workflow info from
@@ -751,7 +752,7 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> {
     // Poll for sent workflow info message
     let sent_workflow_info_cid: Cid;
     loop {
-        if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(60)).await {
+        if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await {
             let json: serde_json::Value =
                 serde_json::from_slice(&msg.unwrap().unwrap()).unwrap();
 
@@ -826,15 +827,15 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> {
     // Check node two got workflow info providers
     let got_workflow_info_provider_logged = check_for_line_with(
         stdout2.clone(),
-        vec!["got workflow info providers", ED25519MULTIHASH],
+        vec!["got workflow info providers", ED25519MULTIHASH2],
     );
 
     // Check node one sent workflow info
     let sent_workflow_info_logged = check_for_line_with(
         stdout1.clone(),
         vec![
             "sent workflow info to peer",
-            SECP256K1MULTIHASH,
+            ED25519MULTIHASH5,
             "bafyrmibetj4cwo5lfz63zc4qtjvs4xmzvsxucggruo6rnvw7x62fggrii4",
         ],
     );
@@ -844,7 +845,7 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> {
         stdout2.clone(),
         vec![
             "received workflow info from peer",
-            ED25519MULTIHASH,
+            ED25519MULTIHASH2,
             "bafyrmibetj4cwo5lfz63zc4qtjvs4xmzvsxucggruo6rnvw7x62fggrii4",
         ],
     );
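The assertions above lean on the repository's `check_for_line_with` test helper. A rough sketch of what such a helper is assumed to do (this is not the actual implementation) is a substring match over every captured log line:

    // Assumed behavior, for illustration: true if any line of the captured
    // stdout contains every expected fragment.
    fn check_for_line_with(stdout: String, expected: Vec<&str>) -> bool {
        stdout
            .lines()
            .any(|line| expected.iter().all(|fragment| line.contains(fragment)))
    }

    fn main() {
        let logs = "sent workflow info to peer 12D3KooW... bafyrmibetj4...".to_string();
        assert!(check_for_line_with(logs, vec!["sent workflow info to peer", "bafyrmibetj4"]));
    }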
