Skip to content

Commit

Permalink
Merge pull request #402 from getlipa/feature/query-lsp-info-sooner-after-failure
Browse files Browse the repository at this point in the history

Query lsp info sooner after failure
  • Loading branch information
danielgranhao authored May 31, 2023
2 parents aaf5b58 + 92a9f39 commit 996c1e3
Show file tree
Hide file tree
Showing 3 changed files with 44 additions and 25 deletions.
12 changes: 9 additions & 3 deletions eel/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ use crate::payment::{FiatValues, Payment, PaymentState, PaymentType};
use crate::random::generate_random_bytes;
use crate::rapid_sync_client::RapidSyncClient;
use crate::storage_persister::StoragePersister;
use crate::task_manager::{RestartIfFailedPeriod, TaskManager, TaskPeriods};
use crate::task_manager::{PeriodConfig, RestartIfFailedPeriod, TaskManager, TaskPeriods};
use crate::tx_broadcaster::TxBroadcaster;
use crate::types::{ChainMonitor, ChannelManager, PeerManager, RapidGossipSync, Router, TxSync};

Expand Down Expand Up @@ -84,7 +84,10 @@ use tokio::time::Duration;

const FOREGROUND_PERIODS: TaskPeriods = TaskPeriods {
sync_blockchain: Duration::from_secs(5 * 60),
update_lsp_info: Some(Duration::from_secs(10 * 60)),
update_lsp_info: Some(PeriodConfig {
success_period: Duration::from_secs(10 * 60),
failure_period: Duration::from_secs(5),
}),
reconnect_to_lsp: Duration::from_secs(10),
update_fees: Some(Duration::from_secs(5 * 60)),
update_graph: Some(RestartIfFailedPeriod::from_secs(2 * 60)),
Expand Down Expand Up @@ -721,7 +724,10 @@ fn get_foreground_periods() -> TaskPeriods {
let period = Duration::from_secs(period);
TaskPeriods {
sync_blockchain: period,
update_lsp_info: Some(period),
update_lsp_info: Some(PeriodConfig {
success_period: period,
failure_period: period,
}),
reconnect_to_lsp: period,
update_fees: Some(period),
update_graph: Some(period),
Expand Down
21 changes: 15 additions & 6 deletions eel/src/task_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,14 @@ use tokio::time::Duration;

pub(crate) type RestartIfFailedPeriod = Duration;

/// Scheduling configuration for a repeating task whose retry cadence
/// differs depending on whether the previous run succeeded or failed.
pub(crate) struct PeriodConfig {
/// Delay before the next attempt after a failed run (typically short,
/// so the task recovers quickly — e.g. 5 s for LSP info updates).
pub failure_period: Duration,
/// Delay before the next run after a successful run (the normal,
/// longer refresh interval).
pub success_period: Duration,
}

pub(crate) struct TaskPeriods {
pub sync_blockchain: Duration,
pub update_lsp_info: Option<Duration>,
pub update_lsp_info: Option<PeriodConfig>,
pub reconnect_to_lsp: Duration,
pub update_fees: Option<Duration>,
pub update_graph: Option<RestartIfFailedPeriod>,
Expand Down Expand Up @@ -101,8 +106,8 @@ impl TaskManager {
.push(self.start_blockchain_sync(periods.sync_blockchain));

// LSP info update.
if let Some(period) = periods.update_lsp_info {
self.task_handles.push(self.start_lsp_info_update(period));
if let Some(config) = periods.update_lsp_info {
self.task_handles.push(self.start_lsp_info_update(config));
}

// Reconnect to LSP LN node.
Expand Down Expand Up @@ -155,11 +160,11 @@ impl TaskManager {
})
}

fn start_lsp_info_update(&self, period: Duration) -> RepeatingTaskHandle {
fn start_lsp_info_update(&self, config: PeriodConfig) -> RepeatingTaskHandle {
let peer_manager = Arc::clone(&self.peer_manager);
let lsp_client = Arc::clone(&self.lsp_client);
let lsp_info = Arc::clone(&self.lsp_info);
self.runtime_handle.spawn_repeating_task(period, move || {
self.runtime_handle.spawn_self_restarting_task(move || {
let peer_manager = Arc::clone(&peer_manager);
let lsp_client = Arc::clone(&lsp_client);
let lsp_info = Arc::clone(&lsp_info);
Expand All @@ -179,8 +184,12 @@ impl TaskManager {
error!("Connecting to peer {} failed: {}", peer, e);
}
}
Some(config.success_period)
}
Err(e) => {
error!("Failed to query LSP, retrying in 10 seconds: {}", e);
Some(config.failure_period)
}
Err(e) => error!("Failed to query LSP: {}", e),
}
}
})
Expand Down
36 changes: 20 additions & 16 deletions eel/tests/p2p_connection_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,16 @@ mod setup_env;
#[cfg(feature = "nigiri")]
mod p2p_connection_test {
use bitcoin::hashes::hex::ToHex;
use eel::errors::RuntimeErrorCode;
use perro::runtime_error;
use serial_test::file_parallel;
use serial_test::file_serial;
use std::thread::sleep;

use crate::setup::mocked_storage_node;
use crate::setup_env::nigiri;
use crate::setup_env::nigiri::NodeInstance;
use crate::wait_for_eq;
use crate::{wait_for_eq, wait_for_ok};

#[test]
#[file_parallel(key, "/tmp/3l-int-tests-lock")]
Expand All @@ -30,24 +32,26 @@ mod p2p_connection_test {
#[test]
#[file_serial(key, "/tmp/3l-int-tests-lock")]
fn test_p2p_connection_with_unreliable_lsp() {
nigiri::ensure_environment_running();
// Start the node when lspd isn't available
nigiri::pause_lspd();
let node = mocked_storage_node().start_or_panic();

// Test disconnect when LSP is down.
{
// Let's shutdown LSPD LND.
nigiri::pause_lspd();
wait_for_eq!(node.get_node_info().num_peers, 0);
}
assert_eq!(
node.query_lsp_fee(),
Err(runtime_error(
RuntimeErrorCode::LspServiceUnavailable,
"Failed to get LSP info"
))
);
assert_eq!(node.get_node_info().num_peers, 0);

// Test reconnect when LSP is back.
{
// Now let's start LSPD LND again.
nigiri::start_lspd();
nigiri::wait_for_healthy_lspd();
wait_for_eq!(node.get_node_info().num_peers, 1);
let peers = nigiri::list_peers(NodeInstance::LspdLnd).unwrap();
assert!(peers.contains(&node.get_node_info().node_pubkey.to_hex()));
}
nigiri::start_lspd();
nigiri::ensure_environment_running();
nigiri::wait_for_healthy_lspd();
wait_for_eq!(node.get_node_info().num_peers, 1);
let peers = nigiri::list_peers(NodeInstance::LspdLnd).unwrap();
assert!(peers.contains(&node.get_node_info().node_pubkey.to_hex()));
wait_for_ok!(node.query_lsp_fee());
}
}

0 comments on commit 996c1e3

Please sign in to comment.