Prepare 0.3.3 (#96)
* bump version

* update changelog

* add small timeout for self-heal full data loss test

* add specific cache raft shutdown call
sebadob authored Dec 23, 2024
1 parent 0345f2c commit 33fb6be
Showing 5 changed files with 18 additions and 8 deletions.
CHANGELOG.md (2 changes: 1 addition & 1 deletion)
@@ -1,6 +1,6 @@
# Changelog

-## UNRELEASED
+## v0.3.3

Fixes an issue where the DB writer would panic because of a mismatch in DB Migrations validation. This could have
happened if migrations were applied between application starts. The `hiqlite::Client` in that case would only send the
Cargo.toml (2 changes: 1 addition & 1 deletion)
@@ -4,7 +4,7 @@ members = ["hiqlite"]
exclude = ["examples"]

[workspace.package]
version = "0.3.3-20241213"
version = "0.3.3"
edition = "2021"
license = "Apache-2.0"
authors = ["Sebastian Dobe <[email protected]>"]
hiqlite/src/client/mgmt.rs (10 changes: 8 additions & 2 deletions)
@@ -194,6 +194,12 @@ impl Client {
#[cfg(feature = "sqlite")] tx_client_db: &flume::Sender<ClientStreamReq>,
tx_shutdown: &Option<watch::Sender<bool>>,
) -> Result<(), Error> {
+#[cfg(feature = "cache")]
+{
+info!("Shutting down raft cache layer");
+state.raft_cache.raft.shutdown().await?;
+}
+
#[cfg(feature = "sqlite")]
{
info!("Shutting down raft sqlite layer");
@@ -228,10 +234,10 @@ impl Client {
}
}

-#[cfg(feature = "sqlite")]
-let _ = tx_client_db.send_async(ClientStreamReq::Shutdown).await;
#[cfg(feature = "cache")]
let _ = tx_client_cache.send_async(ClientStreamReq::Shutdown).await;
+#[cfg(feature = "sqlite")]
+let _ = tx_client_db.send_async(ClientStreamReq::Shutdown).await;

if let Some(tx) = tx_shutdown {
let _ = tx.send(true);
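The reordering above means that with both the `cache` and `sqlite` features enabled, `shutdown()` now stops the cache Raft layer before the SQLite layer and only then signals the client stream tasks. A minimal sketch of how an application would trigger this path, assuming the `hiqlite::Client::shutdown()` API also used in the test below (client setup elided):

```rust
use hiqlite::{Client, Error};

// Sketch only: a graceful application shutdown. With this commit, a single
// `shutdown()` call stops the cache Raft first (when the `cache` feature is
// enabled), then the sqlite Raft, then the client stream senders.
async fn graceful_shutdown(client: Client) -> Result<(), Error> {
    client.shutdown().await?;
    Ok(())
}
```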
hiqlite/src/init.rs (5 changes: 3 additions & 2 deletions)
@@ -358,8 +358,6 @@ async fn try_become(
// -> We must check this after each error to get smooth rolling releases.
if let Some((Some(leader_id), Some(node))) = err.is_forward_to_leader() {
if leader_id == this_node {
info!("This node became the raft leader in the meantime - skipping init");

if !helpers::is_raft_initialized(state, raft_type).await? {
let leader = helpers::get_raft_leader(state, raft_type).await;
let metrics = helpers::get_raft_metrics(state, raft_type).await;
@@ -371,13 +369,16 @@
Because the in-memory Raft does not save the state between restarts, you must wait at least
for the duration of a leader heartbeat timeout before trying to re-join the cluster.
Raft Type: {raft_type:?}
+This node: {this_node}
Leader: {leader:?}: {node:?}
Metrics: {metrics:?}
"#
);
}

info!("This node became the raft leader in the meantime - skipping init");

return Ok(SkipBecome::Yes);
}
}
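The panic message above states the recovery rule for the in-memory Raft: it keeps no state across restarts, so a node that lost its data must wait at least one leader heartbeat timeout before trying to re-join. A hedged sketch of that ordering, reusing the integration-test helpers from this repository (`build_config`, `start_node_with_cache`, `Cache`) and an assumed one-second delay that matches the test change below:

```rust
use hiqlite::{Client, Error};
use std::time::Duration;
use tokio::time;

// Sketch only: restart a node after full data loss. The sleep gives a prior
// leader heartbeat timeout a chance to elapse before this node re-joins.
async fn rejoin_after_data_loss(node_id: u64) -> Result<Client, Error> {
    time::sleep(Duration::from_secs(1)).await;
    let client = start_node_with_cache::<Cache>(build_config(node_id).await).await?;
    Ok(client)
}
```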
hiqlite/tests/cluster/self_heal.rs (7 changes: 5 additions & 2 deletions)
@@ -164,13 +164,16 @@ async fn shutdown_lock_sm_db_restart(client: Client, node_id: u64) -> Result<Cli
async fn shutdown_remove_all_restart(client: Client, node_id: u64) -> Result<Client, Error> {
log(format!("Shutting down client {}", node_id));
client.shutdown().await?;
-// time::sleep(Duration::from_millis(2000)).await;
+time::sleep(Duration::from_secs(1)).await;

let folder = folder_base(node_id);
log(format!("Deleting {}", folder));
fs::remove_dir_all(folder).await?;

log(format!("Re-starting client {}", node_id));
log(format!(
"Re-starting client {} after full data deletion",
node_id
));
let client = start_node_with_cache::<Cache>(build_config(node_id).await).await?;
time::sleep(Duration::from_millis(150)).await;

