Add debug info
boundless-forest committed Apr 22, 2024
1 parent 6365fe9 commit 7445992
Showing 5 changed files with 41 additions and 30 deletions.
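
The running theme of the diff below is retargeting debug output from the shared "frontier-sql" log target to a dedicated "bear" target, so the new lines can be switched on in isolation. As a minimal sketch of how log targets gate output (the env_logger setup here is illustrative, not part of this commit):

fn main() {
    // Equivalent to running the binary with RUST_LOG=bear=debug: only the
    // "bear" target is enabled at debug level.
    env_logger::Builder::new()
        .parse_filters("bear=debug")
        .init();

    log::debug!(target: "bear", "emitted: the bear target is enabled");
    log::debug!(target: "frontier-sql", "suppressed: no filter enables this target");
}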
27 changes: 14 additions & 13 deletions client/db/src/sql/mod.rs
@@ -314,22 +314,24 @@ where
let block_number =
UniqueSaturatedInto::<u32>::unique_saturated_into(header_number) as i32;
let is_canon = match client.hash(header_number) {
- Ok(Some(inner_hash)) => (inner_hash == hash) as i32,
+ Ok(Some(inner_hash)) => {
+ (inner_hash == hash) as i32
+ },
Ok(None) => {
- log::debug!(target: "frontier-sql", "[Metadata] Missing header for block #{block_number} ({hash:?})");
+ log::debug!(target: "bear", "[Metadata] Missing header for block #{block_number} ({hash:?})");
0
}
Err(err) => {
log::debug!(
- target: "frontier-sql",
+ target: "bear",
"[Metadata] Failed to retrieve header for block #{block_number} ({hash:?}): {err:?}",
);
0
}
};

- log::trace!(
- target: "frontier-sql",
+ log::debug!(
+ target: "bear",
"[Metadata] Prepared block metadata for #{block_number} ({hash:?}) canon={is_canon}",
);
Ok(BlockMetadata {
@@ -441,6 +443,7 @@ where
BE: BackendT<Block> + 'static,
BE::State: StateBackend<BlakeTwo256>,
{
+ log::debug!(target: "bear", "Insert logs for the block, hash: {:?}", block_hash);
let pool = self.pool().clone();
let overrides = self.overrides.clone();
let _ = async {
@@ -505,11 +508,10 @@ where
}
.await
.map_err(|e| {
- log::error!(target: "frontier-sql", "{e}");
+ log::error!(target: "bear", "index block logs error: {e}");
});
// https://www.sqlite.org/pragma.html#pragma_optimize
let _ = sqlx::query("PRAGMA optimize").execute(&pool).await;
- log::debug!(target: "frontier-sql", "Batch committed");
}

fn get_logs<Client, BE>(
@@ -557,10 +559,7 @@ where
});
}
}
- log::debug!(
- target: "frontier-sql",
- "Ready to commit {log_count} logs from {transaction_count} transactions"
- );
+ log::debug!(target: "bear", "Ready to commit {:?} logs for block {:?}", log_count, substrate_block_hash);
logs
}

@@ -904,7 +903,7 @@ impl<Block: BlockT<Hash = H256>> fc_api::LogIndexerBackend<Block> for Backend<Bl
log::debug!(target: "frontier-sql", "Sqlite progress_handler triggered for {log_key2}");
false
});
- log::debug!(target: "frontier-sql", "Query: {sql:?} - {log_key}");
+ log::debug!(target: "bear", "Query in the sql backend: {sql:?} - {log_key}");

let mut out: Vec<FilteredLog<Block>> = vec![];
let mut rows = query.fetch(&mut *conn);
@@ -949,11 +948,13 @@ impl<Block: BlockT<Hash = H256>> fc_api::LogIndexerBackend<Block> for Backend<Bl
.remove_progress_handler();

if let Some(err) = maybe_err {
- log::error!(target: "frontier-sql", "Failed to query sql db: {err:?} - {log_key}");
+ log::error!(target: "bear", "Failed to query sql db: {err:?} - {log_key}");
return Err("Failed to query sql db with statement".to_string());
}

log::info!(target: "frontier-sql", "FILTER remove handler - {log_key}");
+
+ log::debug!(target: "bear", "Query result in the sql backend: {:?}", out);
Ok(out)
}
}
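
The first hunk above restructures the canonicality check without changing its logic: a block is canonical exactly when the best chain's hash at that height equals the block's own hash. A self-contained sketch of the rule, with illustrative types rather than Frontier's:

// Returns 1 when looking the number up on the best chain yields the same
// hash, and 0 when it yields a different hash or none at all.
fn is_canon(best_hash_at: impl Fn(u32) -> Option<[u8; 32]>, number: u32, hash: [u8; 32]) -> i32 {
    match best_hash_at(number) {
        Some(inner_hash) => (inner_hash == hash) as i32,
        None => 0,
    }
}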
2 changes: 1 addition & 1 deletion client/mapping-sync/src/lib.rs
@@ -16,7 +16,7 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

- #![deny(unused_crate_dependencies)]
+ // #![deny(unused_crate_dependencies)]
#![allow(clippy::too_many_arguments)]

pub mod kv;
34 changes: 19 additions & 15 deletions client/mapping-sync/src/sql/mod.rs
@@ -91,7 +91,7 @@ where
// Attempt to resume from last indexed block. If there is no data in the db, sync genesis.
match indexer_backend.get_last_indexed_canon_block().await.ok() {
Some(last_block_hash) => {
- log::debug!(target: "frontier-sql", "Resume from last block {last_block_hash:?}");
+ log::debug!(target: "frontier-sql", "===> Resume from last block {last_block_hash:?}");
if let Some(parent_hash) = client
.header(last_block_hash)
.ok()
@@ -113,6 +113,7 @@ where
};
}
WorkerCommand::IndexLeaves(leaves) => {
+ log::debug!(target: "bear", "===> receive index leaves command after timeout: leaves: {:?}", leaves);
for leaf in leaves {
index_block_and_ancestors(
client.clone(),
@@ -124,6 +125,7 @@ where
}
}
WorkerCommand::IndexBestBlock(block_hash) => {
+ log::debug!(target: "bear", "===> receive index best block command, block_hash: {:?}", block_hash);
index_canonical_block_and_ancestors(
client.clone(),
substrate_backend.clone(),
@@ -144,15 +146,17 @@ where
enacted,
retracted,
} => {
+ log::debug!(target: "bear", "===> receive canonicalize command, common: {common:?}, enacted: {enacted:?}, retracted: {retracted:?}");
canonicalize_blocks(indexer_backend.clone(), common, enacted, retracted)
.await;
}
WorkerCommand::CheckIndexedBlocks => {
+ log::debug!(target: "bear", "===> receive check indexed blocks command");
// Fix any indexed blocks that did not have their logs indexed
if let Some(block_hash) =
indexer_backend.get_first_pending_canon_block().await
{
- log::debug!(target: "frontier-sql", "Indexing pending canonical block {block_hash:?}");
+ log::debug!(target: "bear", "Indexing pending canonical block, add missing logs {block_hash:?}");
indexer_backend
.index_block_logs(client.clone(), block_hash)
.await;
@@ -224,7 +228,7 @@ where
}
notification = notifications.next() => if let Some(notification) = notification {
log::debug!(
- target: "frontier-sql",
+ target: "bear",
"📣 New notification: #{} {:?} (parent {}), best = {}",
notification.header.number(),
notification.hash,
@@ -234,7 +238,7 @@
if notification.is_new_best {
if let Some(tree_route) = notification.tree_route {
log::debug!(
- target: "frontier-sql",
+ target: "bear",
"🔀 Re-org happened at new best {}, proceeding to canonicalize db",
notification.hash
);
@@ -296,14 +300,13 @@ async fn index_block_and_ancestors<Block, Backend, Client>(
break;
}

- log::debug!(target: "frontier-sql", "🛠️ Importing {hash:?}");
+ log::debug!(target: "bear", "🛠️ Importing {hash:?}");
let _ = indexer_backend
.insert_block_metadata(client.clone(), hash)
.await
.map_err(|e| {
- log::error!(target: "frontier-sql", "{e}");
+ log::error!(target: "bear", "{e}");
});
- log::debug!(target: "frontier-sql", "Inserted block metadata");
indexer_backend.index_block_logs(client.clone(), hash).await;

if let Ok(Some(header)) = blockchain_backend.header(hash) {
@@ -342,7 +345,7 @@ async fn index_canonical_block_and_ancestors<Block, Backend, Client>(

// exit if canonical block is already imported
if status.indexed && status.canon {
- log::debug!(target: "frontier-sql", "🔴 Block {hash:?} already imported");
+ log::debug!(target: "bear", "🔴 Block {hash:?} already imported");
break;
}

@@ -364,14 +367,13 @@
}

// Else, import the new block
- log::debug!(target: "frontier-sql", "🛠️ Importing {hash:?}");
+ log::debug!(target: "bear", "🛠️ Importing {hash:?}");
let _ = indexer_backend
.insert_block_metadata(client.clone(), hash)
.await
.map_err(|e| {
- log::error!(target: "frontier-sql", "{e}");
+ log::error!(target: "bear", "insert block metadata {e}");
});
- log::debug!(target: "frontier-sql", "Inserted block metadata {hash:?}");
indexer_backend.index_block_logs(client.clone(), hash).await;

if let Ok(Some(header)) = blockchain_backend.header(hash) {
@@ -416,13 +418,13 @@ async fn index_missing_blocks<Block, Client, Backend>(
Backend::State: StateBackend<BlakeTwo256>,
{
if let Some(block_number) = indexer_backend.get_first_missing_canon_block().await {
- log::debug!(target: "frontier-sql", "Missing {block_number:?}");
+ log::debug!(target: "bear", "Inserting missing block {block_number:?}");
if block_number == 0 {
index_genesis_block(client.clone(), indexer_backend.clone()).await;
} else if let Ok(Some(block_hash)) = client.hash(block_number.unique_saturated_into()) {
log::debug!(
- target: "frontier-sql",
- "Indexing past canonical blocks from #{} {:?}",
+ target: "bear",
+ "Indexing past canonical blocks from #{} {:?}, add missing block",
block_number,
block_hash,
);
@@ -1190,6 +1192,7 @@ mod test {
best_block_hashes.insert(0, block_hash);
parent_hash = block_hash;
}
+ println!("===> bear: testcases: {:?}", best_block_hashes);

// Mark the block as canon and indexed
let block_resume_at = best_block_hashes[0];
@@ -1245,6 +1248,7 @@ mod test {
.map(|row| H256::from_slice(&row.get::<Vec<u8>, _>(0)[..]))
.collect::<Vec<H256>>();
let expected_imported_blocks = best_block_hashes.clone();
+ println!("===> bear: testcases expected result: {:?}", expected_imported_blocks);
assert_eq!(expected_imported_blocks, actual_imported_blocks);
}

@@ -1348,7 +1352,7 @@ mod test {
futures_timer::Delay::new(std::time::Duration::from_millis(200)).await;

// Import 3 blocks as part of normal operation, storing them oldest first.
- sync_oracle_wrapper.set_sync_status(false);
+ // sync_oracle_wrapper.set_sync_status(false);
let mut parent_hash = client
.hash(sp_runtime::traits::Zero::zero())
.unwrap()
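
Several hunks above add one "===> receive ..." line per WorkerCommand match arm. A hedged alternative, sketched with a simplified stand-in enum rather than Frontier's actual type: derive Debug on the command and trace it once at the top of the dispatch.

// Illustrative sketch: deriving Debug lets a single log line cover every
// command variant instead of one hand-written line per match arm.
#[derive(Debug)]
enum WorkerCommand {
    IndexBestBlock(u64),
    CheckIndexedBlocks,
}

fn dispatch(cmd: WorkerCommand) {
    log::debug!(target: "bear", "===> received worker command: {:?}", cmd);
    match cmd {
        WorkerCommand::IndexBestBlock(_n) => { /* index the block and its ancestors */ }
        WorkerCommand::CheckIndexedBlocks => { /* backfill blocks whose logs are missing */ }
    }
}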
6 changes: 6 additions & 0 deletions client/rpc/src/eth/filter.rs
@@ -465,6 +465,7 @@ where
}

async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>> {
+ log::debug!(target: "bear", "===> RPC logs request, filter: {:?}", filter);
let client = Arc::clone(&self.client);
let block_data_cache = Arc::clone(&self.block_data_cache);
let backend = Arc::clone(&self.backend);
@@ -510,6 +511,7 @@ where
.map(|s| s.unique_saturated_into())
.unwrap_or(best_number);

+ log::debug!(target: "bear", "actually params, from_number: {:?}, to_number: {:?}", from_number, current_number);
if backend.is_indexed() {
let _ = filter_range_logs_indexed(
client.as_ref(),
@@ -535,6 +537,7 @@ where
.await?;
}
}
+ log::debug!(target: "bear", "===> RPC logs return, ret.length: {:?}", ret.len());
Ok(ret)
}
}
@@ -597,6 +600,7 @@ where
)
.await
{
+ log::debug!(target: "bear", "Fetch {} logs from the backend sql db", logs.len());
let time_fetch = timer_fetch.elapsed().as_millis();
let timer_post = Instant::now();

@@ -671,6 +675,8 @@ where
time_fetch,
time_post,
);
+ } else {
+ log::debug!(target: "bear", "Failed to query sql db in the filter_range_logs_indexed.");
}

log::info!(
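
The filter.rs additions land inside existing timing instrumentation (timer_fetch and timer_post in filter_range_logs_indexed). A minimal sketch of that two-phase timing pattern, with placeholder phase bodies:

use std::time::Instant;

fn timed_query() {
    let timer_fetch = Instant::now();
    // Phase 1: fetch matching logs from the SQL backend.
    let time_fetch = timer_fetch.elapsed().as_millis();

    let timer_post = Instant::now();
    // Phase 2: post-process the fetched rows into RPC Log values.
    let time_post = timer_post.elapsed().as_millis();

    log::debug!(target: "bear", "fetch: {time_fetch}ms, post: {time_post}ms");
}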
2 changes: 1 addition & 1 deletion client/rpc/src/lib.rs
@@ -24,7 +24,7 @@
clippy::len_zero,
clippy::new_without_default
)]
- #![deny(unused_crate_dependencies)]
+ // #![deny(unused_crate_dependencies)]

mod eth;
mod eth_pubsub;
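
Both lib.rs hunks relax the same crate-level lint, presumably because the denied lint turns any dependency left temporarily unused by debug-only edits into a hard compile error:

// Crate-root attribute: while active, any Cargo dependency that no code
// path in the crate uses fails the build with an unused_crate_dependencies
// error, which is easy to trip while debug statements come and go.
#![deny(unused_crate_dependencies)]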
