diff --git a/src/stash/src/tests.rs b/src/stash/src/tests.rs
index f23d1892f081c..50879a0d0bca3 100644
--- a/src/stash/src/tests.rs
+++ b/src/stash/src/tests.rs
@@ -11,6 +11,7 @@ use std::collections::{BTreeMap, BTreeSet};
 use std::convert::Infallible;
 use std::time::Duration;
 
+use crate::transaction::MAX_INSERT_ARGUMENTS;
 use crate::{
     AppendBatch, Data, DebugStashFactory, Stash, StashCollection, StashError, StashFactory,
     TableTransaction, Timestamp, TypedCollection, INSERT_BATCH_SPLIT_SIZE,
@@ -570,7 +571,12 @@ async fn test_stash_batch_large_number_updates() {
     Stash::with_debug_stash(|mut stash| async move {
         let col = collection::(&mut stash, "c1").await.unwrap();
         let mut batch = make_batch(&col, &mut stash).await.unwrap();
-        for i in 0..500_000 {
+        // Back of the envelope math would produce 12 batches of updates.
+        //
+        // Each update statement takes 4 arguments, so we have a total of
+        // MAX_INSERT_ARGUMENTS * 4 * 3 arguments, leading to 12 batches of updates.
+        for i in 0..(MAX_INSERT_ARGUMENTS * 3) {
+            let i = i.into();
             col.append_to_batch(&mut batch, &i, &(i + 1), 1);
         }
         append(&mut stash, vec![batch]).await.unwrap();
diff --git a/src/stash/src/transaction.rs b/src/stash/src/transaction.rs
index cd2f42dc661de..f35d8827a2155 100644
--- a/src/stash/src/transaction.rs
+++ b/src/stash/src/transaction.rs
@@ -43,7 +43,7 @@ pub const INSERT_BATCH_SPLIT_SIZE: usize = 2 * 1024 * 1024;
 /// [`tokio_postgres`] has a maximum number of arguments it supports when executing a query. This
 /// is the limit at which to split a batch to make sure we don't try to include too many elements
 /// in any one update.
-const MAX_INSERT_ARGUMENTS: u16 = u16::MAX / 4;
+pub(crate) const MAX_INSERT_ARGUMENTS: u16 = u16::MAX / 4;
 
 impl Stash {
     /// Transactionally executes closure `f`.
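
As a sanity check on the batch arithmetic in the new test comment, here is a minimal standalone sketch. The constant value mirrors MAX_INSERT_ARGUMENTS from src/stash/src/transaction.rs; that each insert batch is capped at MAX_INSERT_ARGUMENTS query arguments is an assumption about the splitting logic, not something stated in this diff.

    fn main() {
        // Mirrors `MAX_INSERT_ARGUMENTS` in src/stash/src/transaction.rs.
        const MAX_INSERT_ARGUMENTS: u16 = u16::MAX / 4; // 16_383

        // The test appends MAX_INSERT_ARGUMENTS * 3 updates; each update
        // statement takes 4 query arguments.
        let updates = u32::from(MAX_INSERT_ARGUMENTS) * 3; // 49_149
        let total_args = updates * 4; // 196_596

        // Assumption: each batch holds at most MAX_INSERT_ARGUMENTS arguments,
        // so the batch count is ceil(total_args / MAX_INSERT_ARGUMENTS).
        let cap = u32::from(MAX_INSERT_ARGUMENTS);
        let batches = (total_args + cap - 1) / cap;

        // 196_596 / 16_383 divides evenly, giving the 12 batches the comment cites.
        assert_eq!(batches, 12);
    }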