@@ -3,6 +3,7 @@ use std::{
     collections::VecDeque,
 };
 
+use anyhow::Context;
 use async_trait::async_trait;
 use common::{
     document::GenericDocument,
@@ -24,6 +25,7 @@ use common::{
     },
     version::Version,
 };
+use maplit::btreemap;
 
 use super::{
     query_scanned_too_many_documents_error,
@@ -35,6 +37,7 @@ use super::{
 };
 use crate::{
     metrics,
+    transaction::IndexRangeRequest,
     Transaction,
 };
 
@@ -138,80 +141,94 @@ impl<T: QueryType> IndexRange<T> {
         }
     }
 
-    #[convex_macro::instrument_future]
-    async fn _next<RT: Runtime>(
+    fn start_next<RT: Runtime>(
         &mut self,
         tx: &mut Transaction<RT>,
         prefetch_hint: Option<usize>,
-    ) -> anyhow::Result<Option<(GenericDocument<T::T>, WriteTimestamp)>> {
-        loop {
-            // If we have an end cursor, for correctness we need to process
-            // the entire interval, so ignore `maximum_rows_read` and `maximum_bytes_read`.
-            let enforce_limits = self.cursor_interval.end_inclusive.is_none();
+    ) -> anyhow::Result<Result<Option<(GenericDocument<T::T>, WriteTimestamp)>, IndexRangeRequest>>
+    {
+        // If we have an end cursor, for correctness we need to process
+        // the entire interval, so ignore `maximum_rows_read` and `maximum_bytes_read`.
+        let enforce_limits = self.cursor_interval.end_inclusive.is_none();
 
-            if enforce_limits
-                && let Some(maximum_bytes_read) = self.maximum_bytes_read
-                && self.returned_bytes >= maximum_bytes_read
-            {
-                // If we're over our data budget, throw an error.
-                // We do this after we've already exceeded the limit to ensure that
-                // paginated queries always scan at least one item so they can
-                // make progress.
-                return Err(query_scanned_too_much_data(self.returned_bytes).into());
-            }
+        if enforce_limits
+            && let Some(maximum_bytes_read) = self.maximum_bytes_read
+            && self.returned_bytes >= maximum_bytes_read
+        {
+            // If we're over our data budget, throw an error.
+            // We do this after we've already exceeded the limit to ensure that
+            // paginated queries always scan at least one item so they can
+            // make progress.
+            return Err(query_scanned_too_much_data(self.returned_bytes).into());
+        }
 
-            if let Some((index_position, v, timestamp)) = self.page.pop_front() {
-                let index_bytes = index_position.len();
-                if let Some(intermediate_cursors) = &mut self.intermediate_cursors {
-                    intermediate_cursors.push(CursorPosition::After(index_position.clone()));
-                }
-                self.cursor_interval.curr_exclusive = Some(CursorPosition::After(index_position));
-                self.returned_results += 1;
-                T::record_read_document(tx, &v, self.printable_index_name.table())?;
-                // Database bandwidth for index reads
-                tx.usage_tracker.track_database_egress_size(
-                    self.printable_index_name.table().to_string(),
-                    index_bytes as u64,
-                    self.printable_index_name.is_system_owned(),
-                );
-                self.returned_bytes += v.size();
-                return Ok(Some((v, timestamp)));
-            }
-            if let Some(CursorPosition::End) = self.cursor_interval.curr_exclusive {
-                return Ok(None);
-            }
-            if self.unfetched_interval.is_empty() {
-                // We're out of results. If we have an end cursor then we must
-                // have reached it. Otherwise we're at the end of the entire
-                // query.
-                self.cursor_interval.curr_exclusive = Some(
-                    self.cursor_interval
-                        .end_inclusive
-                        .clone()
-                        .unwrap_or(CursorPosition::End),
-                );
-                return Ok(None);
+        if let Some((index_position, v, timestamp)) = self.page.pop_front() {
+            let index_bytes = index_position.len();
+            if let Some(intermediate_cursors) = &mut self.intermediate_cursors {
+                intermediate_cursors.push(CursorPosition::After(index_position.clone()));
             }
+            self.cursor_interval.curr_exclusive = Some(CursorPosition::After(index_position));
+            self.returned_results += 1;
+            T::record_read_document(tx, &v, self.printable_index_name.table())?;
+            // Database bandwidth for index reads
+            tx.usage_tracker.track_database_egress_size(
+                self.printable_index_name.table().to_string(),
+                index_bytes as u64,
+                self.printable_index_name.is_system_owned(),
+            );
+            self.returned_bytes += v.size();
+            return Ok(Ok(Some((v, timestamp))));
+        }
+        if let Some(CursorPosition::End) = self.cursor_interval.curr_exclusive {
+            return Ok(Ok(None));
+        }
+        if self.unfetched_interval.is_empty() {
+            // We're out of results. If we have an end cursor then we must
+            // have reached it. Otherwise we're at the end of the entire
+            // query.
+            self.cursor_interval.curr_exclusive = Some(
+                self.cursor_interval
+                    .end_inclusive
+                    .clone()
+                    .unwrap_or(CursorPosition::End),
+            );
+            return Ok(Ok(None));
+        }
 
-            let mut max_rows = prefetch_hint
-                .unwrap_or(DEFAULT_QUERY_PREFETCH)
-                .clamp(1, MAX_QUERY_FETCH);
+        let mut max_rows = prefetch_hint
+            .unwrap_or(DEFAULT_QUERY_PREFETCH)
+            .clamp(1, MAX_QUERY_FETCH);
 
-            if enforce_limits && let Some(maximum_rows_read) = self.maximum_rows_read {
-                if self.rows_read >= maximum_rows_read {
-                    return Err(query_scanned_too_many_documents_error(self.rows_read).into());
-                }
-                max_rows = cmp::min(max_rows, maximum_rows_read - self.rows_read);
+        if enforce_limits && let Some(maximum_rows_read) = self.maximum_rows_read {
+            if self.rows_read >= maximum_rows_read {
+                return Err(query_scanned_too_many_documents_error(self.rows_read).into());
             }
-            let (page, fetch_cursor) = T::index_range(
-                tx,
-                &self.stable_index_name,
-                &self.unfetched_interval,
-                self.order,
-                max_rows,
-                self.version.clone(),
-            )
-            .await?;
+            max_rows = cmp::min(max_rows, maximum_rows_read - self.rows_read);
+        }
+        Ok(Err(IndexRangeRequest {
+            stable_index_name: self.stable_index_name.clone(),
+            interval: self.unfetched_interval.clone(),
+            order: self.order,
+            max_rows,
+            version: self.version.clone(),
+        }))
+    }
+
+    #[convex_macro::instrument_future]
+    async fn _next<RT: Runtime>(
+        &mut self,
+        tx: &mut Transaction<RT>,
+        prefetch_hint: Option<usize>,
+    ) -> anyhow::Result<Option<(GenericDocument<T::T>, WriteTimestamp)>> {
+        loop {
+            let request = match self.start_next(tx, prefetch_hint)? {
+                Ok(result) => return Ok(result),
+                Err(request) => request,
+            };
+            let (page, fetch_cursor) = T::index_range_batch(tx, btreemap! { 0 => request })
+                .await
+                .remove(&0)
+                .context("batch_key missing")??;
             let (_, new_unfetched_interval) =
                 self.unfetched_interval.split(fetch_cursor, self.order);
             anyhow::ensure!(self.unfetched_interval != new_unfetched_interval);
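
The shape of this refactor: `start_next` is the synchronous half and returns either a finished answer or an `IndexRangeRequest` describing the fetch it still needs, while `_next` drives it in a loop and funnels that request through the batched `index_range_batch` call keyed by a single batch key. Below is a minimal, self-contained sketch of that control-flow pattern; `Scanner`, `Request`, `Row`, and `fetch_batch` are illustrative stand-ins, not types from this codebase.

```rust
use std::collections::BTreeMap;

use anyhow::Context;

// Hypothetical stand-ins for the real request/result types.
struct Request {
    max_rows: usize,
}
struct Row(u64);

struct Scanner {
    buffered: Vec<Row>,
}

impl Scanner {
    // Synchronous half: either yield a buffered row or describe the fetch needed.
    fn start_next(&mut self) -> anyhow::Result<Result<Option<Row>, Request>> {
        if let Some(row) = self.buffered.pop() {
            return Ok(Ok(Some(row)));
        }
        Ok(Err(Request { max_rows: 8 }))
    }

    // Async driver: turn the request into a single-entry batch call, mirroring
    // `T::index_range_batch(tx, btreemap! { 0 => request })` in the diff above.
    async fn next(&mut self) -> anyhow::Result<Option<Row>> {
        loop {
            let request = match self.start_next()? {
                Ok(result) => return Ok(result),
                Err(request) => request,
            };
            let mut results = fetch_batch(BTreeMap::from([(0usize, request)])).await;
            // `remove` yields Option<Result<..>>: `.context(..)` turns a missing
            // batch key into an error, and the second `?` propagates the
            // per-request error, hence the double `??`.
            let rows = results.remove(&0).context("batch_key missing")??;
            if rows.is_empty() {
                return Ok(None);
            }
            self.buffered = rows;
        }
    }
}

// Stand-in for the batched fetch; each keyed request resolves independently.
async fn fetch_batch(
    requests: BTreeMap<usize, Request>,
) -> BTreeMap<usize, anyhow::Result<Vec<Row>>> {
    requests
        .into_iter()
        .map(|(key, req)| (key, Ok((0..req.max_rows as u64).map(Row).collect())))
        .collect()
}
```

Splitting the synchronous bookkeeping from the await point is what lets several concurrent queries later share one batched index fetch, since each caller can hand its `IndexRangeRequest` to a common dispatcher instead of issuing its own `index_range` call.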