@@ -16,12 +16,12 @@ use crate::header::{HEADER_SIZE, MAX_INITIAL_BYTES};
 use crate::http::HttpBackend;
 #[cfg(feature = "mmap-async-tokio")]
 use crate::mmap::MmapBackend;
-use crate::tile::{tile_id, Tile};
+use crate::tile::tile_id;
 use crate::{Compression, Header};
 
-pub struct AsyncPmTilesReader<B: AsyncBackend> {
-    pub header: Header,
+pub struct AsyncPmTilesReader<B> {
     backend: B,
+    header: Header,
     root_directory: Directory,
 }
 
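Dropping the `B: AsyncBackend` bound from the struct follows the common Rust guideline of constraining impls rather than type definitions. A minimal self-contained sketch of the pattern (names are illustrative, not from this crate):

```rust
// Putting the bound on the impl keeps the struct definition simple and
// avoids repeating `B: AsyncBackend` everywhere the type is merely named.
trait Backend {}

struct Reader<B> {
    backend: B, // no bound needed on the field
}

impl<B: Backend> Reader<B> {
    // The bound applies only where the backend's capabilities are used.
    fn new(backend: B) -> Self {
        Reader { backend }
    }
}
```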
@@ -30,11 +30,13 @@ impl<B: AsyncBackend + Sync + Send> AsyncPmTilesReader<B> {
     ///
     /// Note: Prefer using new_with_* methods.
     pub async fn try_from_source(backend: B) -> Result<Self, Error> {
-        let mut initial_bytes = backend.read_initial_bytes().await?;
-
-        let header_bytes = initial_bytes.split_to(HEADER_SIZE);
+        // Read the first 127 and up to 16,384 bytes to ensure we can initialize the header and root directory.
+        let mut initial_bytes = backend.read(0, MAX_INITIAL_BYTES).await?;
+        if initial_bytes.len() < HEADER_SIZE {
+            return Err(Error::InvalidHeader);
+        }
 
-        let header = Header::try_from_bytes(header_bytes)?;
+        let header = Header::try_from_bytes(initial_bytes.split_to(HEADER_SIZE))?;
 
         let directory_bytes = initial_bytes
             .split_off((header.root_offset as usize) - HEADER_SIZE)
@@ -44,45 +46,32 @@ impl<B: AsyncBackend + Sync + Send> AsyncPmTilesReader<B> {
             Self::read_compressed_directory(header.internal_compression, directory_bytes).await?;
 
         Ok(Self {
-            header,
             backend,
+            header,
             root_directory,
         })
     }
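The header parse leans on `bytes::Bytes::split_to`, which splits off and returns the first `n` bytes while advancing the original buffer, so the remainder can then be sliced for the root directory with `split_off`. A quick standalone illustration:

```rust
use bytes::Bytes;

fn main() {
    let mut buf = Bytes::from_static(b"headerpayload");
    // split_to(n) returns bytes [0, n) and leaves `buf` starting at n,
    // without copying the underlying storage.
    let head = buf.split_to(6);
    assert_eq!(&head[..], b"header");
    assert_eq!(&buf[..], b"payload");
}
```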
 
-    /// Fetches a [Tile] from the archive.
-    pub async fn get_tile(&self, z: u8, x: u64, y: u64) -> Option<Tile> {
+    /// Fetches tile bytes from the archive.
+    pub async fn get_tile(&self, z: u8, x: u64, y: u64) -> Option<Bytes> {
         let tile_id = tile_id(z, x, y);
-        let entry = self.find_tile_entry(tile_id, None, 0).await?;
-
-        let data = self
-            .backend
-            .read_exact(
-                (self.header.data_offset + entry.offset) as _,
-                entry.length as _,
-            )
-            .await
-            .ok()?;
+        let entry = self.find_tile_entry(tile_id).await?;
 
-        Some(Tile {
-            data,
-            tile_type: self.header.tile_type,
-            tile_compression: self.header.tile_compression,
-        })
+        let offset = (self.header.data_offset + entry.offset) as _;
+        let length = entry.length as _;
+        let data = self.backend.read_exact(offset, length).await.ok()?;
+
+        Some(data)
     }
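With `get_tile` now returning plain `Bytes`, callers that previously read `tile.data` use the bytes directly; note that backend read errors are folded into `None` via `.ok()`. A hedged caller sketch (the `pmtiles::async_reader` paths are this example's assumption):

```rust
use pmtiles::async_reader::{AsyncBackend, AsyncPmTilesReader};

// Returns the tile size, treating "missing tile" and "read error" alike,
// since get_tile collapses backend errors into None.
async fn tile_len<B: AsyncBackend + Sync + Send>(
    reader: &AsyncPmTilesReader<B>,
    z: u8,
    x: u64,
    y: u64,
) -> usize {
    reader
        .get_tile(z, x, y)
        .await
        .map(|bytes| bytes.len())
        .unwrap_or(0)
}
```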
 
     /// Gets metadata from the archive.
     ///
     /// Note: by spec, this should be valid JSON. This method currently returns a [String].
     /// This may change in the future.
     pub async fn get_metadata(&self) -> Result<String, Error> {
-        let metadata = self
-            .backend
-            .read_exact(
-                self.header.metadata_offset as _,
-                self.header.metadata_length as _,
-            )
-            .await?;
+        let offset = self.header.metadata_offset as _;
+        let length = self.header.metadata_length as _;
+        let metadata = self.backend.read_exact(offset, length).await?;
 
         let decompressed_metadata =
             Self::decompress(self.header.internal_compression, metadata).await?;
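Since the spec says the metadata should be JSON, a caller will typically parse the returned `String`. A sketch, assuming serde_json, the module paths, and that the crate's `Error` implements `std::error::Error` (all this example's assumptions):

```rust
use pmtiles::async_reader::{AsyncBackend, AsyncPmTilesReader};

// Decompression happens inside get_metadata; JSON parsing is the caller's job.
async fn metadata_name<B: AsyncBackend + Sync + Send>(
    reader: &AsyncPmTilesReader<B>,
) -> Result<String, Box<dyn std::error::Error>> {
    let raw = reader.get_metadata().await?;
    let json: serde_json::Value = serde_json::from_str(&raw)?;
    Ok(json["name"].as_str().unwrap_or_default().to_string())
}
```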
@@ -132,71 +121,52 @@ impl<B: AsyncBackend + Sync + Send> AsyncPmTilesReader<B> {
         Ok(tj)
     }
 
-    #[async_recursion]
-    async fn find_tile_entry(
-        &self,
-        tile_id: u64,
-        next_dir: Option<Directory>,
-        depth: u8,
-    ) -> Option<Entry> {
-        // Max recursion...
-        if depth >= 4 {
-            return None;
+    /// Recursively locates a tile in the archive.
+    async fn find_tile_entry(&self, tile_id: u64) -> Option<Entry> {
+        let entry = self.root_directory.find_tile_id(tile_id);
+        if let Some(entry) = entry {
+            if entry.is_leaf() {
+                return self.find_entry_rec(tile_id, entry, 0).await;
+            }
         }
+        entry.cloned()
+    }
 
-        let next_dir = next_dir.as_ref().unwrap_or(&self.root_directory);
-
-        match next_dir.find_tile_id(tile_id) {
-            None => None,
-            Some(needle) => {
-                if needle.run_length == 0 {
-                    // Leaf directory
-                    let next_dir = self
-                        .read_directory(
-                            (self.header.leaf_offset + needle.offset) as _,
-                            needle.length as _,
-                        )
-                        .await
-                        .ok()?;
-                    self.find_tile_entry(tile_id, Some(next_dir), depth + 1)
-                        .await
+    #[async_recursion]
+    async fn find_entry_rec(&self, tile_id: u64, entry: &Entry, depth: u8) -> Option<Entry> {
+        // The recursion is done as two functions because it is a bit cleaner,
+        // and it allows the directory to be cached later without cloning it first.
+        let offset = (self.header.leaf_offset + entry.offset) as _;
+        let length = entry.length as _;
+        let dir = self.read_directory(offset, length).await.ok()?;
+        let entry = dir.find_tile_id(tile_id);
+
+        if let Some(entry) = entry {
+            if entry.is_leaf() {
+                return if depth <= 4 {
+                    self.find_entry_rec(tile_id, entry, depth + 1).await
                 } else {
-                    Some(needle.clone())
-                }
+                    None
+                };
             }
         }
+
+        entry.cloned()
     }
 
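`#[async_recursion]` is still needed on `find_entry_rec` because a recursive `async fn` would otherwise have an infinitely sized future. The macro boxes the recursion; roughly the transformation it performs, shown on a toy function:

```rust
use std::future::Future;
use std::pin::Pin;

// What #[async_recursion] expands to, approximately: the returned future
// is boxed, so its size no longer depends on the recursion depth.
fn countdown(n: u8) -> Pin<Box<dyn Future<Output = ()> + Send>> {
    Box::pin(async move {
        if n > 0 {
            countdown(n - 1).await;
        }
    })
}
```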
     async fn read_directory(&self, offset: usize, length: usize) -> Result<Directory, Error> {
-        Self::read_directory_with_backend(
-            &self.backend,
-            self.header.internal_compression,
-            offset,
-            length,
-        )
-        .await
+        let data = self.backend.read_exact(offset, length).await?;
+        Self::read_compressed_directory(self.header.internal_compression, data).await
     }
 
     async fn read_compressed_directory(
         compression: Compression,
         bytes: Bytes,
     ) -> Result<Directory, Error> {
         let decompressed_bytes = Self::decompress(compression, bytes).await?;
-
         Directory::try_from(decompressed_bytes)
     }
 
-    async fn read_directory_with_backend(
-        backend: &B,
-        compression: Compression,
-        offset: usize,
-        length: usize,
-    ) -> Result<Directory, Error> {
-        let directory_bytes = backend.read_exact(offset, length).await?;
-
-        Self::read_compressed_directory(compression, directory_bytes).await
-    }
-
     async fn decompress(compression: Compression, bytes: Bytes) -> Result<Bytes, Error> {
         let mut decompressed_bytes = Vec::with_capacity(bytes.len() * 2);
         match compression {
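The body of `decompress` is outside this hunk, but the gzip branch of such a `match` typically streams through an async decoder into the pre-sized buffer. A sketch under the assumption that the async-compression crate with its tokio feature is used (as the `Vec::with_capacity` pattern suggests):

```rust
use async_compression::tokio::bufread::GzipDecoder;
use tokio::io::AsyncReadExt;

// Decompress a gzip buffer, pre-sizing the output at 2x the input as a guess.
async fn gunzip(bytes: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut out = Vec::with_capacity(bytes.len() * 2);
    let mut decoder = GzipDecoder::new(bytes);
    decoder.read_to_end(&mut out).await?;
    Ok(out)
}
```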
@@ -229,8 +199,8 @@ impl AsyncPmTilesReader<MmapBackend> {
     /// Creates a new PMTiles reader from a file path using the async mmap backend.
     ///
     /// Fails if [p] does not exist or is an invalid archive.
-    pub async fn new_with_path<P: AsRef<Path>>(p: P) -> Result<Self, Error> {
-        let backend = MmapBackend::try_from(p).await?;
+    pub async fn new_with_path<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
+        let backend = MmapBackend::try_from(path).await?;
 
         Self::try_from_source(backend).await
     }
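End to end, the mmap path looks like this for a caller. A minimal sketch assuming the `mmap-async-tokio` feature is enabled, the `pmtiles::async_reader` module path, and an archive at `tiles.pmtiles` (all illustrative):

```rust
use pmtiles::async_reader::AsyncPmTilesReader;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Memory-maps the archive and validates the header and root directory.
    let reader = AsyncPmTilesReader::new_with_path("tiles.pmtiles").await?;
    if let Some(bytes) = reader.get_tile(0, 0, 0).await {
        println!("tile 0/0/0: {} bytes", bytes.len());
    }
    Ok(())
}
```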
@@ -243,16 +213,6 @@ pub trait AsyncBackend {
 
     /// Reads up to `length` bytes starting at `offset`.
     async fn read(&self, offset: usize, length: usize) -> Result<Bytes, Error>;
-
-    /// Read the first 127 and up to 16,384 bytes to ensure we can initialize the header and root directory.
-    async fn read_initial_bytes(&self) -> Result<Bytes, Error> {
-        let bytes = self.read(0, MAX_INITIAL_BYTES).await?;
-        if bytes.len() < HEADER_SIZE {
-            return Err(Error::InvalidHeader);
-        }
-
-        Ok(bytes)
-    }
 }
 
 #[cfg(test)]
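With `read_initial_bytes` gone, a custom backend only has to provide the raw read primitives. A sketch of an in-memory backend, assuming the trait also declares `read_exact` (used throughout but not visible in this hunk) and is annotated with `async_trait`; the short-read error handling here is a placeholder:

```rust
use async_trait::async_trait;
use bytes::Bytes;

struct MemoryBackend {
    data: Bytes,
}

#[async_trait]
impl AsyncBackend for MemoryBackend {
    // Assumed required method; the real trait's signature may differ.
    async fn read_exact(&self, offset: usize, length: usize) -> Result<Bytes, Error> {
        let bytes = self.read(offset, length).await?;
        if bytes.len() == length {
            Ok(bytes)
        } else {
            // Placeholder: reuse an existing variant or add a dedicated one.
            Err(Error::InvalidHeader)
        }
    }

    async fn read(&self, offset: usize, length: usize) -> Result<Bytes, Error> {
        let end = (offset + length).min(self.data.len());
        Ok(self.data.slice(offset.min(self.data.len())..end))
    }
}
```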
@@ -274,11 +234,11 @@ mod tests {
         let tile = tiles.get_tile(z, x, y).await.unwrap();
 
         assert_eq!(
-            tile.data.len(),
+            tile.len(),
             fixture_bytes.len(),
             "Expected tile length to match."
        );
-        assert_eq!(tile.data, fixture_bytes, "Expected tile to match fixture.");
+        assert_eq!(tile, fixture_bytes, "Expected tile to match fixture.");
     }
 
     #[tokio::test]