@@ -79,7 +79,7 @@ enum chunk_kind {
 #undef DEFINE_SMALL_OBJECT_CHUNK_KIND

   SMALL_OBJECT_CHUNK_KINDS,
-  FREE_CHUNK = 254,
+  FREE_LARGE_OBJECT = 254,
   LARGE_OBJECT = 255
 };

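The rename makes the two large-object states explicit: 255 marks the first chunk of a live large object, 254 marks the first chunk of a free one sitting on the freelist, and every smaller kind is a small-object kind encoding a granule count. A minimal sketch of how code might branch on the kind byte after this change; the helper name is hypothetical and not part of the commit, and it assumes the enum above plus <stdint.h>:

    // Hypothetical helper; not part of the patch.
    static int chunk_holds_large_object(uint8_t kind) {
      // Free (254) and live (255) large-object chunks both sort above every
      // small-object kind, which encodes the object size in granules.
      return kind >= FREE_LARGE_OBJECT;
    }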
@@ -109,9 +109,9 @@ static unsigned chunk_kind_to_granules(enum chunk_kind kind) {

 // Given a pointer P returned by malloc(), we get a header pointer via
 // P&~PAGE_MASK, and a chunk index via (P&PAGE_MASK)/CHUNKS_PER_PAGE.  If
-// chunk_kinds[chunk_idx] is LARGE_OBJECT, then the pointer is a large object,
-// otherwise the kind indicates the size in granules of the objects in the
-// chunk.
+// chunk_kinds[chunk_idx] is [FREE_]LARGE_OBJECT, then the pointer is a large
+// object, otherwise the kind indicates the size in granules of the objects in
+// the chunk.
 struct page_header {
   uint8_t chunk_kinds[CHUNKS_PER_PAGE];
 };
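The comment above is the key addressing trick: page headers live at page-aligned addresses, so masking a payload pointer recovers its page's metadata, and the offset within the page selects one of the CHUNKS_PER_PAGE kind bytes. A sketch of that lookup, assuming struct page, PAGE_MASK and CHUNK_SIZE from the surrounding file and assuming the chunk index is the in-page offset divided by the chunk size (the role played by the get_page()/get_chunk_index() helpers used elsewhere in this diff):

    // Sketch only; the allocator's own helpers serve this purpose.
    static uint8_t kind_for_pointer(void *p) {
      struct page *page = (struct page *)((uintptr_t)p & ~PAGE_MASK);  // header pointer
      unsigned chunk = ((uintptr_t)p & PAGE_MASK) / CHUNK_SIZE;        // chunk index
      return page->header.chunk_kinds[chunk];
    }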
@@ -213,6 +213,54 @@ static void maybe_repurpose_single_chunk_large_objects_head(void) {
   }
 }

+// If there have been any large-object frees since the last large object
+// allocation, go through the freelist and merge any adjacent objects.
+static int pending_large_object_compact = 0;
+static struct large_object**
+maybe_merge_free_large_object(struct large_object **prev) {
+  struct large_object *obj = *prev;
+  while (1) {
+    char *end = get_large_object_payload(obj) + obj->size;
+    ASSERT_ALIGNED((uintptr_t)end, CHUNK_SIZE);
+    unsigned chunk = get_chunk_index(end);
+    if (chunk < FIRST_ALLOCATABLE_CHUNK) {
+      // Merging can't create a large object that newly spans the header chunk.
+      // This check also catches the end-of-heap case.
+      return prev;
+    }
+    struct page *page = get_page(end);
+    if (page->header.chunk_kinds[chunk] != FREE_LARGE_OBJECT) {
+      return prev;
+    }
+    struct large_object *next = (struct large_object*) end;
+
+    struct large_object **prev_prev = &large_objects, *walk = large_objects;
+    while (1) {
+      ASSERT(walk);
+      if (walk == next) {
+        obj->size += LARGE_OBJECT_HEADER_SIZE + walk->size;
+        *prev_prev = walk->next;
+        if (prev == &walk->next) {
+          prev = prev_prev;
+        }
+        break;
+      }
+      prev_prev = &walk->next;
+      walk = walk->next;
+    }
+  }
+}
+static void
+maybe_compact_free_large_objects(void) {
+  if (pending_large_object_compact) {
+    pending_large_object_compact = 0;
+    struct large_object **prev = &large_objects;
+    while (*prev) {
+      prev = &(*maybe_merge_free_large_object(prev))->next;
+    }
+  }
+}
+
 // Allocate a large object with enough space for SIZE payload bytes.  Returns a
 // large object with a header, aligned on a chunk boundary, whose payload size
 // may be larger than SIZE, and whose total size (header included) is
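The new code above implements lazy coalescing: free() only pushes the object onto the freelist and raises pending_large_object_compact, and the next large allocation walks the list once, absorbing any FREE_LARGE_OBJECT chunk found immediately after an object's payload end. An illustrative usage sketch, not part of the commit, assuming this allocator backs malloc()/free(), that 64 KiB requests take the large-object path, and that the two blocks happen to end up adjacent (none of which the allocator guarantees):

    #include <stdlib.h>

    int main(void) {
      void *a = malloc(64 * 1024);
      void *b = malloc(64 * 1024);
      free(a);                        // chunk kind -> FREE_LARGE_OBJECT, flag set
      free(b);                        // ditto; nothing is merged yet
      void *c = malloc(128 * 1024);   // compaction runs before the freelist
                                      // search, so adjacent freed spans can be
                                      // merged and reused for this request
      free(c);
      return 0;
    }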
@@ -226,15 +274,16 @@ static void maybe_repurpose_single_chunk_large_objects_head(void) {
 // object.
 static struct large_object*
 allocate_large_object(size_t size) {
+  maybe_compact_free_large_objects();
   struct large_object *best = NULL, **best_prev = &large_objects;
   size_t best_size = -1;
-  for (struct large_object *prev = NULL, *walk = large_objects;
+  for (struct large_object **prev = &large_objects, *walk = large_objects;
        walk;
-       prev = walk, walk = walk->next) {
+       prev = &walk->next, walk = walk->next) {
     if (walk->size >= size && walk->size < best_size) {
       best_size = walk->size;
       best = walk;
-      if (prev) best_prev = &prev->next;
+      best_prev = prev;
       if (best_size + LARGE_OBJECT_HEADER_SIZE
           == align(size + LARGE_OBJECT_HEADER_SIZE, CHUNK_SIZE))
         // Not going to do any better than this; just return it.
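The loop rewrite above switches prev from a node pointer to a pointer at the incoming link (&large_objects for the head, &walk->next otherwise), so best_prev always names the exact slot to overwrite and the list head no longer needs a special case. A generic sketch of the idiom with a hypothetical node type, not taken from walloc:

    struct node { struct node *next; };

    // Unlink DEAD from the list rooted at *HEAD; the same assignment handles
    // the head node and interior nodes alike.
    static void remove_node(struct node **head, struct node *dead) {
      for (struct node **link = head; *link; link = &(*link)->next) {
        if (*link == dead) {
          *link = dead->next;
          return;
        }
      }
    }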
@@ -262,6 +311,8 @@ allocate_large_object(size_t size) {
     ASSERT(best_size >= size_with_header);
   }

+  allocate_chunk(get_page(best), get_chunk_index(best), LARGE_OBJECT);
+
   struct large_object *next = best->next;
   *best_prev = next;

@@ -282,6 +333,7 @@ allocate_large_object(size_t size) {
       ASSERT_ALIGNED((uintptr_t)end, PAGE_SIZE);
       size_t first_page_size = PAGE_SIZE - (((uintptr_t)start) & PAGE_MASK);
       struct large_object *head = best;
+      allocate_chunk(start_page, get_chunk_index(start), FREE_LARGE_OBJECT);
       head->size = first_page_size;
       head->next = large_objects;
       large_objects = head;
@@ -316,7 +368,7 @@ allocate_large_object(size_t size) {

     if (tail_size) {
       struct page *page = get_page(end - tail_size);
-      char *tail_ptr = allocate_chunk(page, tail_idx, LARGE_OBJECT);
+      char *tail_ptr = allocate_chunk(page, tail_idx, FREE_LARGE_OBJECT);
       struct large_object *tail = (struct large_object *) tail_ptr;
       tail->next = large_objects;
       tail->size = tail_size - LARGE_OBJECT_HEADER_SIZE;
@@ -327,6 +379,7 @@ allocate_large_object(size_t size) {
     }
   }

+  ASSERT_ALIGNED((uintptr_t)(get_large_object_payload(best) + best->size), CHUNK_SIZE);
   return best;
 }

@@ -401,6 +454,8 @@ free(void *ptr) {
     struct large_object *obj = get_large_object(ptr);
     obj->next = large_objects;
     large_objects = obj;
+    allocate_chunk(page, chunk, FREE_LARGE_OBJECT);
+    pending_large_object_compact = 1;
   } else {
     size_t granules = kind;
     struct freelist **loc = get_small_object_freelist(granules);
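Taken together, the patch turns the first chunk's kind byte into a small state machine for large objects: free() moves it from LARGE_OBJECT to FREE_LARGE_OBJECT and defers all merging, allocate_large_object() moves the chosen object back to LARGE_OBJECT, and the head/tail splits mark newly exposed free space FREE_LARGE_OBJECT. A debug-style sketch of checking one of those transitions; the helper is hypothetical and assumes ASSERT and struct page from the surrounding file:

    // Hypothetical debug check, not part of the commit.
    static void expect_chunk_kind(struct page *page, unsigned chunk, uint8_t kind) {
      ASSERT(page->header.chunk_kinds[chunk] == kind);
    }
    // e.g. before the allocate_chunk() call added to free() above:
    //   expect_chunk_kind(page, chunk, LARGE_OBJECT);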