Commit e3c479f
Add large object compaction
1 parent f6c7d43

1 file changed: walloc.c, 63 additions and 8 deletions

@@ -79,7 +79,7 @@ enum chunk_kind {
 #undef DEFINE_SMALL_OBJECT_CHUNK_KIND
 
   SMALL_OBJECT_CHUNK_KINDS,
-  FREE_CHUNK = 254,
+  FREE_LARGE_OBJECT = 254,
   LARGE_OBJECT = 255
 };
 
@@ -109,9 +109,9 @@ static unsigned chunk_kind_to_granules(enum chunk_kind kind) {
 
 // Given a pointer P returned by malloc(), we get a header pointer via
 // P&~PAGE_MASK, and a chunk index via (P&PAGE_MASK)/CHUNKS_PER_PAGE.  If
-// chunk_kinds[chunk_idx] is LARGE_OBJECT, then the pointer is a large object,
-// otherwise the kind indicates the size in granules of the objects in the
-// chunk.
+// chunk_kinds[chunk_idx] is [FREE_]LARGE_OBJECT, then the pointer is a large
+// object, otherwise the kind indicates the size in granules of the objects in
+// the chunk.
 struct page_header {
   uint8_t chunk_kinds[CHUNKS_PER_PAGE];
 };
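
The comment above describes walloc's basic address arithmetic: the page header sits at the start of the page containing P, and the chunk index is P's offset within that page counted in chunks, so indices range over CHUNKS_PER_PAGE. A minimal sketch of that lookup follows; the page and chunk sizes are illustrative assumptions, not values quoted from walloc.c (only chunk_kinds[] appears in this diff).

// Hedged sketch (not from walloc.c) of the lookup described above.
#include <stdint.h>

#define PAGE_SIZE       (64 * 1024)          /* assumed page size */
#define PAGE_MASK       (PAGE_SIZE - 1)
#define CHUNK_SIZE      512                  /* assumed chunk size */
#define CHUNKS_PER_PAGE (PAGE_SIZE / CHUNK_SIZE)

struct page_header { uint8_t chunk_kinds[CHUNKS_PER_PAGE]; };

// P & ~PAGE_MASK: the page header sits at the very start of P's page.
static inline struct page_header *header_of(void *p) {
  return (struct page_header *) ((uintptr_t) p & ~(uintptr_t) PAGE_MASK);
}

// P's offset within its page, counted in chunks, indexes chunk_kinds[].
static inline unsigned chunk_index_of(void *p) {
  return (unsigned) (((uintptr_t) p & PAGE_MASK) / CHUNK_SIZE);
}
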
@@ -213,6 +213,54 @@ static void maybe_repurpose_single_chunk_large_objects_head(void) {
   }
 }
 
+// If there have been any large-object frees since the last large object
+// allocation, go through the freelist and merge any adjacent objects.
+static int pending_large_object_compact = 0;
+static struct large_object**
+maybe_merge_free_large_object(struct large_object** prev) {
+  struct large_object *obj = *prev;
+  while (1) {
+    char *end = get_large_object_payload(obj) + obj->size;
+    ASSERT_ALIGNED((uintptr_t)end, CHUNK_SIZE);
+    unsigned chunk = get_chunk_index(end);
+    if (chunk < FIRST_ALLOCATABLE_CHUNK) {
+      // Merging can't create a large object that newly spans the header chunk.
+      // This check also catches the end-of-heap case.
+      return prev;
+    }
+    struct page *page = get_page(end);
+    if (page->header.chunk_kinds[chunk] != FREE_LARGE_OBJECT) {
+      return prev;
+    }
+    struct large_object *next = (struct large_object*) end;
+
+    struct large_object **prev_prev = &large_objects, *walk = large_objects;
+    while (1) {
+      ASSERT(walk);
+      if (walk == next) {
+        obj->size += LARGE_OBJECT_HEADER_SIZE + walk->size;
+        *prev_prev = walk->next;
+        if (prev == &walk->next) {
+          prev = prev_prev;
+        }
+        break;
+      }
+      prev_prev = &walk->next;
+      walk = walk->next;
+    }
+  }
+}
+static void
+maybe_compact_free_large_objects(void) {
+  if (pending_large_object_compact) {
+    pending_large_object_compact = 0;
+    struct large_object **prev = &large_objects;
+    while (*prev) {
+      prev = &(*maybe_merge_free_large_object(prev))->next;
+    }
+  }
+}
+
 // Allocate a large object with enough space for SIZE payload bytes.  Returns a
 // large object with a header, aligned on a chunk boundary, whose payload size
 // may be larger than SIZE, and whose total size (header included) is
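
The subtle case in maybe_merge_free_large_object is the prev == &walk->next fixup: when the absorbed physical neighbor walk is also the freelist node whose next field currently reaches obj, splicing walk out of the list means obj is now reached through the earlier link, so the handle is redirected to prev_prev before being returned to the caller. The underlying idiom, addressing a list entry by a pointer to the link that reaches it, looks like this in isolation (a hedged sketch with illustrative names, not code from walloc.c):

// Minimal sketch: removing a node via a pointer to the link that reaches it.
// *link is always the link that currently points at the node under
// consideration, so unlinking is one store and the list head needs no
// special treatment.
struct node { struct node *next; };

static void unlink_node(struct node **head, struct node *victim) {
  for (struct node **link = head; *link; link = &(*link)->next) {
    if (*link == victim) {
      *link = victim->next;     // splice the victim out of the chain
      return;
    }
  }
}
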
@@ -226,15 +274,16 @@ static void maybe_repurpose_single_chunk_large_objects_head(void) {
 // object.
 static struct large_object*
 allocate_large_object(size_t size) {
+  maybe_compact_free_large_objects();
   struct large_object *best = NULL, **best_prev = &large_objects;
   size_t best_size = -1;
-  for (struct large_object *prev = NULL, *walk = large_objects;
+  for (struct large_object **prev = &large_objects, *walk = large_objects;
        walk;
-       prev = walk, walk = walk->next) {
+       prev = &walk->next, walk = walk->next) {
     if (walk->size >= size && walk->size < best_size) {
       best_size = walk->size;
       best = walk;
-      if (prev) best_prev = &prev->next;
+      best_prev = prev;
       if (best_size + LARGE_OBJECT_HEADER_SIZE
           == align(size + LARGE_OBJECT_HEADER_SIZE, CHUNK_SIZE))
         // Not going to do any better than this; just return it.
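
The loop rewrite switches prev from a node pointer to a pointer-to-link, so &large_objects itself is a valid starting link and the old "if (prev)" special case for the head of the list disappears. The same best-fit scan in a generic, self-contained form might look like this sketch (the node layout and names are illustrative assumptions, not walloc.c's):

#include <stddef.h>

struct blk { size_t size; struct blk *next; };

// Return the address of the link (the head pointer or some node's next
// field) that reaches the smallest block of at least `size` bytes, or NULL
// if none fits.  Returning a link rather than a node lets the caller unlink
// the block with a single store, whether or not it is the first element.
static struct blk **best_fit_link(struct blk **head, size_t size) {
  struct blk **best = NULL;
  size_t best_size = (size_t) -1;
  for (struct blk **link = head; *link; link = &(*link)->next) {
    if ((*link)->size >= size && (*link)->size < best_size) {
      best_size = (*link)->size;
      best = link;
    }
  }
  return best;
}

// Usage: struct blk **link = best_fit_link(&freelist, n);
//        if (link) { struct blk *found = *link; *link = found->next; }
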
@@ -262,6 +311,8 @@ allocate_large_object(size_t size) {
     ASSERT(best_size >= size_with_header);
   }
 
+  allocate_chunk(get_page(best), get_chunk_index(best), LARGE_OBJECT);
+
   struct large_object *next = best->next;
   *best_prev = next;
 
@@ -282,6 +333,7 @@ allocate_large_object(size_t size) {
   ASSERT_ALIGNED((uintptr_t)end, PAGE_SIZE);
   size_t first_page_size = PAGE_SIZE - (((uintptr_t)start) & PAGE_MASK);
   struct large_object *head = best;
+  allocate_chunk(start_page, get_chunk_index(start), FREE_LARGE_OBJECT);
   head->size = first_page_size;
   head->next = large_objects;
   large_objects = head;
@@ -316,7 +368,7 @@ allocate_large_object(size_t size) {
 
   if (tail_size) {
     struct page *page = get_page(end - tail_size);
-    char *tail_ptr = allocate_chunk(page, tail_idx, LARGE_OBJECT);
+    char *tail_ptr = allocate_chunk(page, tail_idx, FREE_LARGE_OBJECT);
     struct large_object *tail = (struct large_object *) tail_ptr;
     tail->next = large_objects;
     tail->size = tail_size - LARGE_OBJECT_HEADER_SIZE;
@@ -327,6 +379,7 @@ allocate_large_object(size_t size) {
     }
   }
 
+  ASSERT_ALIGNED((uintptr_t)(get_large_object_payload(best) + best->size), CHUNK_SIZE);
   return best;
 }
 
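The new allocate_chunk calls keep the chunk-kind bytes in step with the freelist: a block's first chunk is tagged FREE_LARGE_OBJECT while it sits on the large-object freelist (when it is split off or freed) and LARGE_OBJECT once it is handed out, which is exactly what maybe_merge_free_large_object relies on when it probes the kind of the neighboring chunk. A debugging-style check of the freelist half of that invariant might look like the following sketch; it is meant to live inside walloc.c and reuses internal helpers visible in this diff (get_page, get_chunk_index, ASSERT), so it is an illustration rather than part of the patch.

// Hedged sketch: every entry on the large-object freelist should have its
// first chunk tagged FREE_LARGE_OBJECT.
static void check_large_object_freelist(void) {
  for (struct large_object *walk = large_objects; walk; walk = walk->next) {
    struct page *page = get_page(walk);
    ASSERT(page->header.chunk_kinds[get_chunk_index(walk)] == FREE_LARGE_OBJECT);
  }
}
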

@@ -401,6 +454,8 @@ free(void *ptr) {
     struct large_object *obj = get_large_object(ptr);
     obj->next = large_objects;
     large_objects = obj;
+    allocate_chunk(page, chunk, FREE_LARGE_OBJECT);
+    pending_large_object_compact = 1;
   } else {
     size_t granules = kind;
     struct freelist **loc = get_small_object_freelist(granules);
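
Taken together, free() now only tags the object's first chunk FREE_LARGE_OBJECT and records that compaction is pending; the merge itself is deferred to the next allocate_large_object() call, which runs maybe_compact_free_large_objects() before searching the freelist. A rough usage sketch of the observable effect, assuming walloc is linked in as the program's malloc and the two freed blocks happen to be adjacent in memory (the sizes are illustrative):

#include <stdlib.h>

int main(void) {
  void *a = malloc(4 * 65536);   // comfortably in large-object territory
  void *b = malloc(4 * 65536);
  free(a);                       // first chunk marked FREE_LARGE_OBJECT
  free(b);                       // pending_large_object_compact = 1
  // The next large allocation compacts first; if a and b were adjacent,
  // their merged space can satisfy a request bigger than either alone.
  void *c = malloc(7 * 65536);
  return c ? 0 : 1;
}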
