
Commit f1359c5

implement a feature to allocate an extra word ahead of each individual object
1 parent cacb8f6 commit f1359c5

9 files changed: 188 additions & 85 deletions

Cargo.toml

Lines changed: 3 additions & 0 deletions
@@ -102,6 +102,9 @@ is_mmtk_object = ["vo_bit"]
 # Enable object pinning, in particular, enable pinning/unpinning, and its metadata
 object_pinning = []
 
+# Enable allocating an extra header word for each individual object
+extra_header = []
+
 # The following two features are useful for using Immix for VMs that do not support moving GC.
 
 # Disable any object copying in Immix. This makes Immix a non-moving policy.

src/policy/markcompactspace.rs

Lines changed: 14 additions & 1 deletion
@@ -148,6 +148,7 @@ impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for MarkCompac
 }
 
 impl<VM: VMBinding> MarkCompactSpace<VM> {
+    #[cfg(not(feature = "extra_header"))]
     /// We need one extra header word for each object. Considering the alignment requirement, this is
     /// the actual bytes we need to reserve for each allocation.
     pub const HEADER_RESERVED_IN_BYTES: usize = if VM::MAX_ALIGNMENT > GC_EXTRA_HEADER_BYTES {
@@ -157,6 +158,18 @@ impl<VM: VMBinding> MarkCompactSpace<VM> {
     }
     .next_power_of_two();
 
+    #[cfg(feature = "extra_header")]
+    /// We need one extra header word for each object. Considering the alignment requirement, this is
+    /// the actual bytes we need to reserve for each allocation.
+    pub const HEADER_RESERVED_IN_BYTES: usize = if VM::MAX_ALIGNMENT > GC_EXTRA_HEADER_BYTES {
+        VM::MAX_ALIGNMENT + VM::EXTRA_HEADER_BYTES
+    } else {
+        GC_EXTRA_HEADER_BYTES + VM::EXTRA_HEADER_BYTES
+    }
+    .next_power_of_two();
+
+    pub const GC_EXTRA_HEADER_OFFSET: usize = Self::HEADER_RESERVED_IN_BYTES;
+
     // The following are a few functions for manipulating the header forwarding pointer.
     // Basically for each allocation request, we allocate extra bytes of [`HEADER_RESERVED_IN_BYTES`].
     // From the allocation result we get (e.g. `alloc_res`), `alloc_res + HEADER_RESERVED_IN_BYTES` is the cell
@@ -166,7 +179,7 @@ impl<VM: VMBinding> MarkCompactSpace<VM> {
 
     /// Get the address for the header forwarding pointer
     fn header_forwarding_pointer_address(object: ObjectReference) -> Address {
-        object.to_object_start::<VM>() - GC_EXTRA_HEADER_BYTES
+        object.to_object_start::<VM>() - Self::GC_EXTRA_HEADER_OFFSET
    }
 
     /// Get the header forwarding pointer for an object
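
To make the constant above concrete, here is a small runnable sketch of the same arithmetic with assumed values (a 64-bit VM with MAX_ALIGNMENT = 8 and one-word, 8-byte headers; these stand in for the generic VM constants and real bindings may differ):

// A runnable sketch of HEADER_RESERVED_IN_BYTES, with assumed values.
const MAX_ALIGNMENT: usize = 8;
const GC_EXTRA_HEADER_BYTES: usize = 8; // mark-compact forwarding-pointer word
const EXTRA_HEADER_BYTES: usize = 8; // the extra word added by this commit

// Without `extra_header`: reserve the larger of the alignment and the GC word.
const RESERVED_PLAIN: usize = if MAX_ALIGNMENT > GC_EXTRA_HEADER_BYTES {
    MAX_ALIGNMENT
} else {
    GC_EXTRA_HEADER_BYTES
}
.next_power_of_two();

// With `extra_header`: the extra word joins the reservation before rounding,
// and the result is also the new GC_EXTRA_HEADER_OFFSET.
const RESERVED_EXTRA: usize = if MAX_ALIGNMENT > GC_EXTRA_HEADER_BYTES {
    MAX_ALIGNMENT + EXTRA_HEADER_BYTES
} else {
    GC_EXTRA_HEADER_BYTES + EXTRA_HEADER_BYTES
}
.next_power_of_two();

fn main() {
    assert_eq!(RESERVED_PLAIN, 8); // one word suffices
    assert_eq!(RESERVED_EXTRA, 16); // two words; already a power of two
}

With these values the reserved region doubles from one word to two once the extra header is enabled.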

src/policy/marksweepspace/native_ms/block.rs

Lines changed: 3 additions & 0 deletions
@@ -285,9 +285,12 @@ impl Block {
         let mut cell = self.start();
         let mut last = unsafe { Address::zero() };
         while cell + cell_size <= self.start() + Block::BYTES {
+            #[cfg(not(feature = "extra_header"))]
             // The invariants we checked earlier ensure that we can use cell and object reference interchangeably
             // We may not really have an object in this cell, but if we do, this object reference is correct.
             let potential_object = ObjectReference::from_raw_address(cell);
+            #[cfg(feature = "extra_header")]
+            let potential_object = ObjectReference::from_raw_address(cell + VM::EXTRA_HEADER_BYTES);
 
             if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
                 .is_marked::<VM>(potential_object, Ordering::SeqCst)
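
For illustration, a minimal sketch of the cell layout this sweep code assumes, using plain usize addresses in place of MMTk's Address and ObjectReference types (the one-word, 8-byte extra header is an assumption):

// Under `extra_header`, each cell begins with the extra word, and the object
// reference sits one EXTRA_HEADER_BYTES past the cell start:
//
//   cell                     cell + EXTRA_HEADER_BYTES
//   | extra header word ...  | object (mark bit is probed here) ...
//
const EXTRA_HEADER_BYTES: usize = 8;

fn potential_object_addr(cell: usize, extra_header: bool) -> usize {
    if extra_header {
        cell + EXTRA_HEADER_BYTES // object reference sits past the header
    } else {
        cell // cell start and object reference coincide
    }
}

fn main() {
    assert_eq!(potential_object_addr(0x1000, false), 0x1000);
    assert_eq!(potential_object_addr(0x1000, true), 0x1008);
}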

src/util/alloc/bumpallocator.rs

Lines changed: 34 additions & 16 deletions
@@ -61,25 +61,21 @@ impl<VM: VMBinding> Allocator<VM> for BumpAllocator<VM> {
         BLOCK_SIZE
     }
 
+    #[cfg(not(feature = "extra_header"))]
     fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
-        trace!("alloc");
-        let result = align_allocation_no_fill::<VM>(self.cursor, align, offset);
-        let new_cursor = result + size;
+        self.alloc_impl(size, align, offset)
+    }
 
-        if new_cursor > self.limit {
-            trace!("Thread local buffer used up, go to alloc slow path");
-            self.alloc_slow(size, align, offset)
+    #[cfg(feature = "extra_header")]
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
+        let rtn = self.alloc_impl(size + VM::EXTRA_HEADER_BYTES, align, offset);
+
+        // Check if the result is valid and return the actual object start address
+        // Note that `rtn` can be null in the case of OOM
+        if !rtn.is_zero() {
+            rtn + VM::EXTRA_HEADER_BYTES
         } else {
-            fill_alignment_gap::<VM>(self.cursor, result);
-            self.cursor = new_cursor;
-            trace!(
-                "Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
-                size,
-                result,
-                self.cursor,
-                self.limit
-            );
-            result
+            rtn
         }
     }
 
@@ -151,6 +147,28 @@ impl<VM: VMBinding> BumpAllocator<VM> {
         }
     }
 
+    fn alloc_impl(&mut self, size: usize, align: usize, offset: usize) -> Address {
+        trace!("alloc");
+        let result = align_allocation_no_fill::<VM>(self.cursor, align, offset);
+        let new_cursor = result + size;
+
+        if new_cursor > self.limit {
+            trace!("Thread local buffer used up, go to alloc slow path");
+            self.alloc_slow(size, align, offset)
+        } else {
+            fill_alignment_gap::<VM>(self.cursor, result);
+            self.cursor = new_cursor;
+            trace!(
+                "Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
+                size,
+                result,
+                self.cursor,
+                self.limit
+            );
+            result
+        }
+    }
+
     fn acquire_block(
         &mut self,
         size: usize,
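
The alloc/alloc_impl split above is the same wrapper pattern used by the free-list, Immix, and large-object allocators below. A minimal runnable sketch of that pattern, with plain usize addresses and a hypothetical raw_alloc closure standing in for alloc_impl:

// A sketch of the shared wrapper; `raw_alloc` is an illustrative stand-in
// for alloc_impl and returns 0 on OOM.
const EXTRA_HEADER_BYTES: usize = 8; // assumed word size

fn alloc_with_extra_header(raw_alloc: impl FnOnce(usize) -> usize, size: usize) -> usize {
    // Request one extra word, then hand the caller the address past it.
    let rtn = raw_alloc(size + EXTRA_HEADER_BYTES);
    if rtn != 0 {
        rtn + EXTRA_HEADER_BYTES
    } else {
        rtn // a null (OOM) result must be passed through unshifted
    }
}

fn main() {
    // Success: the caller sees the object start, one word past the raw cell.
    assert_eq!(alloc_with_extra_header(|_| 0x2000, 24), 0x2008);
    // OOM: zero stays zero rather than becoming a bogus non-null address.
    assert_eq!(alloc_with_extra_header(|_| 0, 24), 0);
}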

src/util/alloc/free_list_allocator.rs

Lines changed: 49 additions & 31 deletions
@@ -42,41 +42,22 @@ impl<VM: VMBinding> Allocator<VM> for FreeListAllocator<VM> {
         self.plan
     }
 
-    // Find a block with free space and allocate to it
+    #[cfg(not(feature = "extra_header"))]
     fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
-        debug_assert!(
-            size <= MAX_BIN_SIZE,
-            "Alloc request for {} bytes is too big.",
-            size
-        );
-        debug_assert!(align <= VM::MAX_ALIGNMENT);
-        debug_assert!(align >= VM::MIN_ALIGNMENT);
+        self.alloc_impl(size, align, offset)
+    }
 
-        if let Some(block) = self.find_free_block_local(size, align) {
-            let cell = self.block_alloc(block);
-            if !cell.is_zero() {
-                // We succeeded in fastpath alloc, this cannot be precise stress test
-                debug_assert!(
-                    !(*self.plan.options().precise_stress
-                        && self.plan.base().is_stress_test_gc_enabled())
-                );
+    #[cfg(feature = "extra_header")]
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
+        let rtn = self.alloc_impl(size + VM::EXTRA_HEADER_BYTES, align, offset);
 
-                let res = allocator::align_allocation::<VM>(cell, align, offset);
-                // Make sure that the allocation region is within the cell
-                #[cfg(debug_assertions)]
-                {
-                    let cell_size = block.load_block_cell_size();
-                    debug_assert!(
-                        res + size <= cell + cell_size,
-                        "Allocating (size = {}, align = {}, offset = {}) to the cell {} of size {}, but the end of the allocation region {} is beyond the cell end {}",
-                        size, align, offset, cell, cell_size, res + size, cell + cell_size
-                    );
-                }
-                return res;
-            }
+        // Check if the result is valid and return the actual object start address
+        // Note that `rtn` can be null in the case of OOM
+        if !rtn.is_zero() {
+            rtn + VM::EXTRA_HEADER_BYTES
+        } else {
+            rtn
         }
-
-        self.alloc_slow(size, align, offset)
     }
 
     fn alloc_slow_once(&mut self, size: usize, align: usize, offset: usize) -> Address {
@@ -141,6 +122,43 @@ impl<VM: VMBinding> FreeListAllocator<VM> {
         }
     }
 
+    // Find a block with free space and allocate to it
+    fn alloc_impl(&mut self, size: usize, align: usize, offset: usize) -> Address {
+        debug_assert!(
+            size <= MAX_BIN_SIZE,
+            "Alloc request for {} bytes is too big.",
+            size
+        );
+        debug_assert!(align <= VM::MAX_ALIGNMENT);
+        debug_assert!(align >= VM::MIN_ALIGNMENT);
+
+        if let Some(block) = self.find_free_block_local(size, align) {
+            let cell = self.block_alloc(block);
+            if !cell.is_zero() {
+                // We succeeded in fastpath alloc, this cannot be precise stress test
+                debug_assert!(
+                    !(*self.plan.options().precise_stress
+                        && self.plan.base().is_stress_test_gc_enabled())
+                );
+
+                let res = allocator::align_allocation::<VM>(cell, align, offset);
+                // Make sure that the allocation region is within the cell
+                #[cfg(debug_assertions)]
+                {
+                    let cell_size = block.load_block_cell_size();
+                    debug_assert!(
+                        res + size <= cell + cell_size,
+                        "Allocating (size = {}, align = {}, offset = {}) to the cell {} of size {}, but the end of the allocation region {} is beyond the cell end {}",
+                        size, align, offset, cell, cell_size, res + size, cell + cell_size
+                    );
+                }
+                return res;
+            }
+        }
+
+        self.alloc_slow(size, align, offset)
+    }
+
     // Find a free cell within a given block
     fn block_alloc(&mut self, block: Block) -> Address {
         let cell = block.load_free_list();
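
Worth noting: because the wrapper adds the extra word before alloc_impl runs its debug_assert!(size <= MAX_BIN_SIZE), the largest request a caller can make under extra_header effectively shrinks by one word; the same reasoning applies to the MAX_IMMIX_OBJECT_SIZE check in the Immix allocator below. A sketch with an assumed bin size (not the real MMTk constant):

const MAX_BIN_SIZE: usize = 8192; // assumed value, for illustration only
const EXTRA_HEADER_BYTES: usize = 8;

// The largest caller-visible request that still satisfies the assertion
// once the header word has been folded into the size.
const EFFECTIVE_MAX_REQUEST: usize = MAX_BIN_SIZE - EXTRA_HEADER_BYTES;

fn main() {
    assert!(EFFECTIVE_MAX_REQUEST + EXTRA_HEADER_BYTES <= MAX_BIN_SIZE);
    assert_eq!(EFFECTIVE_MAX_REQUEST, 8184);
}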

src/util/alloc/immix_allocator.rs

Lines changed: 50 additions & 32 deletions
@@ -65,41 +65,21 @@ impl<VM: VMBinding> Allocator<VM> for ImmixAllocator<VM> {
         crate::policy::immix::block::Block::BYTES
     }
 
+    #[cfg(not(feature = "extra_header"))]
     fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
-        debug_assert!(
-            size <= crate::policy::immix::MAX_IMMIX_OBJECT_SIZE,
-            "Trying to allocate a {} bytes object, which is larger than MAX_IMMIX_OBJECT_SIZE {}",
-            size,
-            crate::policy::immix::MAX_IMMIX_OBJECT_SIZE
-        );
-        let result = align_allocation_no_fill::<VM>(self.cursor, align, offset);
-        let new_cursor = result + size;
+        self.alloc_impl(size, align, offset)
+    }
 
-        if new_cursor > self.limit {
-            trace!(
-                "{:?}: Thread local buffer used up, go to alloc slow path",
-                self.tls
-            );
-            if get_maximum_aligned_size::<VM>(size, align) > Line::BYTES {
-                // Size larger than a line: do large allocation
-                self.overflow_alloc(size, align, offset)
-            } else {
-                // Size smaller than a line: fit into holes
-                self.alloc_slow_hot(size, align, offset)
-            }
+    #[cfg(feature = "extra_header")]
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
+        let rtn = self.alloc_impl(size + VM::EXTRA_HEADER_BYTES, align, offset);
+
+        // Check if the result is valid and return the actual object start address
+        // Note that `rtn` can be null in the case of OOM
+        if !rtn.is_zero() {
+            rtn + VM::EXTRA_HEADER_BYTES
         } else {
-            // Simple bump allocation.
-            fill_alignment_gap::<VM>(self.cursor, result);
-            self.cursor = new_cursor;
-            trace!(
-                "{:?}: Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
-                self.tls,
-                size,
-                result,
-                self.cursor,
-                self.limit
-            );
-            result
+            rtn
         }
     }
 
@@ -194,6 +174,44 @@ impl<VM: VMBinding> ImmixAllocator<VM> {
         self.space
     }
 
+    fn alloc_impl(&mut self, size: usize, align: usize, offset: usize) -> Address {
+        debug_assert!(
+            size <= crate::policy::immix::MAX_IMMIX_OBJECT_SIZE,
+            "Trying to allocate a {} bytes object, which is larger than MAX_IMMIX_OBJECT_SIZE {}",
+            size,
+            crate::policy::immix::MAX_IMMIX_OBJECT_SIZE
+        );
+        let result = align_allocation_no_fill::<VM>(self.cursor, align, offset);
+        let new_cursor = result + size;
+
+        if new_cursor > self.limit {
+            trace!(
+                "{:?}: Thread local buffer used up, go to alloc slow path",
+                self.tls
+            );
+            if get_maximum_aligned_size::<VM>(size, align) > Line::BYTES {
+                // Size larger than a line: do large allocation
+                self.overflow_alloc(size, align, offset)
+            } else {
+                // Size smaller than a line: fit into holes
+                self.alloc_slow_hot(size, align, offset)
+            }
+        } else {
+            // Simple bump allocation.
+            fill_alignment_gap::<VM>(self.cursor, result);
+            self.cursor = new_cursor;
+            trace!(
+                "{:?}: Bump allocation size: {}, result: {}, new_cursor: {}, limit: {}",
+                self.tls,
+                size,
+                result,
+                self.cursor,
+                self.limit
+            );
+            result
+        }
+    }
+
     /// Large-object (larger than a line) bump allocation.
     fn overflow_alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
         trace!("{:?}: overflow_alloc", self.tls);

src/util/alloc/large_object_allocator.rs

Lines changed: 20 additions & 5 deletions
@@ -34,13 +34,18 @@ impl<VM: VMBinding> Allocator<VM> for LargeObjectAllocator<VM> {
         false
     }
 
+    #[cfg(not(feature = "extra_header"))]
     fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
-        let cell: Address = self.alloc_slow(size, align, offset);
-        // We may get a null ptr from alloc due to the VM being OOM
-        if !cell.is_zero() {
-            allocator::align_allocation::<VM>(cell, align, offset)
+        self.alloc_impl(size, align, offset)
+    }
+
+    #[cfg(feature = "extra_header")]
+    fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
+        let rtn = self.alloc_impl(size + VM::EXTRA_HEADER_BYTES, align, offset);
+        if !rtn.is_zero() {
+            rtn + VM::EXTRA_HEADER_BYTES
         } else {
-            cell
+            rtn
        }
     }
 
@@ -65,4 +70,14 @@ impl<VM: VMBinding> LargeObjectAllocator<VM> {
     ) -> Self {
         LargeObjectAllocator { tls, space, plan }
     }
+
+    fn alloc_impl(&mut self, size: usize, align: usize, offset: usize) -> Address {
+        let cell: Address = self.alloc_slow(size, align, offset);
+        // We may get a null ptr from alloc due to the VM being OOM
+        if !cell.is_zero() {
+            allocator::align_allocation::<VM>(cell, align, offset)
+        } else {
+            cell
+        }
+    }
 }

src/util/alloc/malloc_allocator.rs

Lines changed: 6 additions & 0 deletions
@@ -25,6 +25,12 @@ impl<VM: VMBinding> Allocator<VM> for MallocAllocator<VM> {
         self.plan
     }
 
+    #[cfg(feature = "extra_header")]
+    fn alloc(&mut self, _size: usize, _align: usize, _offset: usize) -> Address {
+        unimplemented!()
+    }
+
+    #[cfg(not(feature = "extra_header"))]
     fn alloc(&mut self, size: usize, align: usize, offset: usize) -> Address {
         self.alloc_slow(size, align, offset)
     }

src/vm/mod.rs

Lines changed: 9 additions & 0 deletions
@@ -71,4 +71,13 @@ where
     /// Note that MMTk does not attempt to do anything to align the cursor to this value, but
     /// it merely asserts with this constant.
     const ALLOC_END_ALIGNMENT: usize = 1;
+
+    #[cfg(feature = "extra_header")]
+    const EXTRA_HEADER_BYTES: usize =
+        if Self::MAX_ALIGNMENT > crate::util::constants::BYTES_IN_WORD {
+            Self::MAX_ALIGNMENT
+        } else {
+            crate::util::constants::BYTES_IN_WORD
+        }
+        .next_power_of_two();
 }
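
A runnable sketch of this constant with assumed values. Because EXTRA_HEADER_BYTES is at least MAX_ALIGNMENT and is a power of two, it is a multiple of every legal align value, which is why the allocators above can return rtn + VM::EXTRA_HEADER_BYTES without breaking the alignment that alloc_impl established:

// Sketch of the EXTRA_HEADER_BYTES computation; BYTES_IN_WORD = 8 assumes a
// 64-bit VM, and the alignments passed below are illustrative.
const BYTES_IN_WORD: usize = 8;

const fn extra_header_bytes(max_alignment: usize) -> usize {
    let raw = if max_alignment > BYTES_IN_WORD {
        max_alignment
    } else {
        BYTES_IN_WORD
    };
    raw.next_power_of_two()
}

fn main() {
    assert_eq!(extra_header_bytes(8), 8); // word-aligned VM: one extra word
    assert_eq!(extra_header_bytes(16), 16); // 16-byte-aligned VM: one unit
    // Shifting an aligned address by the header size preserves alignment:
    let (align, rtn) = (16usize, 0x4000usize);
    assert_eq!((rtn + extra_header_bytes(align)) % align, 0);
}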
