Skip to content

Commit 912e832

Browse files
committed
Add scratch region
This adds a second region of guest physical memory, which is intended to become the only mutable region of memory, but which is currently unused. Signed-off-by: Lucy Menon <168595099+syntactically@users.noreply.github.com>
1 parent 8d7a926 commit 912e832

File tree

12 files changed

+245
-23
lines changed

12 files changed

+245
-23
lines changed

src/hyperlight_common/src/arch/amd64/layout.rs

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,5 +14,14 @@ See the License for the specific language governing permissions and
1414
limitations under the License.
1515
*/
1616

17-
// Keep in mind that the minimum upper half GVA is 0xffff_8000_0000_0000
18-
pub const SNAPSHOT_PT_GVA: usize = 0xffff_ff00_0000_0000;
17+
/// We have this be the top of the page below the top of memory in order
18+
/// to make working with start/end ptrs in a few places more
19+
/// convenient (not needing to worry about overflow)
20+
pub const MAX_GVA: usize = 0xffff_ffff_ffff_efff;
21+
pub const SNAPSHOT_PT_GVA: usize = 0xffff_8000_0000_0000;
22+
23+
/// We assume 36-bit IPAs for now, since every amd64 processor
24+
/// supports at least 36 bits. Almost all of them support at least 40
25+
/// bits, so we could consider bumping this in the future if we were
26+
/// ever memory-constrained.
27+
pub const MAX_GPA: usize = 0x0000_00ff_ffff_ffff;

src/hyperlight_common/src/arch/amd64/vmem.rs

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -145,8 +145,15 @@ impl<const HIGH_BIT: u8, const LOW_BIT: u8, Op: TableOps> Iterator
145145
let next_vmin = if self.n == 0 {
146146
self.request.vmin
147147
} else {
148-
// Align to the next boundary by adding one entry's worth and masking off lower bits
149-
(self.request.vmin + (self.n << LOW_BIT)) & !lower_bits_mask
148+
// Align to the next boundary by adding one entry's worth
149+
// and masking off lower bits. Masking off before adding
150+
// is safe, since n << LOW_BIT must always have zeros in
151+
// these positions.
152+
let aligned_min = self.request.vmin & !lower_bits_mask;
153+
// Use checked_add here because going past the end of the
154+
// address space counts as "the next one would be out of
155+
// range"
156+
aligned_min.checked_add(self.n << LOW_BIT)?
150157
};
151158

152159
// Check if we've processed the entire requested range

src/hyperlight_common/src/layout.rs

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,4 +21,21 @@ mod arch;
2121

2222
// The constraint on the feature is temporary and will be removed when the other arch (i686) is added
2323
#[cfg(feature = "init-paging")]
24-
pub use arch::SNAPSHOT_PT_GVA;
24+
pub use arch::MAX_GPA;
25+
#[cfg(feature = "init-paging")]
26+
pub use arch::{MAX_GVA, SNAPSHOT_PT_GVA};
27+
28+
// offsets down from the top of scratch memory for various things
29+
pub const SCRATCH_TOP_SIZE_OFFSET: u64 = 0x08;
30+
pub const SCRATCH_TOP_USED_OFFSET: u64 = 0x10;
31+
pub const SCRATCH_TOP_ALLOCATOR_OFFSET: u64 = 0x18;
32+
pub const SCRATCH_TOP_EXN_STACK_OFFSET: u64 = 0x20;
33+
34+
#[cfg(feature = "init-paging")]
35+
pub fn scratch_base_gpa(size: usize) -> u64 {
36+
(MAX_GPA - size + 1) as u64
37+
}
38+
#[cfg(feature = "init-paging")]
39+
pub fn scratch_base_gva(size: usize) -> u64 {
40+
(MAX_GVA - size + 1) as u64
41+
}

src/hyperlight_host/src/hypervisor/hyperlight_vm.rs

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ use crate::hypervisor::{InterruptHandle, InterruptHandleImpl, get_max_log_level}
5454
use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionType};
5555
use crate::mem::mgr::SandboxMemoryManager;
5656
use crate::mem::ptr::{GuestPtr, RawPtr};
57-
use crate::mem::shared_mem::HostSharedMemory;
57+
use crate::mem::shared_mem::{GuestSharedMemory, HostSharedMemory, SharedMemory};
5858
use crate::metrics::{METRIC_ERRONEOUS_VCPU_KICKS, METRIC_GUEST_CANCELLATION};
5959
use crate::sandbox::SandboxConfiguration;
6060
use crate::sandbox::host_funcs::FunctionRegistry;
@@ -85,6 +85,10 @@ pub(crate) struct HyperlightVm {
8585
mmap_regions: Vec<(u32, MemoryRegion)>, // Later mapped regions (slot number, region)
8686
next_slot: u32, // Monotonically increasing slot number
8787
freed_slots: Vec<u32>, // Reusable slots from unmapped regions
88+
scratch_slot: u32, // The slot number used for the scratch region
89+
// The current scratch region, used to keep it alive as long as it
90+
// is used & when unmapping
91+
scratch_memory: Option<GuestSharedMemory>,
8892

8993
#[cfg(gdb)]
9094
gdb_conn: Option<DebugCommChannel<DebugResponse, DebugMsg>>,
@@ -172,6 +176,7 @@ impl HyperlightVm {
172176
}),
173177
});
174178

179+
let scratch_slot = mem_regions.len() as u32;
175180
#[cfg_attr(not(gdb), allow(unused_mut))]
176181
let mut ret = Self {
177182
vm,
@@ -180,10 +185,12 @@ impl HyperlightVm {
180185
interrupt_handle,
181186
page_size: 0, // Will be set in `initialise`
182187

183-
next_slot: mem_regions.len() as u32,
188+
next_slot: scratch_slot + 1,
184189
sandbox_regions: mem_regions,
185190
mmap_regions: Vec::new(),
186191
freed_slots: Vec::new(),
192+
scratch_slot,
193+
scratch_memory: None,
187194

188195
#[cfg(gdb)]
189196
gdb_conn,
@@ -310,6 +317,21 @@ impl HyperlightVm {
310317
self.mmap_regions.iter().map(|(_, region)| region)
311318
}
312319

320+
/// Update the scratch mapping to point to a new GuestSharedMemory
321+
pub(crate) fn update_scratch_mapping(&mut self, scratch: GuestSharedMemory) -> Result<()> {
322+
let guest_base = hyperlight_common::layout::scratch_base_gpa(scratch.mem_size());
323+
let rgn = scratch.mapping_at(guest_base, MemoryRegionType::Scratch);
324+
325+
if let Some(old_scratch) = self.scratch_memory.replace(scratch) {
326+
let old_base = hyperlight_common::layout::scratch_base_gpa(old_scratch.mem_size());
327+
let old_rgn = old_scratch.mapping_at(old_base, MemoryRegionType::Scratch);
328+
self.vm.unmap_memory((self.scratch_slot, &old_rgn))?;
329+
}
330+
unsafe { self.vm.map_memory((self.scratch_slot, &rgn))? };
331+
332+
Ok(())
333+
}
334+
313335
/// Dispatch a call from the host to the guest using the given pointer
314336
/// to the dispatch function _in the guest's address space_.
315337
///

src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,21 @@ impl KvmVm {
7878
let vm_fd = hv.create_vm_with_type(0)?;
7979
let vcpu_fd = vm_fd.create_vcpu(0)?;
8080

81+
// Set the CPUID leaf for MaxPhysAddr. KVM allows this to
82+
// easily be overridden by the hypervisor and defaults it very
83+
// low, while mshv passes it through from hardware unless an
84+
// intercept is installed.
85+
let mut kvm_cpuid = hv
86+
.get_supported_cpuid(kvm_bindings::KVM_MAX_CPUID_ENTRIES)
87+
.unwrap();
88+
for entry in kvm_cpuid.as_mut_slice().iter_mut() {
89+
if entry.function == 0x8000_0008 {
90+
entry.eax &= !0xff;
91+
entry.eax |= hyperlight_common::layout::MAX_GPA.ilog2() + 1;
92+
}
93+
}
94+
vcpu_fd.set_cpuid2(&kvm_cpuid)?;
95+
8196
Ok(Self {
8297
vm_fd,
8398
vcpu_fd,

src/hyperlight_host/src/mem/layout.rs

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,10 @@ pub(crate) struct SandboxMemoryLayout {
126126
// The offset in the sandbox memory where the code starts
127127
guest_code_offset: usize,
128128
pub(crate) init_data_permissions: Option<MemoryRegionFlags>,
129+
130+
// The size of the scratch region in physical memory; note that
131+
// this will appear under the top of physical memory.
132+
scratch_size: usize,
129133
}
130134

131135
impl Debug for SandboxMemoryLayout {
@@ -202,6 +206,10 @@ impl Debug for SandboxMemoryLayout {
202206
"Guest Code Offset",
203207
&format_args!("{:#x}", self.guest_code_offset),
204208
)
209+
.field(
210+
"Scratch region size",
211+
&format_args!("{:#x}", self.scratch_size),
212+
)
205213
.finish()
206214
}
207215
}
@@ -229,6 +237,7 @@ impl SandboxMemoryLayout {
229237
code_size: usize,
230238
stack_size: usize,
231239
heap_size: usize,
240+
scratch_size: usize,
232241
init_data_size: usize,
233242
init_data_permissions: Option<MemoryRegionFlags>,
234243
) -> Result<Self> {
@@ -295,6 +304,7 @@ impl SandboxMemoryLayout {
295304
init_data_permissions,
296305
pt_offset,
297306
pt_size: None,
307+
scratch_size,
298308
})
299309
}
300310

@@ -324,6 +334,11 @@ impl SandboxMemoryLayout {
324334
self.stack_size
325335
}
326336

337+
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
338+
pub(super) fn get_scratch_size(&self) -> usize {
339+
self.scratch_size
340+
}
341+
327342
/// Get the offset in guest memory to the output data pointer.
328343
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
329344
fn get_output_data_pointer_offset(&self) -> usize {
@@ -818,7 +833,7 @@ mod tests {
818833
fn test_get_memory_size() {
819834
let sbox_cfg = SandboxConfiguration::default();
820835
let sbox_mem_layout =
821-
SandboxMemoryLayout::new(sbox_cfg, 4096, 2048, 4096, 0, None).unwrap();
836+
SandboxMemoryLayout::new(sbox_cfg, 4096, 2048, 4096, 0x3000, 0, None).unwrap();
822837
assert_eq!(
823838
sbox_mem_layout.get_memory_size().unwrap(),
824839
get_expected_memory_size(&sbox_mem_layout)

src/hyperlight_host/src/mem/memory_region.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,8 @@ pub enum MemoryRegionType {
138138
GuardPage,
139139
/// The region contains the Stack
140140
Stack,
141+
/// The scratch region
142+
Scratch,
141143
}
142144

143145
/// A trait that distinguishes between different kinds of memory region representations.

src/hyperlight_host/src/mem/mgr.rs

Lines changed: 40 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,8 @@ pub(crate) const STACK_COOKIE_LEN: usize = 16;
4545
pub(crate) struct SandboxMemoryManager<S> {
4646
/// Shared memory for the Sandbox
4747
pub(crate) shared_mem: S,
48+
/// Scratch memory for the Sandbox
49+
pub(crate) scratch_mem: S,
4850
/// The memory layout of the underlying shared memory
4951
pub(crate) layout: SandboxMemoryLayout,
5052
/// Pointer to where to load memory from
@@ -145,13 +147,15 @@ where
145147
pub(crate) fn new(
146148
layout: SandboxMemoryLayout,
147149
shared_mem: S,
150+
scratch_mem: S,
148151
load_addr: RawPtr,
149152
entrypoint_offset: Option<Offset>,
150153
stack_cookie: [u8; STACK_COOKIE_LEN],
151154
) -> Self {
152155
Self {
153156
layout,
154157
shared_mem,
158+
scratch_mem,
155159
load_addr,
156160
entrypoint_offset,
157161
mapped_rgns: 0,
@@ -191,26 +195,22 @@ where
191195
mapped_regions,
192196
)
193197
}
194-
195-
/// This function restores a memory snapshot from a given snapshot.
196-
pub(crate) fn restore_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> {
197-
self.shared_mem.restore_from_snapshot(snapshot)?;
198-
Ok(())
199-
}
200198
}
201199

202200
impl SandboxMemoryManager<ExclusiveSharedMemory> {
203201
pub(crate) fn from_snapshot(s: &Snapshot) -> Result<Self> {
204202
let layout = *s.layout();
205203
let mut shared_mem = ExclusiveSharedMemory::new(s.mem_size())?;
206204
shared_mem.copy_from_slice(s.memory(), 0)?;
205+
let scratch_mem = ExclusiveSharedMemory::new(s.layout().get_scratch_size())?;
207206
let load_addr: RawPtr = RawPtr::try_from(layout.get_guest_code_address())?;
208207
let stack_cookie = rand::random::<[u8; STACK_COOKIE_LEN]>();
209208
let entrypoint_gva = s.preinitialise();
210209
let entrypoint_offset = entrypoint_gva.map(|x| (x - u64::from(&load_addr)).into());
211210
Ok(Self::new(
212211
layout,
213212
shared_mem,
213+
scratch_mem,
214214
load_addr,
215215
entrypoint_offset,
216216
stack_cookie,
@@ -236,9 +236,11 @@ impl SandboxMemoryManager<ExclusiveSharedMemory> {
236236
SandboxMemoryManager<GuestSharedMemory>,
237237
) {
238238
let (hshm, gshm) = self.shared_mem.build();
239+
let (hscratch, gscratch) = self.scratch_mem.build();
239240
(
240241
SandboxMemoryManager {
241242
shared_mem: hshm,
243+
scratch_mem: hscratch,
242244
layout: self.layout,
243245
load_addr: self.load_addr.clone(),
244246
entrypoint_offset: self.entrypoint_offset,
@@ -248,6 +250,7 @@ impl SandboxMemoryManager<ExclusiveSharedMemory> {
248250
},
249251
SandboxMemoryManager {
250252
shared_mem: gshm,
253+
scratch_mem: gscratch,
251254
layout: self.layout,
252255
load_addr: self.load_addr.clone(),
253256
entrypoint_offset: self.entrypoint_offset,
@@ -382,6 +385,37 @@ impl SandboxMemoryManager<HostSharedMemory> {
382385
};
383386
}
384387
}
388+
389+
/// This function restores a memory snapshot from a given snapshot.
390+
pub(crate) fn restore_snapshot(
391+
&mut self,
392+
snapshot: &Snapshot,
393+
) -> Result<Option<GuestSharedMemory>> {
394+
if self.shared_mem.mem_size() != snapshot.mem_size() {
395+
return Err(new_error!(
396+
"Snapshot size does not match current memory size: {} != {}",
397+
self.shared_mem.raw_mem_size(),
398+
snapshot.mem_size()
399+
));
400+
}
401+
self.shared_mem.restore_from_snapshot(snapshot)?;
402+
let new_scratch_size = snapshot.layout().get_scratch_size();
403+
if new_scratch_size == self.scratch_mem.mem_size() {
404+
self.scratch_mem.zero()?;
405+
Ok(None)
406+
} else {
407+
let new_scratch_mem = ExclusiveSharedMemory::new(new_scratch_size)?;
408+
let (hscratch, gscratch) = new_scratch_mem.build();
409+
// Even though this destroys the reference to the host
410+
// side of the old scratch mapping, the VM should still
411+
// own the reference to the guest side of the old scratch
412+
// mapping, so it won't actually be deallocated until it
413+
// has been unmapped from the VM.
414+
self.scratch_mem = hscratch;
415+
416+
Ok(Some(gscratch))
417+
}
418+
}
385419
}
386420

387421
#[cfg(test)]

0 commit comments

Comments
 (0)