diff --git a/src/interpreter.rs b/src/interpreter.rs
index 33a406226..ac290ece3 100644
--- a/src/interpreter.rs
+++ b/src/interpreter.rs
@@ -5,21 +5,32 @@
 // Copyright 2016 6WIND S.A.
 // (Translation to Rust, MetaBuff/multiple classes addition, hashmaps for helpers)
 
-use ebpf;
 use crate::lib::*;
+use ebpf;
 
-fn check_mem(addr: u64, len: usize, access_type: &str, insn_ptr: usize,
-             mbuff: &[u8], mem: &[u8], stack: &[u8]) -> Result<(), Error> {
+fn check_mem(
+    addr: u64,
+    len: usize,
+    access_type: &str,
+    insn_ptr: usize,
+    mbuff: &[u8],
+    mem: &[u8],
+    stack: &[u8],
+    allowed: &HashSet<u64>,
+) -> Result<(), Error> {
     if let Some(addr_end) = addr.checked_add(len as u64) {
-        if mbuff.as_ptr() as u64 <= addr && addr_end <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
-            return Ok(())
-        }
-        if mem.as_ptr() as u64 <= addr && addr_end <= mem.as_ptr() as u64 + mem.len() as u64 {
-            return Ok(())
-        }
-        if stack.as_ptr() as u64 <= addr && addr_end <= stack.as_ptr() as u64 + stack.len() as u64 {
-            return Ok(())
-        }
+        if mbuff.as_ptr() as u64 <= addr && addr_end <= mbuff.as_ptr() as u64 + mbuff.len() as u64 {
+            return Ok(());
+        }
+        if mem.as_ptr() as u64 <= addr && addr_end <= mem.as_ptr() as u64 + mem.len() as u64 {
+            return Ok(());
+        }
+        if stack.as_ptr() as u64 <= addr && addr_end <= stack.as_ptr() as u64 + stack.len() as u64 {
+            return Ok(());
+        }
+        if allowed.contains(&addr) {
+            return Ok(());
+        }
     }
 
     Err(Error::new(ErrorKind::Other, format!(
@@ -33,37 +44,54 @@ fn check_mem(addr: u64, len: usize, access_type: &str, insn_ptr: usize,
 
 #[allow(unknown_lints)]
 #[allow(cyclomatic_complexity)]
-pub fn execute_program(prog_: Option<&[u8]>, mem: &[u8], mbuff: &[u8], helpers: &HashMap<u32, ebpf::Helper>) -> Result<u64, Error> {
+pub fn execute_program(
+    prog_: Option<&[u8]>,
+    mem: &[u8],
+    mbuff: &[u8],
+    helpers: &HashMap<u32, ebpf::Helper>,
+    allowed: &HashSet<u64>,
+) -> Result<u64, Error> {
     const U32MAX: u64 = u32::MAX as u64;
     const SHIFT_MASK_64: u64 = 0x3f;
 
     let prog = match prog_ {
         Some(prog) => prog,
-        None => Err(Error::new(ErrorKind::Other,
-                    "Error: No program set, call prog_set() to load one"))?,
+        None => Err(Error::new(
+            ErrorKind::Other,
+            "Error: No program set, call prog_set() to load one",
+        ))?,
     };
-    let stack = vec![0u8;ebpf::STACK_SIZE];
+    let stack = vec![0u8; ebpf::STACK_SIZE];
 
     // R1 points to beginning of memory area, R10 to stack
-    let mut reg: [u64;11] = [
-        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, stack.as_ptr() as u64 + stack.len() as u64
+    let mut reg: [u64; 11] = [
+        0,
+        0,
+        0,
+        0,
+        0,
+        0,
+        0,
+        0,
+        0,
+        0,
+        stack.as_ptr() as u64 + stack.len() as u64,
     ];
     if !mbuff.is_empty() {
         reg[1] = mbuff.as_ptr() as u64;
-    }
-    else if !mem.is_empty() {
+    } else if !mem.is_empty() {
         reg[1] = mem.as_ptr() as u64;
     }
 
-    let check_mem_load = | addr: u64, len: usize, insn_ptr: usize | {
-        check_mem(addr, len, "load", insn_ptr, mbuff, mem, &stack)
+    let check_mem_load = |addr: u64, len: usize, insn_ptr: usize| {
+        check_mem(addr, len, "load", insn_ptr, mbuff, mem, &stack, allowed)
     };
-    let check_mem_store = | addr: u64, len: usize, insn_ptr: usize | {
-        check_mem(addr, len, "store", insn_ptr, mbuff, mem, &stack)
+    let check_mem_store = |addr: u64, len: usize, insn_ptr: usize| {
+        check_mem(addr, len, "store", insn_ptr, mbuff, mem, &stack, allowed)
    };
 
     // Loop on instructions
-    let mut insn_ptr:usize = 0;
+    let mut insn_ptr: usize = 0;
     while insn_ptr * ebpf::INSN_SIZE < prog.len() {
         let insn = ebpf::get_insn(prog, insn_ptr);
         insn_ptr += 1;
@@ -75,103 +103,130 @@ pub fn execute_program(prog_: Option<&[u8]>, mem: &[u8], mbuff: &[u8], helpers: &HashMap<u32, ebpf::Helper>) -> Result<u64, Error> {
         };
 
         match insn.opc {
-
             // BPF_LD class
             // LD_ABS_* and LD_IND_* are supposed to load pointer to data from metadata buffer.
             // Since this pointer is constant, and since we already know it (mem), do not
             // bother re-fetching it, just use mem already.
-            ebpf::LD_ABS_B   => reg[0] = unsafe {
-                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u8;
-                check_mem_load(x as u64, 8, insn_ptr)?;
-                x.read_unaligned() as u64
-            },
-            ebpf::LD_ABS_H   => reg[0] = unsafe {
-                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u16;
-                check_mem_load(x as u64, 8, insn_ptr)?;
-                x.read_unaligned() as u64
-            },
-            ebpf::LD_ABS_W   => reg[0] = unsafe {
-                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u32;
-                check_mem_load(x as u64, 8, insn_ptr)?;
-                x.read_unaligned() as u64
-            },
-            ebpf::LD_ABS_DW  => reg[0] = unsafe {
-                let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u64;
-                check_mem_load(x as u64, 8, insn_ptr)?;
-                x.read_unaligned()
-            },
-            ebpf::LD_IND_B   => reg[0] = unsafe {
-                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u8;
-                check_mem_load(x as u64, 8, insn_ptr)?;
-                x.read_unaligned() as u64
-            },
-            ebpf::LD_IND_H   => reg[0] = unsafe {
-                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u16;
-                check_mem_load(x as u64, 8, insn_ptr)?;
-                x.read_unaligned() as u64
-            },
-            ebpf::LD_IND_W   => reg[0] = unsafe {
-                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u32;
-                check_mem_load(x as u64, 8, insn_ptr)?;
-                x.read_unaligned() as u64
-            },
-            ebpf::LD_IND_DW  => reg[0] = unsafe {
-                let x = (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u64;
-                check_mem_load(x as u64, 8, insn_ptr)?;
-                x.read_unaligned()
-            },
+            ebpf::LD_ABS_B => {
+                reg[0] = unsafe {
+                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u8;
+                    check_mem_load(x as u64, 8, insn_ptr)?;
+                    x.read_unaligned() as u64
+                }
+            }
+            ebpf::LD_ABS_H => {
+                reg[0] = unsafe {
+                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u16;
+                    check_mem_load(x as u64, 8, insn_ptr)?;
+                    x.read_unaligned() as u64
+                }
+            }
+            ebpf::LD_ABS_W => {
+                reg[0] = unsafe {
+                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u32;
+                    check_mem_load(x as u64, 8, insn_ptr)?;
+                    x.read_unaligned() as u64
+                }
+            }
+            ebpf::LD_ABS_DW => {
+                reg[0] = unsafe {
+                    let x = (mem.as_ptr() as u64 + (insn.imm as u32) as u64) as *const u64;
+                    check_mem_load(x as u64, 8, insn_ptr)?;
+                    x.read_unaligned()
+                }
+            }
+            ebpf::LD_IND_B => {
+                reg[0] = unsafe {
+                    let x =
+                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u8;
+                    check_mem_load(x as u64, 8, insn_ptr)?;
+                    x.read_unaligned() as u64
+                }
+            }
+            ebpf::LD_IND_H => {
+                reg[0] = unsafe {
+                    let x =
+                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u16;
+                    check_mem_load(x as u64, 8, insn_ptr)?;
+                    x.read_unaligned() as u64
+                }
+            }
+            ebpf::LD_IND_W => {
+                reg[0] = unsafe {
+                    let x =
+                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u32;
+                    check_mem_load(x as u64, 8, insn_ptr)?;
+                    x.read_unaligned() as u64
+                }
+            }
+            ebpf::LD_IND_DW => {
+                reg[0] = unsafe {
+                    let x =
+                        (mem.as_ptr() as u64 + reg[_src] + (insn.imm as u32) as u64) as *const u64;
+                    check_mem_load(x as u64, 8, insn_ptr)?;
+                    x.read_unaligned()
+                }
+            }
 
-            ebpf::LD_DW_IMM  => {
+            ebpf::LD_DW_IMM => {
                 let next_insn = ebpf::get_insn(prog, insn_ptr);
                 insn_ptr += 1;
                 reg[_dst] = ((insn.imm as u32) as u64) + ((next_insn.imm as u64) << 32);
-            },
+            }
 
             // BPF_LDX class
-            ebpf::LD_B_REG   => reg[_dst] = unsafe {
-                #[allow(clippy::cast_ptr_alignment)]
-                let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u8;
-                check_mem_load(x as u64, 1, insn_ptr)?;
-                x.read_unaligned() as u64
-            },
-            ebpf::LD_H_REG   => reg[_dst] = unsafe {
-                #[allow(clippy::cast_ptr_alignment)]
-                let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u16;
-                check_mem_load(x as u64, 2, insn_ptr)?;
-                x.read_unaligned() as u64
-            },
-            ebpf::LD_W_REG   => reg[_dst] = unsafe {
-                #[allow(clippy::cast_ptr_alignment)]
-                let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u32;
-                check_mem_load(x as u64, 4, insn_ptr)?;
-                x.read_unaligned() as u64
-            },
-            ebpf::LD_DW_REG  => reg[_dst] = unsafe {
-                #[allow(clippy::cast_ptr_alignment)]
-                let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u64;
-                check_mem_load(x as u64, 8, insn_ptr)?;
-                x.read_unaligned()
-            },
+            ebpf::LD_B_REG => {
+                reg[_dst] = unsafe {
+                    #[allow(clippy::cast_ptr_alignment)]
+                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u8;
+                    check_mem_load(x as u64, 1, insn_ptr)?;
+                    x.read_unaligned() as u64
+                }
+            }
+            ebpf::LD_H_REG => {
+                reg[_dst] = unsafe {
+                    #[allow(clippy::cast_ptr_alignment)]
+                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u16;
+                    check_mem_load(x as u64, 2, insn_ptr)?;
+                    x.read_unaligned() as u64
+                }
+            }
+            ebpf::LD_W_REG => {
+                reg[_dst] = unsafe {
+                    #[allow(clippy::cast_ptr_alignment)]
+                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u32;
+                    check_mem_load(x as u64, 4, insn_ptr)?;
+                    x.read_unaligned() as u64
+                }
+            }
+            ebpf::LD_DW_REG => {
+                reg[_dst] = unsafe {
+                    #[allow(clippy::cast_ptr_alignment)]
+                    let x = (reg[_src] as *const u8).offset(insn.off as isize) as *const u64;
+                    check_mem_load(x as u64, 8, insn_ptr)?;
+                    x.read_unaligned()
+                }
+            }
 
             // BPF_ST class
-            ebpf::ST_B_IMM   => unsafe {
+            ebpf::ST_B_IMM => unsafe {
                 let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
                 check_mem_store(x as u64, 1, insn_ptr)?;
                 x.write_unaligned(insn.imm as u8);
             },
-            ebpf::ST_H_IMM   => unsafe {
+            ebpf::ST_H_IMM => unsafe {
                 #[allow(clippy::cast_ptr_alignment)]
                 let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
                 check_mem_store(x as u64, 2, insn_ptr)?;
                 x.write_unaligned(insn.imm as u16);
             },
-            ebpf::ST_W_IMM   => unsafe {
+            ebpf::ST_W_IMM => unsafe {
                 #[allow(clippy::cast_ptr_alignment)]
                 let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
                 check_mem_store(x as u64, 4, insn_ptr)?;
                 x.write_unaligned(insn.imm as u32);
             },
-            ebpf::ST_DW_IMM  => unsafe {
+            ebpf::ST_DW_IMM => unsafe {
                 #[allow(clippy::cast_ptr_alignment)]
                 let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
                 check_mem_store(x as u64, 8, insn_ptr)?;
@@ -179,178 +234,375 @@ pub fn execute_program(prog_: Option<&[u8]>, mem: &[u8], mbuff: &[u8], helpers: &HashMap<u32, ebpf::Helper>) -> Result<u64, Error> {
                 x.write_unaligned(insn.imm as u64);
             },
 
             // BPF_STX class
-            ebpf::ST_B_REG   => unsafe {
+            ebpf::ST_B_REG => unsafe {
                 let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u8;
                 check_mem_store(x as u64, 1, insn_ptr)?;
                 x.write_unaligned(reg[_src] as u8);
             },
-            ebpf::ST_H_REG   => unsafe {
+            ebpf::ST_H_REG => unsafe {
                 #[allow(clippy::cast_ptr_alignment)]
                 let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u16;
                 check_mem_store(x as u64, 2, insn_ptr)?;
                 x.write_unaligned(reg[_src] as u16);
             },
-            ebpf::ST_W_REG   => unsafe {
+            ebpf::ST_W_REG => unsafe {
                 #[allow(clippy::cast_ptr_alignment)]
                 let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u32;
                 check_mem_store(x as u64, 4, insn_ptr)?;
                 x.write_unaligned(reg[_src] as u32);
             },
-            ebpf::ST_DW_REG  => unsafe {
+            ebpf::ST_DW_REG => unsafe {
                 #[allow(clippy::cast_ptr_alignment)]
                 let x = (reg[_dst] as *const u8).offset(insn.off as isize) as *mut u64;
                 check_mem_store(x as u64, 8, insn_ptr)?;
                 x.write_unaligned(reg[_src]);
             },
 
-            ebpf::ST_W_XADD  => unimplemented!(),
+            ebpf::ST_W_XADD => unimplemented!(),
             ebpf::ST_DW_XADD => unimplemented!(),
 
             // BPF_ALU class
             // TODO Check how overflow works in kernel. Should we &= U32MAX all src register value
             // before we do the operation?
             // Cf ((0x11 << 32) - (0x1 << 32)) as u32 VS ((0x11 << 32) as u32 - (0x1 << 32) as u32
-            ebpf::ADD32_IMM  => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm) as u64, //((reg[_dst] & U32MAX) + insn.imm as u64) & U32MAX,
-            ebpf::ADD32_REG  => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64, //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
-            ebpf::SUB32_IMM  => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm) as u64,
-            ebpf::SUB32_REG  => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
-            ebpf::MUL32_IMM  => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm) as u64,
-            ebpf::MUL32_REG  => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
+            ebpf::ADD32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_add(insn.imm) as u64, //((reg[_dst] & U32MAX) + insn.imm as u64) & U32MAX,
+            ebpf::ADD32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_add(reg[_src] as i32) as u64, //((reg[_dst] & U32MAX) + (reg[_src] & U32MAX)) & U32MAX,
+            ebpf::SUB32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_sub(insn.imm) as u64,
+            ebpf::SUB32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_sub(reg[_src] as i32) as u64,
+            ebpf::MUL32_IMM => reg[_dst] = (reg[_dst] as i32).wrapping_mul(insn.imm) as u64,
+            ebpf::MUL32_REG => reg[_dst] = (reg[_dst] as i32).wrapping_mul(reg[_src] as i32) as u64,
             ebpf::DIV32_IMM if insn.imm as u32 == 0 => reg[_dst] = 0,
-            ebpf::DIV32_IMM  => reg[_dst] = (reg[_dst] as u32 / insn.imm as u32) as u64,
+            ebpf::DIV32_IMM => reg[_dst] = (reg[_dst] as u32 / insn.imm as u32) as u64,
             ebpf::DIV32_REG if reg[_src] as u32 == 0 => reg[_dst] = 0,
-            ebpf::DIV32_REG  => reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64,
-            ebpf::OR32_IMM   => reg[_dst] = (reg[_dst] as u32 | insn.imm as u32) as u64,
-            ebpf::OR32_REG   => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
-            ebpf::AND32_IMM  => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
-            ebpf::AND32_REG  => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
+            ebpf::DIV32_REG => reg[_dst] = (reg[_dst] as u32 / reg[_src] as u32) as u64,
+            ebpf::OR32_IMM => reg[_dst] = (reg[_dst] as u32 | insn.imm as u32) as u64,
+            ebpf::OR32_REG => reg[_dst] = (reg[_dst] as u32 | reg[_src] as u32) as u64,
+            ebpf::AND32_IMM => reg[_dst] = (reg[_dst] as u32 & insn.imm as u32) as u64,
+            ebpf::AND32_REG => reg[_dst] = (reg[_dst] as u32 & reg[_src] as u32) as u64,
             // As for the 64-bit version, we should mask the number of bits to shift with
             // 0x1f, but .wrapping_shr() already takes care of it for us.
-            ebpf::LSH32_IMM  => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
-            ebpf::LSH32_REG  => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
-            ebpf::RSH32_IMM  => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
-            ebpf::RSH32_REG  => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
-            ebpf::NEG32      => { reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64; reg[_dst] &= U32MAX; },
+            ebpf::LSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shl(insn.imm as u32) as u64,
+            ebpf::LSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shl(reg[_src] as u32) as u64,
+            ebpf::RSH32_IMM => reg[_dst] = (reg[_dst] as u32).wrapping_shr(insn.imm as u32) as u64,
+            ebpf::RSH32_REG => reg[_dst] = (reg[_dst] as u32).wrapping_shr(reg[_src] as u32) as u64,
+            ebpf::NEG32 => {
+                reg[_dst] = (reg[_dst] as i32).wrapping_neg() as u64;
+                reg[_dst] &= U32MAX;
+            }
             ebpf::MOD32_IMM if insn.imm as u32 == 0 => (),
-            ebpf::MOD32_IMM  => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
+            ebpf::MOD32_IMM => reg[_dst] = (reg[_dst] as u32 % insn.imm as u32) as u64,
             ebpf::MOD32_REG if reg[_src] as u32 == 0 => (),
-            ebpf::MOD32_REG  => reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64,
-            ebpf::XOR32_IMM  => reg[_dst] = (reg[_dst] as u32 ^ insn.imm as u32) as u64,
-            ebpf::XOR32_REG  => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
-            ebpf::MOV32_IMM  => reg[_dst] = insn.imm as u32 as u64,
-            ebpf::MOV32_REG  => reg[_dst] = (reg[_src] as u32) as u64,
+            ebpf::MOD32_REG => reg[_dst] = (reg[_dst] as u32 % reg[_src] as u32) as u64,
+            ebpf::XOR32_IMM => reg[_dst] = (reg[_dst] as u32 ^ insn.imm as u32) as u64,
+            ebpf::XOR32_REG => reg[_dst] = (reg[_dst] as u32 ^ reg[_src] as u32) as u64,
+            ebpf::MOV32_IMM => reg[_dst] = insn.imm as u32 as u64,
+            ebpf::MOV32_REG => reg[_dst] = (reg[_src] as u32) as u64,
             // As for the 64-bit version, we should mask the number of bits to shift with
             // 0x1f, but .wrapping_shr() already takes care of it for us.
-            ebpf::ARSH32_IMM => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64; reg[_dst] &= U32MAX; },
-            ebpf::ARSH32_REG => { reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64; reg[_dst] &= U32MAX; },
-            ebpf::LE         => {
+            ebpf::ARSH32_IMM => {
+                reg[_dst] = (reg[_dst] as i32).wrapping_shr(insn.imm as u32) as u64;
+                reg[_dst] &= U32MAX;
+            }
+            ebpf::ARSH32_REG => {
+                reg[_dst] = (reg[_dst] as i32).wrapping_shr(reg[_src] as u32) as u64;
+                reg[_dst] &= U32MAX;
+            }
+            ebpf::LE => {
                 reg[_dst] = match insn.imm {
                     16 => (reg[_dst] as u16).to_le() as u64,
                     32 => (reg[_dst] as u32).to_le() as u64,
-                    64 => reg[_dst].to_le(),
-                    _  => unreachable!(),
+                    64 => reg[_dst].to_le(),
+                    _ => unreachable!(),
                 };
-            },
-            ebpf::BE         => {
+            }
+            ebpf::BE => {
                 reg[_dst] = match insn.imm {
                     16 => (reg[_dst] as u16).to_be() as u64,
                     32 => (reg[_dst] as u32).to_be() as u64,
-                    64 => reg[_dst].to_be(),
-                    _  => unreachable!(),
+                    64 => reg[_dst].to_be(),
+                    _ => unreachable!(),
                 };
-            },
+            }
 
             // BPF_ALU64 class
-            ebpf::ADD64_IMM  => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
-            ebpf::ADD64_REG  => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
-            ebpf::SUB64_IMM  => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
-            ebpf::SUB64_REG  => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
-            ebpf::MUL64_IMM  => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
-            ebpf::MUL64_REG  => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
+            ebpf::ADD64_IMM => reg[_dst] = reg[_dst].wrapping_add(insn.imm as u64),
+            ebpf::ADD64_REG => reg[_dst] = reg[_dst].wrapping_add(reg[_src]),
+            ebpf::SUB64_IMM => reg[_dst] = reg[_dst].wrapping_sub(insn.imm as u64),
+            ebpf::SUB64_REG => reg[_dst] = reg[_dst].wrapping_sub(reg[_src]),
+            ebpf::MUL64_IMM => reg[_dst] = reg[_dst].wrapping_mul(insn.imm as u64),
+            ebpf::MUL64_REG => reg[_dst] = reg[_dst].wrapping_mul(reg[_src]),
             ebpf::DIV64_IMM if insn.imm == 0 => reg[_dst] = 0,
-            ebpf::DIV64_IMM  => reg[_dst] /= insn.imm as u64,
+            ebpf::DIV64_IMM => reg[_dst] /= insn.imm as u64,
             ebpf::DIV64_REG if reg[_src] == 0 => reg[_dst] = 0,
-            ebpf::DIV64_REG  => reg[_dst] /= reg[_src],
-            ebpf::OR64_IMM   => reg[_dst] |= insn.imm as u64,
-            ebpf::OR64_REG   => reg[_dst] |= reg[_src],
-            ebpf::AND64_IMM  => reg[_dst] &= insn.imm as u64,
-            ebpf::AND64_REG  => reg[_dst] &= reg[_src],
-            ebpf::LSH64_IMM  => reg[_dst] <<= insn.imm as u64 & SHIFT_MASK_64,
-            ebpf::LSH64_REG  => reg[_dst] <<= reg[_src] & SHIFT_MASK_64,
-            ebpf::RSH64_IMM  => reg[_dst] >>= insn.imm as u64 & SHIFT_MASK_64,
-            ebpf::RSH64_REG  => reg[_dst] >>= reg[_src] & SHIFT_MASK_64,
-            ebpf::NEG64      => reg[_dst] = -(reg[_dst] as i64) as u64,
+            ebpf::DIV64_REG => reg[_dst] /= reg[_src],
+            ebpf::OR64_IMM => reg[_dst] |= insn.imm as u64,
+            ebpf::OR64_REG => reg[_dst] |= reg[_src],
+            ebpf::AND64_IMM => reg[_dst] &= insn.imm as u64,
+            ebpf::AND64_REG => reg[_dst] &= reg[_src],
+            ebpf::LSH64_IMM => reg[_dst] <<= insn.imm as u64 & SHIFT_MASK_64,
+            ebpf::LSH64_REG => reg[_dst] <<= reg[_src] & SHIFT_MASK_64,
+            ebpf::RSH64_IMM => reg[_dst] >>= insn.imm as u64 & SHIFT_MASK_64,
+            ebpf::RSH64_REG => reg[_dst] >>= reg[_src] & SHIFT_MASK_64,
+            ebpf::NEG64 => reg[_dst] = -(reg[_dst] as i64) as u64,
             ebpf::MOD64_IMM if insn.imm == 0 => (),
-            ebpf::MOD64_IMM  => reg[_dst] %= insn.imm as u64,
+            ebpf::MOD64_IMM => reg[_dst] %= insn.imm as u64,
             ebpf::MOD64_REG if reg[_src] == 0 => (),
-            ebpf::MOD64_REG  => reg[_dst] %= reg[_src],
-            ebpf::XOR64_IMM  => reg[_dst] ^= insn.imm as u64,
-            ebpf::XOR64_REG  => reg[_dst] ^= reg[_src],
-            ebpf::MOV64_IMM  => reg[_dst] = insn.imm as u64,
-            ebpf::MOV64_REG  => reg[_dst] = reg[_src],
-            ebpf::ARSH64_IMM => reg[_dst] = (reg[_dst] as i64 >> (insn.imm as u64 & SHIFT_MASK_64)) as u64,
-            ebpf::ARSH64_REG => reg[_dst] = (reg[_dst] as i64 >> (reg[_src] as u64 & SHIFT_MASK_64)) as u64,
+            ebpf::MOD64_REG => reg[_dst] %= reg[_src],
+            ebpf::XOR64_IMM => reg[_dst] ^= insn.imm as u64,
+            ebpf::XOR64_REG => reg[_dst] ^= reg[_src],
+            ebpf::MOV64_IMM => reg[_dst] = insn.imm as u64,
+            ebpf::MOV64_REG => reg[_dst] = reg[_src],
+            ebpf::ARSH64_IMM => {
+                reg[_dst] = (reg[_dst] as i64 >> (insn.imm as u64 & SHIFT_MASK_64)) as u64
+            }
+            ebpf::ARSH64_REG => {
+                reg[_dst] = (reg[_dst] as i64 >> (reg[_src] as u64 & SHIFT_MASK_64)) as u64
+            }
 
             // BPF_JMP class
             // TODO: check this actually works as expected for signed / unsigned ops
-            ebpf::JA         => do_jump(),
-            ebpf::JEQ_IMM    => if reg[_dst] == insn.imm as u64 { do_jump(); },
-            ebpf::JEQ_REG    => if reg[_dst] == reg[_src] { do_jump(); },
-            ebpf::JGT_IMM    => if reg[_dst] > insn.imm as u64 { do_jump(); },
-            ebpf::JGT_REG    => if reg[_dst] > reg[_src] { do_jump(); },
-            ebpf::JGE_IMM    => if reg[_dst] >= insn.imm as u64 { do_jump(); },
-            ebpf::JGE_REG    => if reg[_dst] >= reg[_src] { do_jump(); },
-            ebpf::JLT_IMM    => if reg[_dst] < insn.imm as u64 { do_jump(); },
-            ebpf::JLT_REG    => if reg[_dst] < reg[_src] { do_jump(); },
-            ebpf::JLE_IMM    => if reg[_dst] <= insn.imm as u64 { do_jump(); },
-            ebpf::JLE_REG    => if reg[_dst] <= reg[_src] { do_jump(); },
-            ebpf::JSET_IMM   => if reg[_dst] & insn.imm as u64 != 0 { do_jump(); },
-            ebpf::JSET_REG   => if reg[_dst] & reg[_src] != 0 { do_jump(); },
-            ebpf::JNE_IMM    => if reg[_dst] != insn.imm as u64 { do_jump(); },
-            ebpf::JNE_REG    => if reg[_dst] != reg[_src] { do_jump(); },
-            ebpf::JSGT_IMM   => if reg[_dst] as i64 > insn.imm as i64 { do_jump(); },
-            ebpf::JSGT_REG   => if reg[_dst] as i64 > reg[_src] as i64 { do_jump(); },
-            ebpf::JSGE_IMM   => if reg[_dst] as i64 >= insn.imm as i64 { do_jump(); },
-            ebpf::JSGE_REG   => if reg[_dst] as i64 >= reg[_src] as i64 { do_jump(); },
-            ebpf::JSLT_IMM   => if (reg[_dst] as i64) < insn.imm as i64 { do_jump(); },
-            ebpf::JSLT_REG   => if (reg[_dst] as i64) < reg[_src] as i64 { do_jump(); },
-            ebpf::JSLE_IMM   => if reg[_dst] as i64 <= insn.imm as i64 { do_jump(); },
-            ebpf::JSLE_REG   => if reg[_dst] as i64 <= reg[_src] as i64 { do_jump(); },
+            ebpf::JA => do_jump(),
+            ebpf::JEQ_IMM => {
+                if reg[_dst] == insn.imm as u64 {
+                    do_jump();
+                }
+            }
+            ebpf::JEQ_REG => {
+                if reg[_dst] == reg[_src] {
+                    do_jump();
+                }
+            }
+            ebpf::JGT_IMM => {
+                if reg[_dst] > insn.imm as u64 {
+                    do_jump();
+                }
+            }
+            ebpf::JGT_REG => {
+                if reg[_dst] > reg[_src] {
+                    do_jump();
+                }
+            }
+            ebpf::JGE_IMM => {
+                if reg[_dst] >= insn.imm as u64 {
+                    do_jump();
+                }
+            }
+            ebpf::JGE_REG => {
+                if reg[_dst] >= reg[_src] {
+                    do_jump();
+                }
+            }
+            ebpf::JLT_IMM => {
+                if reg[_dst] < insn.imm as u64 {
+                    do_jump();
+                }
+            }
+            ebpf::JLT_REG => {
+                if reg[_dst] < reg[_src] {
+                    do_jump();
+                }
+            }
+            ebpf::JLE_IMM => {
+                if reg[_dst] <= insn.imm as u64 {
+                    do_jump();
+                }
+            }
+            ebpf::JLE_REG => {
+                if reg[_dst] <= reg[_src] {
+                    do_jump();
+                }
+            }
+            ebpf::JSET_IMM => {
+                if reg[_dst] & insn.imm as u64 != 0 {
+                    do_jump();
+                }
+            }
+            ebpf::JSET_REG => {
+                if reg[_dst] & reg[_src] != 0 {
+                    do_jump();
+                }
+            }
+            ebpf::JNE_IMM => {
+                if reg[_dst] != insn.imm as u64 {
+                    do_jump();
+                }
+            }
+            ebpf::JNE_REG => {
+                if reg[_dst] != reg[_src] {
+                    do_jump();
+                }
+            }
+            ebpf::JSGT_IMM => {
+                if reg[_dst] as i64 > insn.imm as i64 {
+                    do_jump();
+                }
+            }
+            ebpf::JSGT_REG => {
+                if reg[_dst] as i64 > reg[_src] as i64 {
+                    do_jump();
+                }
+            }
+            ebpf::JSGE_IMM => {
+                if reg[_dst] as i64 >= insn.imm as i64 {
+                    do_jump();
+                }
+            }
+            ebpf::JSGE_REG => {
+                if reg[_dst] as i64 >= reg[_src] as i64 {
+                    do_jump();
+                }
+            }
+            ebpf::JSLT_IMM => {
+                if (reg[_dst] as i64) < insn.imm as i64 {
+                    do_jump();
+                }
+            }
+            ebpf::JSLT_REG => {
+                if (reg[_dst] as i64) < reg[_src] as i64 {
+                    do_jump();
+                }
+            }
+            ebpf::JSLE_IMM => {
+                if reg[_dst] as i64 <= insn.imm as i64 {
+                    do_jump();
+                }
+            }
+            ebpf::JSLE_REG => {
+                if reg[_dst] as i64 <= reg[_src] as i64 {
+                    do_jump();
+                }
+            }
 
             // BPF_JMP32 class
-            ebpf::JEQ_IMM32  => if reg[_dst] as u32 == insn.imm as u32 { do_jump(); },
-            ebpf::JEQ_REG32  => if reg[_dst] as u32 == reg[_src] as u32 { do_jump(); },
-            ebpf::JGT_IMM32  => if reg[_dst] as u32 > insn.imm as u32 { do_jump(); },
-            ebpf::JGT_REG32  => if reg[_dst] as u32 > reg[_src] as u32 { do_jump(); },
-            ebpf::JGE_IMM32  => if reg[_dst] as u32 >= insn.imm as u32 { do_jump(); },
-            ebpf::JGE_REG32  => if reg[_dst] as u32 >= reg[_src] as u32 { do_jump(); },
-            ebpf::JLT_IMM32  => if (reg[_dst] as u32) < insn.imm as u32 { do_jump(); },
-            ebpf::JLT_REG32  => if (reg[_dst] as u32) < reg[_src] as u32 { do_jump(); },
-            ebpf::JLE_IMM32  => if reg[_dst] as u32 <= insn.imm as u32 { do_jump(); },
-            ebpf::JLE_REG32  => if reg[_dst] as u32 <= reg[_src] as u32 { do_jump(); },
-            ebpf::JSET_IMM32 => if reg[_dst] as u32 & insn.imm as u32 != 0 { do_jump(); },
-            ebpf::JSET_REG32 => if reg[_dst] as u32 & reg[_src] as u32 != 0 { do_jump(); },
-            ebpf::JNE_IMM32  => if reg[_dst] as u32 != insn.imm as u32 { do_jump(); },
-            ebpf::JNE_REG32  => if reg[_dst] as u32 != reg[_src] as u32 { do_jump(); },
-            ebpf::JSGT_IMM32 => if reg[_dst] as i32 > insn.imm { do_jump(); },
-            ebpf::JSGT_REG32 => if reg[_dst] as i32 > reg[_src] as i32 { do_jump(); },
-            ebpf::JSGE_IMM32 => if reg[_dst] as i32 >= insn.imm { do_jump(); },
-            ebpf::JSGE_REG32 => if reg[_dst] as i32 >= reg[_src] as i32 { do_jump(); },
-            ebpf::JSLT_IMM32 => if (reg[_dst] as i32) < insn.imm { do_jump(); },
-            ebpf::JSLT_REG32 => if (reg[_dst] as i32) < reg[_src] as i32 { do_jump(); },
-            ebpf::JSLE_IMM32 => if reg[_dst] as i32 <= insn.imm { do_jump(); },
-            ebpf::JSLE_REG32 => if reg[_dst] as i32 <= reg[_src] as i32 { do_jump(); },
+            ebpf::JEQ_IMM32 => {
+                if reg[_dst] as u32 == insn.imm as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JEQ_REG32 => {
+                if reg[_dst] as u32 == reg[_src] as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JGT_IMM32 => {
+                if reg[_dst] as u32 > insn.imm as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JGT_REG32 => {
+                if reg[_dst] as u32 > reg[_src] as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JGE_IMM32 => {
+                if reg[_dst] as u32 >= insn.imm as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JGE_REG32 => {
+                if reg[_dst] as u32 >= reg[_src] as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JLT_IMM32 => {
+                if (reg[_dst] as u32) < insn.imm as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JLT_REG32 => {
+                if (reg[_dst] as u32) < reg[_src] as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JLE_IMM32 => {
+                if reg[_dst] as u32 <= insn.imm as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JLE_REG32 => {
+                if reg[_dst] as u32 <= reg[_src] as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JSET_IMM32 => {
+                if reg[_dst] as u32 & insn.imm as u32 != 0 {
+                    do_jump();
+                }
+            }
+            ebpf::JSET_REG32 => {
+                if reg[_dst] as u32 & reg[_src] as u32 != 0 {
+                    do_jump();
+                }
+            }
+            ebpf::JNE_IMM32 => {
+                if reg[_dst] as u32 != insn.imm as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JNE_REG32 => {
+                if reg[_dst] as u32 != reg[_src] as u32 {
+                    do_jump();
+                }
+            }
+            ebpf::JSGT_IMM32 => {
+                if reg[_dst] as i32 > insn.imm {
+                    do_jump();
+                }
+            }
+            ebpf::JSGT_REG32 => {
+                if reg[_dst] as i32 > reg[_src] as i32 {
+                    do_jump();
+                }
+            }
+            ebpf::JSGE_IMM32 => {
+                if reg[_dst] as i32 >= insn.imm {
+                    do_jump();
+                }
+            }
+            ebpf::JSGE_REG32 => {
+                if reg[_dst] as i32 >= reg[_src] as i32 {
+                    do_jump();
+                }
+            }
+            ebpf::JSLT_IMM32 => {
+                if (reg[_dst] as i32) < insn.imm {
+                    do_jump();
+                }
+            }
+            ebpf::JSLT_REG32 => {
+                if (reg[_dst] as i32) < reg[_src] as i32 {
+                    do_jump();
+                }
+            }
+            ebpf::JSLE_IMM32 => {
+                if reg[_dst] as i32 <= insn.imm {
+                    do_jump();
+                }
+            }
+            ebpf::JSLE_REG32 => {
+                if reg[_dst] as i32 <= reg[_src] as i32 {
+                    do_jump();
+                }
+            }
 
             // Do not delegate the check to the verifier, since registered functions can be
             // changed after the program has been verified.
-            ebpf::CALL       => if let Some(function) = helpers.get(&(insn.imm as u32)) {
-                reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
-            } else {
-                Err(Error::new(ErrorKind::Other, format!("Error: unknown helper function (id: {:#x})", insn.imm as u32)))?;
-            },
-            ebpf::TAIL_CALL  => unimplemented!(),
-            ebpf::EXIT       => return Ok(reg[0]),
+            ebpf::CALL => {
+                if let Some(function) = helpers.get(&(insn.imm as u32)) {
+                    reg[0] = function(reg[1], reg[2], reg[3], reg[4], reg[5]);
+                } else {
+                    Err(Error::new(
+                        ErrorKind::Other,
+                        format!(
+                            "Error: unknown helper function (id: {:#x})",
+                            insn.imm as u32
+                        ),
+                    ))?;
+                }
+            }
+            ebpf::TAIL_CALL => unimplemented!(),
+            ebpf::EXIT => return Ok(reg[0]),
 
-            _                => unreachable!()
+            _ => unreachable!(),
         }
     }
diff --git a/src/lib.rs b/src/lib.rs
index f348fef25..69b88e447 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -31,9 +31,9 @@
 extern crate byteorder;
 extern crate combine;
+extern crate log;
 #[cfg(feature = "std")]
 extern crate time;
-extern crate log;
 
 #[cfg(not(feature = "std"))]
 extern crate alloc;
@@ -49,8 +49,8 @@ extern crate cranelift_module;
 #[cfg(feature = "cranelift")]
 extern crate cranelift_native;
 
-use byteorder::{ByteOrder, LittleEndian};
 use crate::lib::*;
+use byteorder::{ByteOrder, LittleEndian};
 
 mod asm_parser;
 pub mod assembler;
@@ -63,9 +63,9 @@ pub mod insn_builder;
 mod interpreter;
 #[cfg(all(not(windows), feature = "std"))]
 mod jit;
-mod verifier;
 #[cfg(not(feature = "std"))]
 mod no_std_error;
+mod verifier;
 
 /// Reexports all the types needed from the `std`, `core`, and `alloc`
 /// crates. This avoids elaborate import wrangling having to happen in every
@@ -83,7 +83,7 @@ pub mod lib {
     pub use self::core::mem::ManuallyDrop;
     pub use self::core::ptr;
 
-    pub use self::core::{u32, u64, f64};
+    pub use self::core::{f64, u32, u64};
 
     #[cfg(feature = "std")]
     pub use std::println;
@@ -92,6 +92,10 @@ pub mod lib {
     pub use alloc::vec;
     #[cfg(not(feature = "std"))]
     pub use alloc::vec::Vec;
+    #[cfg(not(feature = "std"))]
+    pub use core::iter::FromIterator;
+    #[cfg(feature = "std")]
+    pub use std::iter::FromIterator;
     #[cfg(feature = "std")]
     pub use std::vec::Vec;
 
@@ -182,6 +186,7 @@ pub struct EbpfVmMbuff<'a> {
     #[cfg(feature = "cranelift")]
     cranelift_prog: Option<cranelift::CraneliftProgram>,
     helpers: HashMap<u32, ebpf::Helper>,
+    allowed: HashSet<u64>,
 }
 
 impl<'a> EbpfVmMbuff<'a> {
@@ -213,6 +218,7 @@ impl<'a> EbpfVmMbuff<'a> {
             #[cfg(feature = "cranelift")]
             cranelift_prog: None,
             helpers: HashMap::new(),
+            allowed: HashSet::new(),
         })
     }
 
@@ -320,6 +326,46 @@ impl<'a> EbpfVmMbuff<'a> {
         Ok(())
     }
 
+    /// Register a set of addresses that the eBPF program is allowed to load from and store to.
+    ///
+    /// When using certain helpers, typically map lookups, the Linux kernel will return pointers
+    /// to structs that the eBPF program needs to interact with. By default rbpf only allows the
+    /// program to interact with its stack, the memory buffer, and the program itself, making it
+    /// impossible to supply functional implementations of these helpers.
+    /// This option allows you to pass in a list of addresses that rbpf will allow the program
+    /// to load from and store to. Given Rust's memory model, you will always know these
+    /// addresses up front when implementing the helpers.
+    ///
+    /// Each invocation of this method will append to the set of allowed addresses.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::iter::FromIterator;
+    /// use std::mem::size_of;
+    /// use std::ptr::addr_of;
+    ///
+    /// struct MapValue {
+    ///     data: u8
+    /// }
+    /// static VALUE: MapValue = MapValue { data: 1 };
+    ///
+    /// let prog = &[
+    ///     0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
+    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
+    /// ];
+    ///
+    /// // Instantiate a VM.
+    /// let mut vm = rbpf::EbpfVmMbuff::new(Some(prog)).unwrap();
+    /// let start = addr_of!(VALUE) as u64;
+    /// let addrs = Vec::from_iter(start..start + size_of::<MapValue>() as u64);
+    /// vm.register_allowed_memory(&addrs);
+    /// ```
+    pub fn register_allowed_memory(&mut self, addrs: &[u64]) -> () {
+        for i in addrs {
+            self.allowed.insert(*i);
+        }
+    }
+
     /// Execute the program loaded, with the given packet data and metadata buffer.
     ///
     /// If the program is made to be compatible with Linux kernel, it is expected to load the
@@ -357,7 +403,7 @@ impl<'a> EbpfVmMbuff<'a> {
     /// assert_eq!(res, 0x2211);
     /// ```
     pub fn execute_program(&self, mem: &[u8], mbuff: &[u8]) -> Result<u64, Error> {
-        interpreter::execute_program(self.prog, mem, mbuff, &self.helpers)
+        interpreter::execute_program(self.prog, mem, mbuff, &self.helpers, &self.allowed)
     }
 
     /// JIT-compile the loaded program. No argument required for this.
@@ -818,14 +864,48 @@ impl<'a> EbpfVmFixedMbuff<'a> {
     /// assert_eq!(res, 3);
     /// }
     /// ```
-    pub fn register_helper(
-        &mut self,
-        key: u32,
-        function: fn(u64, u64, u64, u64, u64) -> u64,
-    ) -> Result<(), Error> {
+    pub fn register_helper(&mut self, key: u32, function: Helper) -> Result<(), Error> {
         self.parent.register_helper(key, function)
     }
 
+    /// Register a set of addresses that the eBPF program is allowed to load from and store to.
+    ///
+    /// When using certain helpers, typically map lookups, the Linux kernel will return pointers
+    /// to structs that the eBPF program needs to interact with. By default rbpf only allows the
+    /// program to interact with its stack, the memory buffer, and the program itself, making it
+    /// impossible to supply functional implementations of these helpers.
+    /// This option allows you to pass in a list of addresses that rbpf will allow the program
+    /// to load from and store to. Given Rust's memory model, you will always know these
+    /// addresses up front when implementing the helpers.
+    ///
+    /// Each invocation of this method will append to the set of allowed addresses.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::iter::FromIterator;
+    /// use std::mem::size_of;
+    /// use std::ptr::addr_of;
+    ///
+    /// struct MapValue {
+    ///     data: u8
+    /// }
+    /// static VALUE: MapValue = MapValue { data: 1 };
+    ///
+    /// let prog = &[
+    ///     0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
+    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
+    /// ];
+    ///
+    /// // Instantiate a VM.
+    /// let mut vm = rbpf::EbpfVmFixedMbuff::new(Some(prog), 0x40, 0x50).unwrap();
+    /// let start = addr_of!(VALUE) as u64;
+    /// let addrs = Vec::from_iter(start..start + size_of::<MapValue>() as u64);
+    /// vm.register_allowed_memory(&addrs);
+    /// ```
+    pub fn register_allowed_memory(&mut self, allowed: &[u64]) -> () {
+        self.parent.register_allowed_memory(allowed)
+    }
+
     /// Execute the program loaded, with the given packet data.
     ///
     /// If the program is made to be compatible with Linux kernel, it is expected to load the
@@ -1252,14 +1332,48 @@ impl<'a> EbpfVmRaw<'a> {
     /// assert_eq!(res, 0x10000000);
     /// }
     /// ```
-    pub fn register_helper(
-        &mut self,
-        key: u32,
-        function: fn(u64, u64, u64, u64, u64) -> u64,
-    ) -> Result<(), Error> {
+    pub fn register_helper(&mut self, key: u32, function: Helper) -> Result<(), Error> {
         self.parent.register_helper(key, function)
     }
 
+    /// Register a set of addresses that the eBPF program is allowed to load from and store to.
+    ///
+    /// When using certain helpers, typically map lookups, the Linux kernel will return pointers
+    /// to structs that the eBPF program needs to interact with. By default rbpf only allows the
+    /// program to interact with its stack, the memory buffer, and the program itself, making it
+    /// impossible to supply functional implementations of these helpers.
+    /// This option allows you to pass in a list of addresses that rbpf will allow the program
+    /// to load from and store to. Given Rust's memory model, you will always know these
+    /// addresses up front when implementing the helpers.
+    ///
+    /// Each invocation of this method will append to the set of allowed addresses.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::iter::FromIterator;
+    /// use std::mem::size_of;
+    /// use std::ptr::addr_of;
+    ///
+    /// struct MapValue {
+    ///     data: u8
+    /// }
+    /// static VALUE: MapValue = MapValue { data: 1 };
+    ///
+    /// let prog = &[
+    ///     0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
+    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
+    /// ];
+    ///
+    /// // Instantiate a VM.
+    /// let mut vm = rbpf::EbpfVmRaw::new(Some(prog)).unwrap();
+    /// let start = addr_of!(VALUE) as u64;
+    /// let addrs = Vec::from_iter(start..start + size_of::<MapValue>() as u64);
+    /// vm.register_allowed_memory(&addrs);
+    /// ```
+    pub fn register_allowed_memory(&mut self, allowed: &[u64]) -> () {
+        self.parent.register_allowed_memory(allowed)
+    }
+
     /// Execute the program loaded, with the given packet data.
     ///
     /// # Examples
@@ -1594,14 +1708,48 @@ impl<'a> EbpfVmNoData<'a> {
     /// assert_eq!(res, 0x1000);
     /// }
     /// ```
-    pub fn register_helper(
-        &mut self,
-        key: u32,
-        function: fn(u64, u64, u64, u64, u64) -> u64,
-    ) -> Result<(), Error> {
+    pub fn register_helper(&mut self, key: u32, function: Helper) -> Result<(), Error> {
         self.parent.register_helper(key, function)
     }
 
+    /// Register a set of addresses that the eBPF program is allowed to load from and store to.
+    ///
+    /// When using certain helpers, typically map lookups, the Linux kernel will return pointers
+    /// to structs that the eBPF program needs to interact with. By default rbpf only allows the
+    /// program to interact with its stack, the memory buffer, and the program itself, making it
+    /// impossible to supply functional implementations of these helpers.
+    /// This option allows you to pass in a list of addresses that rbpf will allow the program
+    /// to load from and store to. Given Rust's memory model, you will always know these
+    /// addresses up front when implementing the helpers.
+    ///
+    /// Each invocation of this method will append to the set of allowed addresses.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::iter::FromIterator;
+    /// use std::mem::size_of;
+    /// use std::ptr::addr_of;
+    ///
+    /// struct MapValue {
+    ///     data: u8
+    /// }
+    /// static VALUE: MapValue = MapValue { data: 1 };
+    ///
+    /// let prog = &[
+    ///     0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov r0, 0
+    ///     0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  // exit
+    /// ];
+    ///
+    /// // Instantiate a VM.
+    /// let mut vm = rbpf::EbpfVmNoData::new(Some(prog)).unwrap();
+    /// let start = addr_of!(VALUE) as u64;
+    /// let addrs = Vec::from_iter(start..start + size_of::<MapValue>() as u64);
+    /// vm.register_allowed_memory(&addrs);
+    /// ```
+    pub fn register_allowed_memory(&mut self, allowed: &[u64]) -> () {
+        self.parent.register_allowed_memory(allowed)
+    }
+
     /// JIT-compile the loaded program. No argument required for this.
     ///
     /// If using helper functions, be sure to register them into the VM before calling this
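
Usage sketch (not part of the patch): the program below calls a map-lookup-style helper and then dereferences the returned pointer, which the interpreter only permits because the object's address range was registered through register_allowed_memory(). MapValue, VALUE, and helper id 1 are illustrative names chosen for this sketch; only register_helper(), register_allowed_memory(), and execute_program() come from the crate as patched above.

use std::iter::FromIterator;
use std::mem::size_of;
use std::ptr::addr_of;

// Illustrative stand-in for a kernel map value; not part of rbpf.
struct MapValue {
    data: u8,
}
static VALUE: MapValue = MapValue { data: 42 };

// Stand-in for bpf_map_lookup_elem: ignores its arguments and returns the
// address of VALUE as a u64, the way helpers hand pointers back to eBPF.
fn lookup(_r1: u64, _r2: u64, _r3: u64, _r4: u64, _r5: u64) -> u64 {
    addr_of!(VALUE) as u64
}

fn main() {
    let prog = &[
        0x85, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // call helper 1
        0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ldxb r0, [r0+0]
        0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
    ];
    let mut vm = rbpf::EbpfVmNoData::new(Some(prog)).unwrap();
    vm.register_helper(1, lookup).unwrap();

    // Without this, check_mem() rejects the ldxb above: the static lives
    // outside the stack, the metadata buffer, and the packet memory.
    let start = addr_of!(VALUE) as u64;
    let addrs = Vec::from_iter(start..start + size_of::<MapValue>() as u64);
    vm.register_allowed_memory(&addrs);

    assert_eq!(vm.execute_program().unwrap(), 42);
}

Since check_mem() only tests the start address of an access against the allowed set, registering every byte of the object, as the range above does, also keeps wider loads at interior offsets working.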