Skip to content
This repository has been archived by the owner on Jan 10, 2025. It is now read-only.

Feature - Explicit sign extension of results in SBPF-v2 #548

Merged
merged 2 commits into from
Oct 3, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 30 additions & 14 deletions src/interpreter.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,18 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
true
}

/// Widens a 32-bit ALU result to 64 bits according to the SBPF version.
///
/// SBPF-v1 implicitly sign extends 32-bit results into the upper half of
/// the destination register; later versions zero extend instead.
fn sign_extension(&self, value: i32) -> u64 {
    let implicit = self
        .executable
        .get_sbpf_version()
        .implicit_sign_extension_of_results();
    match implicit {
        // Legacy behavior: propagate bit 31 into the upper 32 bits.
        true => value as i64 as u64,
        // New behavior: upper 32 bits are cleared.
        false => u64::from(value as u32),
    }
}

/// Advances the interpreter state by one instruction
///
/// Returns false if the program terminated or threw an error.
Expand Down Expand Up @@ -248,14 +260,14 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
},

// BPF_ALU class
ebpf::ADD32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(insn.imm as i32) as u64,
ebpf::ADD32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(self.reg[src] as i32) as u64,
ebpf::ADD32_IMM => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_add(insn.imm as i32)),
ebpf::ADD32_REG => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_add(self.reg[src] as i32)),
ebpf::SUB32_IMM => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
self.reg[dst] = (insn.imm as i32).wrapping_sub(self.reg[dst] as i32) as u64
self.reg[dst] = self.sign_extension((insn.imm as i32).wrapping_sub(self.reg[dst] as i32))
} else {
self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(insn.imm as i32) as u64
self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_sub(insn.imm as i32))
},
ebpf::SUB32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32) as u64,
ebpf::SUB32_REG => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32)),
ebpf::MUL32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64,
ebpf::MUL32_REG if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
ebpf::DIV32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32) as u64,
Expand All @@ -280,9 +292,13 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
ebpf::XOR32_IMM => self.reg[dst] = (self.reg[dst] as u32 ^ insn.imm as u32) as u64,
ebpf::XOR32_REG => self.reg[dst] = (self.reg[dst] as u32 ^ self.reg[src] as u32) as u64,
ebpf::MOV32_IMM => self.reg[dst] = insn.imm as u32 as u64,
ebpf::MOV32_REG => self.reg[dst] = (self.reg[src] as u32) as u64,
ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u64 & (u32::MAX as u64),
ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u64 & (u32::MAX as u64),
ebpf::MOV32_REG => self.reg[dst] = if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.reg[src] as u32 as u64
} else {
self.reg[src] as i32 as i64 as u64
},
ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u32 as u64,
ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u32 as u64,
ebpf::LE if self.executable.get_sbpf_version().enable_le() => {
self.reg[dst] = match insn.imm {
16 => (self.reg[dst] as u16).to_le() as u64,
Expand Down Expand Up @@ -345,8 +361,8 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
}

// BPF_PQR class
ebpf::LMUL32_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64,
ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
ebpf::LMUL32_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32).wrapping_mul(insn.imm as u32) as u64,
ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32).wrapping_mul(self.reg[src] as u32) as u64,
ebpf::LMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64),
ebpf::LMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]),
ebpf::UHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(insn.imm as u64 as u128).wrapping_shr(64) as u64,
Expand Down Expand Up @@ -383,12 +399,12 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
},
ebpf::SDIV32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u64;
self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u32 as u64;
}
ebpf::SDIV32_REG if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideByZero; self, self.reg[src], i32);
throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u64;
self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u32 as u64;
},
ebpf::SDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
Expand All @@ -401,12 +417,12 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
},
ebpf::SREM32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
self.reg[dst] = (self.reg[dst] as i32 % insn.imm as i32) as u64;
self.reg[dst] = (self.reg[dst] as i32 % insn.imm as i32) as u32 as u64;
}
ebpf::SREM32_REG if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideByZero; self, self.reg[src], i32);
throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
self.reg[dst] = (self.reg[dst] as i32 % self.reg[src] as i32) as u64;
self.reg[dst] = (self.reg[dst] as i32 % self.reg[src] as i32) as u32 as u64;
},
ebpf::SREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
Expand Down
26 changes: 20 additions & 6 deletions src/jit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -483,11 +483,15 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
// BPF_ALU class
ebpf::ADD32_IMM => {
self.emit_sanitized_alu(OperandSize::S32, 0x01, 0, dst, insn.imm);
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
},
ebpf::ADD32_REG => {
self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x01, src, dst, 0, None));
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
},
ebpf::SUB32_IMM => {
if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
Expand All @@ -498,11 +502,15 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
} else {
self.emit_sanitized_alu(OperandSize::S32, 0x29, 5, dst, insn.imm);
}
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
},
ebpf::SUB32_REG => {
self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x29, src, dst, 0, None));
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
},
ebpf::MUL32_IMM | ebpf::DIV32_IMM | ebpf::MOD32_IMM if !self.executable.get_sbpf_version().enable_pqr() =>
self.emit_product_quotient_remainder(OperandSize::S32, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, dst, dst, Some(insn.imm)),
Expand All @@ -526,7 +534,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
self.emit_ins(X86Instruction::load_immediate(OperandSize::S32, dst, insn.imm));
}
}
ebpf::MOV32_REG => self.emit_ins(X86Instruction::mov(OperandSize::S32, src, dst)),
ebpf::MOV32_REG => {
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::mov(OperandSize::S32, src, dst));
} else {
self.emit_ins(X86Instruction::mov_with_sign_extension(OperandSize::S64, src, dst));
}
}
ebpf::ARSH32_IMM => self.emit_shift(OperandSize::S32, 7, REGISTER_SCRATCH, dst, Some(insn.imm)),
ebpf::ARSH32_REG => self.emit_shift(OperandSize::S32, 7, src, dst, None),
ebpf::LE if self.executable.get_sbpf_version().enable_le() => {
Expand Down Expand Up @@ -1301,7 +1315,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
self.emit_ins(X86Instruction::pop(RAX));
}
if let OperandSize::S32 = size {
if signed {
if signed && self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
}
Expand Down
5 changes: 5 additions & 0 deletions src/program.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,11 @@ pub enum SBPFVersion {
}

impl SBPFVersion {
/// Returns true when 32-bit results are implicitly sign extended
/// into the upper half of the destination register (SBPF-v1 behavior).
pub fn implicit_sign_extension_of_results(&self) -> bool {
    matches!(self, SBPFVersion::V1)
}

/// Enable the little-endian byte swap instructions
pub fn enable_le(&self) -> bool {
self == &SBPFVersion::V1
Expand Down
13 changes: 13 additions & 0 deletions src/x86.rs
Original file line number Diff line number Diff line change
Expand Up @@ -218,6 +218,19 @@ impl X86Instruction {
}
}

/// Move source to destination, sign extending the lower 32 bits of the
/// source into the full destination register (x86-64 `movsxd`).
///
/// Only wide operand sizes are valid; `S0`/`S8`/`S16` are rejected at
/// compile time by the macro below.
#[inline]
pub const fn mov_with_sign_extension(size: OperandSize, source: u8, destination: u8) -> Self {
exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16);
Self {
size,
// 0x63 = MOVSXD: sign extend r/m32 into a 64-bit register
// (same opcode the JIT emits elsewhere as "sign extend i32 to i64").
opcode: 0x63,
// NOTE(review): operand order mirrors the other two-operand emitters
// here — first_operand is the destination register field; presumably
// the encoder maps it to ModRM.reg for MOVSXD. Verify against `alu()`.
first_operand: destination,
second_operand: source,
..Self::DEFAULT
}
}

/// Conditionally move source to destination
#[inline]
pub const fn cmov(size: OperandSize, condition: u8, source: u8, destination: u8) -> Self {
Expand Down
Loading