diff --git a/lib/runtime-core/Cargo.toml b/lib/runtime-core/Cargo.toml
index 44fa2eed399..00c9a4bb3d0 100644
--- a/lib/runtime-core/Cargo.toml
+++ b/lib/runtime-core/Cargo.toml
@@ -22,6 +22,7 @@ smallvec = "1"
 bincode = "1.1"
 wasm-debug = { optional = true, version = "0.2.0" }
 target-lexicon = "0.10"
+log = "0.4"
 
 [dependencies.indexmap]
 version = "1.2"
diff --git a/lib/runtime-core/src/instance.rs b/lib/runtime-core/src/instance.rs
index 48e59d4ec65..8f911641a4d 100644
--- a/lib/runtime-core/src/instance.rs
+++ b/lib/runtime-core/src/instance.rs
@@ -19,6 +19,7 @@ use crate::{
     types::{FuncIndex, FuncSig, GlobalIndex, LocalOrImport, MemoryIndex, TableIndex, Type, Value},
     vm::{self, InternalField},
 };
+use log::debug;
 use smallvec::{smallvec, SmallVec};
 use std::{
     borrow::Borrow,
@@ -260,6 +261,7 @@ impl Instance {
     /// # }
     /// ```
     pub fn call(&self, name: &str, params: &[Value]) -> CallResult<Vec<Value>> {
+        debug!("Instance::call {}, {:?}", name, params);
         let func: DynFunc = self.exports.get(name)?;
         func.call(params)
     }
@@ -518,10 +520,12 @@ fn call_func_with_index(
             .as_ptr(),
     };
 
+    debug!("call_func_with_index: before get_trampoline");
     let wasm = runnable
         .get_trampoline(info, sig_index)
         .expect("wasm trampoline");
 
+    debug!("call_func_with_index: before call_func_with_index_inner");
     call_func_with_index_inner(ctx_ptr, func_ptr, signature, wasm, args, rets)
 }
 
@@ -586,6 +590,7 @@ pub(crate) fn call_func_with_index_inner(
 
     let run_wasm = |result_space: *mut u64| unsafe {
         let mut error_out = None;
+        debug!("call_func_with_index_inner: before invoking wasm");
 
         let success = invoke(
             trampoline,
@@ -614,6 +619,7 @@
         Type::V128 => unreachable!("V128 does not map to any single value"),
     };
 
+    debug!("call_func_with_index_inner: before run_wasm");
     match signature.returns() {
         &[] => {
             run_wasm(ptr::null_mut())?;
diff --git a/lib/singlepass-backend/Cargo.toml b/lib/singlepass-backend/Cargo.toml
index bf1951399a1..25baf5974c9 100644
--- a/lib/singlepass-backend/Cargo.toml
+++ b/lib/singlepass-backend/Cargo.toml
@@ -22,6 +22,7 @@ smallvec = "1"
 serde = "1.0"
 serde_derive = "1.0"
 bincode = "1.2"
+log = "0.4"
 
 [features]
 default = []
diff --git a/lib/singlepass-backend/src/codegen_x64.rs b/lib/singlepass-backend/src/codegen_x64.rs
index 38df4e67b6b..ebfdd0f7a9c 100644
--- a/lib/singlepass-backend/src/codegen_x64.rs
+++ b/lib/singlepass-backend/src/codegen_x64.rs
@@ -8,6 +8,7 @@ use dynasmrt::aarch64::Assembler;
 #[cfg(target_arch = "x86_64")]
 use dynasmrt::x64::Assembler;
 use dynasmrt::{AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi};
+use log::debug;
 use smallvec::SmallVec;
 use std::{
     any::Any,
@@ -496,10 +497,12 @@ impl RunnableModule for X64ExecutionContext {
     }
 
     fn get_trampoline(&self, _: &ModuleInfo, sig_index: SigIndex) -> Option<Wasm> {
+        debug!("BEGIN X64ExecutionContext::get_trampoline");
         // Correctly unwinding from `catch_unsafe_unwind` on hardware exceptions depends
         // on the signal handlers being installed. Here we call `ensure_sighandler` "statically"
         // outside `invoke()`.
         fault::ensure_sighandler();
+        debug!("X64ExecutionContext::get_trampoline after fault::ensure_sighandler");
 
         unsafe extern "C" fn invoke(
             _trampoline: Trampoline,
@@ -539,6 +542,7 @@ impl RunnableModule for X64ExecutionContext {
                 callable: NonNull<vm::Func>,
             }
             extern "C" fn call_fn(f: *mut u8) -> u64 {
+                debug!("BEGIN X64ExecutionContext call_fn");
                 unsafe {
                     let f = &*(f as *const CallCtx);
                     let callable: extern "C" fn(
@@ -578,6 +582,7 @@ impl RunnableModule for X64ExecutionContext {
                     ) -> u64 = std::mem::transmute(f.callable);
 
                     let mut args = f.args.iter();
+                    debug!("BEFORE X64ExecutionContext callable");
                     callable(
                         f.ctx as u64,
                         args.next().cloned().unwrap_or(0),
@@ -625,6 +630,7 @@ impl RunnableModule for X64ExecutionContext {
                 PROT_WRITE,
             };
             const STACK_SIZE: usize = 1048576 * 1024; // 1GB of virtual address space for stack.
+            debug!("BEFORE X64ExecutionContext allocating stack");
             let stack_ptr = mmap(
                 ::std::ptr::null_mut(),
                 STACK_SIZE,
@@ -637,11 +643,13 @@ impl RunnableModule for X64ExecutionContext {
                 panic!("unable to allocate stack");
             }
             // TODO: Mark specific regions in the stack as PROT_NONE.
+            debug!("BEFORE X64ExecutionContext SWITCH_STACK");
             let ret = SWITCH_STACK(
                 (stack_ptr as *mut u8).offset(STACK_SIZE as isize) as *mut u64,
                 call_fn,
                 &mut cctx as *mut CallCtx as *mut u8,
             );
+            debug!("BEFORE X64ExecutionContext freeing stack");
             munmap(stack_ptr, STACK_SIZE);
             ret
         }
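
The `debug!` statements above go through the `log` crate, which is only a logging facade: unless the embedding binary installs a concrete logger, they compile to no-ops and print nothing. Below is a minimal sketch of a host that surfaces the new trace, assuming `env_logger` is added alongside `wasmer-runtime` (any `log`-compatible logger works; the module file and export name are hypothetical):

```rust
use wasmer_runtime::{imports, instantiate};

fn main() {
    // Install a concrete logger behind the `log` facade; env_logger
    // filters by the RUST_LOG environment variable (e.g. RUST_LOG=debug).
    env_logger::init();

    // Hypothetical module file and export name, purely for illustration.
    let wasm_bytes = std::fs::read("module.wasm").expect("failed to read wasm file");
    let instance = instantiate(&wasm_bytes, &imports! {}).expect("failed to instantiate");

    // With RUST_LOG=debug, this call now emits the trace added in this diff,
    // from Instance::call down through call_func_with_index_inner.
    let result = instance.call("main", &[]).expect("call failed");
    println!("result: {:?}", result);
}
```

Note that the codegen_x64.rs messages only fire when the singlepass backend is actually selected (e.g. building the host with wasmer-runtime's `singlepass` feature); with another backend, only the runtime-core messages appear.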