1283: Workaround for floating point arguments and return values in `DynamicFunc`s. r=syrusakbary a=losfair

This PR makes floating point arguments and return values for `DynamicFunc`s work correctly in all three backends.

Previously, Singlepass passed all arguments in integer registers. This PR adds another thin trampoline layer just before control is transferred to the import function, so that arguments are rearranged strictly according to the System V ABI.

The full fix would require Singlepass to implement the SysV calling convention internally as well (https://github.com/wasmerio/wasmer/pull/1271); this PR is just a workaround.
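For illustration, here is a minimal sketch of the kind of import this change is about, modeled on the new `imported_functions_forms` test cases added in this PR (the function name `float_callback`, the exact module paths, and the arithmetic in the closure are illustrative, not taken from the PR itself):

```rust
use std::sync::Arc;
use wasmer_runtime_core::{
    typed_func::DynamicFunc,
    types::{FuncSig, Type, Value},
    vm,
};

// A polymorphic host function taking (i32, f32) and returning f32.
// Before this PR, Singlepass moved every argument through integer
// registers, so the f32 argument and the f32 return value never reached
// the XMM registers that the System V ABI assigns to them.
fn float_callback() -> DynamicFunc<'static> {
    DynamicFunc::new(
        Arc::new(FuncSig::new(vec![Type::I32, Type::F32], vec![Type::F32])),
        |_vmctx: &mut vm::Ctx, inputs: &[Value]| -> Vec<Value> {
            let i = match inputs[0] {
                Value::I32(x) => x,
                _ => unreachable!(),
            };
            let k = match inputs[1] {
                Value::F32(x) => x,
                _ => unreachable!(),
            };
            // Illustrative arithmetic only.
            vec![Value::F32(i as f32 + k)]
        },
    )
}
```

Under the System V AMD64 ABI, integer arguments (including the hidden `vm::Ctx` pointer) go in `rdi`, `rsi`, `rdx`, `rcx`, `r8`, `r9`, floating point arguments go in `xmm0`–`xmm7`, and a floating point return value comes back in `xmm0`; the trampoline added here rearranges the integer-register layout produced by Singlepass into that form before calling the host function.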

Co-authored-by: Ivan Enderlin <ivan.enderlin@hoa-project.net>
Co-authored-by: losfair <zhy20000919@hotmail.com>
Co-authored-by: Heyang Zhou <zhy20000919@hotmail.com>
Co-authored-by: Syrus Akbary <me@syrusakbary.com>
bors[bot] 2020-03-12 04:52:12 +00:00 committed by GitHub
commit 18168fc974
14 changed files with 587 additions and 116 deletions

View File

@@ -2,6 +2,8 @@
 ## **[Unreleased]**
+- [#1283](https://github.com/wasmerio/wasmer/pull/1283) Workaround for floating point arguments and return values in `DynamicFunc`s.
 ## 0.16.2 - 2020-03-11
 - [#1294](https://github.com/wasmerio/wasmer/pull/1294) Fix bug related to system calls in WASI that rely on reading from WasmPtrs as arrays of length 0. `WasmPtr` will now succeed on length 0 arrays again.

View File

@@ -209,7 +209,7 @@ impl ModuleCodeGenerator<CraneliftFunctionCodeGenerator, Caller, CodegenError>
 Ok(())
 }
-fn feed_import_function(&mut self) -> Result<(), CodegenError> {
+fn feed_import_function(&mut self, _sigindex: SigIndex) -> Result<(), CodegenError> {
 Ok(())
 }

View File

@@ -8984,7 +8984,7 @@ impl<'ctx> ModuleCodeGenerator<LLVMFunctionCodeGenerator<'ctx>, LLVMBackend, Cod
 Ok(())
 }
-fn feed_import_function(&mut self) -> Result<(), CodegenError> {
+fn feed_import_function(&mut self, _sigindex: SigIndex) -> Result<(), CodegenError> {
 self.func_import_count += 1;
 Ok(())
 }

View File

@@ -34,6 +34,8 @@ pub unsafe extern "C" fn wasmer_trampoline_buffer_builder_add_context_trampoline
 }
 /// Adds a callinfo trampoline to the builder.
+///
+/// Deprecated. In a future version `DynamicFunc::new` will be exposed to the C API and should be used instead of this function.
 #[no_mangle]
 #[allow(clippy::cast_ptr_alignment)]
 pub unsafe extern "C" fn wasmer_trampoline_buffer_builder_add_callinfo_trampoline(
@@ -42,8 +44,14 @@ pub unsafe extern "C" fn wasmer_trampoline_buffer_builder_add_callinfo_trampolin
 ctx: *const c_void,
 num_params: u32,
 ) -> usize {
+use wasmer_runtime_core::types::Type;
 let builder = &mut *(builder as *mut TrampolineBufferBuilder);
-builder.add_callinfo_trampoline(mem::transmute(func), ctx as *const CallContext, num_params)
+builder.add_callinfo_trampoline(
+mem::transmute(func),
+ctx as *const CallContext,
+&vec![Type::I64; num_params as usize],
+&[Type::I64],
+)
 }
 /// Finalizes the trampoline builder into an executable buffer.

View File

@@ -1386,6 +1386,8 @@ wasmer_result_t wasmer_table_new(wasmer_table_t **table, wasmer_limits_t limits)
 #if (!defined(_WIN32) && defined(ARCH_X86_64))
 /**
  * Adds a callinfo trampoline to the builder.
+ *
+ * Deprecated. In a future version `DynamicFunc::new` will be exposed to the C API and should be used instead of this function.
  */
 uintptr_t wasmer_trampoline_buffer_builder_add_callinfo_trampoline(wasmer_trampoline_buffer_builder_t *builder,
 const wasmer_trampoline_callable_t *func,

View File

@@ -1146,6 +1146,8 @@ wasmer_result_t wasmer_table_new(wasmer_table_t **table, wasmer_limits_t limits)
 #if (!defined(_WIN32) && defined(ARCH_X86_64))
 /// Adds a callinfo trampoline to the builder.
+///
+/// Deprecated. In a future version `DynamicFunc::new` will be exposed to the C API and should be used instead of this function.
 uintptr_t wasmer_trampoline_buffer_builder_add_callinfo_trampoline(wasmer_trampoline_buffer_builder_t *builder,
 const wasmer_trampoline_callable_t *func,
 const void *ctx,

View File

@@ -1,4 +1,4 @@
-use std::sync::Arc;
+use std::{convert::TryInto, sync::Arc};
 use wasmer_runtime_core::{
 compile_with,
 error::RuntimeError,
@@ -12,10 +12,11 @@ use wasmer_runtime_core::{
 use wasmer_runtime_core_tests::{get_compiler, wat2wasm};
 macro_rules! call_and_assert {
-($instance:ident, $function:ident, $expected_value:expr) => {
-let $function: Func<i32, i32> = $instance.func(stringify!($function)).unwrap();
+($instance:ident, $function:ident( $( $inputs:ty ),* ) -> $output:ty, ( $( $arguments:expr ),* ) == $expected_value:expr) => {
+#[allow(unused_parens)]
+let $function: Func<( $( $inputs ),* ), $output> = $instance.func(stringify!($function)).expect(concat!("Failed to get the `", stringify!($function), "` export function."));
-let result = $function.call(1);
+let result = $function.call( $( $arguments ),* );
 match (result, $expected_value) {
 (Ok(value), expected_value) => assert_eq!(
@@ -75,7 +76,12 @@ fn imported_functions_forms(test: &dyn Fn(&Instance)) {
 (import "env" "memory" (memory 1 1))
 (import "env" "callback_fn" (func $callback_fn (type $type)))
 (import "env" "callback_closure" (func $callback_closure (type $type)))
-(import "env" "callback_closure_dynamic" (func $callback_closure_dynamic (type $type)))
+(import "env" "callback_fn_dynamic" (func $callback_fn_dynamic (type $type)))
+(import "env" "callback_closure_dynamic_0" (func $callback_closure_dynamic_0))
+(import "env" "callback_closure_dynamic_1" (func $callback_closure_dynamic_1 (param i32) (result i32)))
+(import "env" "callback_closure_dynamic_2" (func $callback_closure_dynamic_2 (param i32 i64) (result i64)))
+(import "env" "callback_closure_dynamic_3" (func $callback_closure_dynamic_3 (param i32 i64 f32) (result f32)))
+(import "env" "callback_closure_dynamic_4" (func $callback_closure_dynamic_4 (param i32 i64 f32 f64) (result f64)))
 (import "env" "callback_closure_with_env" (func $callback_closure_with_env (type $type)))
 (import "env" "callback_fn_with_vmctx" (func $callback_fn_with_vmctx (type $type)))
 (import "env" "callback_closure_with_vmctx" (func $callback_closure_with_vmctx (type $type)))
@@ -94,9 +100,34 @@ fn imported_functions_forms(test: &dyn Fn(&Instance)) {
 get_local 0
 call $callback_closure)
-(func (export "function_closure_dynamic") (type $type)
+(func (export "function_fn_dynamic") (type $type)
 get_local 0
-call $callback_closure_dynamic)
+call $callback_fn_dynamic)
+(func (export "function_closure_dynamic_0")
+call $callback_closure_dynamic_0)
+(func (export "function_closure_dynamic_1") (param i32) (result i32)
+get_local 0
+call $callback_closure_dynamic_1)
+(func (export "function_closure_dynamic_2") (param i32 i64) (result i64)
+get_local 0
+get_local 1
+call $callback_closure_dynamic_2)
+(func (export "function_closure_dynamic_3") (param i32 i64 f32) (result f32)
+get_local 0
+get_local 1
+get_local 2
+call $callback_closure_dynamic_3)
+(func (export "function_closure_dynamic_4") (param i32 i64 f32 f64) (result f64)
+get_local 0
+get_local 1
+get_local 2
+get_local 3
+call $callback_closure_dynamic_4)
 (func (export "function_closure_with_env") (type $type)
 get_local 0
@@ -154,13 +185,73 @@ fn imported_functions_forms(test: &dyn Fn(&Instance)) {
 Ok(n + 1)
 }),
-"callback_closure_dynamic" => DynamicFunc::new(
-Arc::new(FuncSig::new(vec![Type::I32], vec![Type::I32])),
-|_, params| -> Vec<Value> {
-match params[0] {
-Value::I32(x) => vec![Value::I32(x + 1)],
-_ => unreachable!()
-}
-}
-),
+// Regular polymorphic function.
+"callback_fn_dynamic" => DynamicFunc::new(
+Arc::new(FuncSig::new(vec![Type::I32], vec![Type::I32])),
+callback_fn_dynamic,
+),
+// Polymorphic closure “closures”.
+"callback_closure_dynamic_0" => DynamicFunc::new(
+Arc::new(FuncSig::new(vec![], vec![])),
+|_, inputs: &[Value]| -> Vec<Value> {
+assert!(inputs.is_empty());
+vec![]
+}
+),
+"callback_closure_dynamic_1" => DynamicFunc::new(
+Arc::new(FuncSig::new(vec![Type::I32], vec![Type::I32])),
+move |vmctx: &mut vm::Ctx, inputs: &[Value]| -> Vec<Value> {
+assert_eq!(inputs.len(), 1);
+let memory = vmctx.memory(0);
+let shift_ = shift + memory.view::<i32>()[0].get();
+let n: i32 = (&inputs[0]).try_into().unwrap();
+vec![Value::I32(shift_ + n)]
+}
+),
+"callback_closure_dynamic_2" => DynamicFunc::new(
+Arc::new(FuncSig::new(vec![Type::I32, Type::I64], vec![Type::I64])),
+move |vmctx: &mut vm::Ctx, inputs: &[Value]| -> Vec<Value> {
+assert_eq!(inputs.len(), 2);
+let memory = vmctx.memory(0);
+let shift_ = shift + memory.view::<i32>()[0].get();
+let i: i32 = (&inputs[0]).try_into().unwrap();
+let j: i64 = (&inputs[1]).try_into().unwrap();
+vec![Value::I64(shift_ as i64 + i as i64 + j)]
+}
+),
+"callback_closure_dynamic_3" => DynamicFunc::new(
+Arc::new(FuncSig::new(vec![Type::I32, Type::I64, Type::F32], vec![Type::F32])),
+move |vmctx: &mut vm::Ctx, inputs: &[Value]| -> Vec<Value> {
+assert_eq!(inputs.len(), 3);
+let memory = vmctx.memory(0);
+let shift_ = shift + memory.view::<i32>()[0].get();
+let i: i32 = (&inputs[0]).try_into().unwrap();
+let j: i64 = (&inputs[1]).try_into().unwrap();
+let k: f32 = (&inputs[2]).try_into().unwrap();
+vec![Value::F32(shift_ as f32 + i as f32 + j as f32 + k)]
+}
+),
+"callback_closure_dynamic_4" => DynamicFunc::new(
+Arc::new(FuncSig::new(vec![Type::I32, Type::I64, Type::F32, Type::F64], vec![Type::F64])),
+move |vmctx: &mut vm::Ctx, inputs: &[Value]| -> Vec<Value> {
+assert_eq!(inputs.len(), 4);
+let memory = vmctx.memory(0);
+let shift_ = shift + memory.view::<i32>()[0].get();
+let i: i32 = (&inputs[0]).try_into().unwrap();
+let j: i64 = (&inputs[1]).try_into().unwrap();
+let k: f32 = (&inputs[2]).try_into().unwrap();
+let l: f64 = (&inputs[3]).try_into().unwrap();
+vec![Value::F64(shift_ as f64 + i as f64 + j as f64 + k as f64 + l)]
+}
+),
@@ -227,6 +318,13 @@ fn callback_fn(n: i32) -> Result<i32, ()> {
 Ok(n + 1)
 }
+fn callback_fn_dynamic(_: &mut vm::Ctx, inputs: &[Value]) -> Vec<Value> {
+match inputs[0] {
+Value::I32(x) => vec![Value::I32(x + 1)],
+_ => unreachable!(),
+}
+}
 fn callback_fn_with_vmctx(vmctx: &mut vm::Ctx, n: i32) -> Result<i32, ()> {
 let memory = vmctx.memory(0);
 let shift_: i32 = memory.view()[0].get();
@@ -246,57 +344,82 @@ fn callback_fn_trap_with_vmctx(vmctx: &mut vm::Ctx, n: i32) -> Result<i32, Strin
 }
 macro_rules! test {
-($test_name:ident, $function:ident, $expected_value:expr) => {
+($test_name:ident, $function:ident( $( $inputs:ty ),* ) -> $output:ty, ( $( $arguments:expr ),* ) == $expected_value:expr) => {
 #[test]
 fn $test_name() {
 imported_functions_forms(&|instance| {
-call_and_assert!(instance, $function, $expected_value);
+call_and_assert!(instance, $function( $( $inputs ),* ) -> $output, ( $( $arguments ),* ) == $expected_value);
 });
 }
 };
 }
-test!(test_fn, function_fn, Ok(2));
-test!(test_closure, function_closure, Ok(2));
-test!(test_closure_dynamic, function_closure_dynamic, Ok(2));
+test!(test_fn, function_fn(i32) -> i32, (1) == Ok(2));
+test!(test_closure, function_closure(i32) -> i32, (1) == Ok(2));
+test!(test_fn_dynamic, function_fn_dynamic(i32) -> i32, (1) == Ok(2));
+test!(
+test_closure_dynamic_0,
+function_closure_dynamic_0(()) -> (),
+() == Ok(())
+);
+test!(
+test_closure_dynamic_1,
+function_closure_dynamic_1(i32) -> i32,
+(1) == Ok(1 + shift + SHIFT)
+);
+test!(
+test_closure_dynamic_2,
+function_closure_dynamic_2(i32, i64) -> i64,
+(1, 2) == Ok(1 + 2 + shift as i64 + SHIFT as i64)
+);
+test!(
+test_closure_dynamic_3,
+function_closure_dynamic_3(i32, i64, f32) -> f32,
+(1, 2, 3.) == Ok(1. + 2. + 3. + shift as f32 + SHIFT as f32)
+);
+test!(
+test_closure_dynamic_4,
+function_closure_dynamic_4(i32, i64, f32, f64) -> f64,
+(1, 2, 3., 4.) == Ok(1. + 2. + 3. + 4. + shift as f64 + SHIFT as f64)
+);
 test!(
 test_closure_with_env,
-function_closure_with_env,
-Ok(2 + shift + SHIFT)
+function_closure_with_env(i32) -> i32,
+(1) == Ok(2 + shift + SHIFT)
 );
-test!(test_fn_with_vmctx, function_fn_with_vmctx, Ok(2 + SHIFT));
+test!(test_fn_with_vmctx, function_fn_with_vmctx(i32) -> i32, (1) == Ok(2 + SHIFT));
 test!(
 test_closure_with_vmctx,
-function_closure_with_vmctx,
-Ok(2 + SHIFT)
+function_closure_with_vmctx(i32) -> i32,
+(1) == Ok(2 + SHIFT)
 );
 test!(
 test_closure_with_vmctx_and_env,
-function_closure_with_vmctx_and_env,
-Ok(2 + shift + SHIFT)
+function_closure_with_vmctx_and_env(i32) -> i32,
+(1) == Ok(2 + shift + SHIFT)
 );
 test!(
 test_fn_trap,
-function_fn_trap,
-Err(RuntimeError(Box::new(format!("foo {}", 2))))
+function_fn_trap(i32) -> i32,
+(1) == Err(RuntimeError(Box::new(format!("foo {}", 2))))
 );
 test!(
 test_closure_trap,
-function_closure_trap,
-Err(RuntimeError(Box::new(format!("bar {}", 2))))
+function_closure_trap(i32) -> i32,
+(1) == Err(RuntimeError(Box::new(format!("bar {}", 2))))
 );
 test!(
 test_fn_trap_with_vmctx,
-function_fn_trap_with_vmctx,
-Err(RuntimeError(Box::new(format!("baz {}", 2 + SHIFT))))
+function_fn_trap_with_vmctx(i32) -> i32,
+(1) == Err(RuntimeError(Box::new(format!("baz {}", 2 + SHIFT))))
 );
 test!(
 test_closure_trap_with_vmctx,
-function_closure_trap_with_vmctx,
-Err(RuntimeError(Box::new(format!("qux {}", 2 + SHIFT))))
+function_closure_trap_with_vmctx(i32) -> i32,
+(1) == Err(RuntimeError(Box::new(format!("qux {}", 2 + SHIFT))))
 );
 test!(
 test_closure_trap_with_vmctx_and_env,
-function_closure_trap_with_vmctx_and_env,
-Err(RuntimeError(Box::new(format!("! {}", 2 + shift + SHIFT))))
+function_closure_trap_with_vmctx_and_env(i32) -> i32,
+(1) == Err(RuntimeError(Box::new(format!("! {}", 2 + shift + SHIFT))))
 );

View File

@@ -143,7 +143,7 @@ pub trait ModuleCodeGenerator<FCG: FunctionCodeGenerator<E>, RM: RunnableModule,
 Ok(())
 }
 /// Adds an import function.
-fn feed_import_function(&mut self) -> Result<(), E>;
+fn feed_import_function(&mut self, _sigindex: SigIndex) -> Result<(), E>;
 /// Sets the signatures.
 fn feed_signatures(&mut self, signatures: Map<SigIndex, FuncSig>) -> Result<(), E>;
 /// Sets function signatures.

View File

@@ -6,8 +6,8 @@ use crate::{
 backend::{CompilerConfig, RunnableModule},
 error::CompileError,
 module::{
-DataInitializer, ExportIndex, ImportName, ModuleInfo, StringTable, StringTableBuilder,
-TableInitializer,
+DataInitializer, ExportIndex, ImportName, ModuleInfo, NameIndex, NamespaceIndex,
+StringTable, StringTableBuilder, TableInitializer,
 },
 structures::{Map, TypedIndex},
 types::{
@@ -110,11 +110,36 @@ pub fn read_module<
 let mut namespace_builder = Some(StringTableBuilder::new());
 let mut name_builder = Some(StringTableBuilder::new());
 let mut func_count: usize = 0;
-let mut mcg_info_fed = false;
+let mut feed_mcg_signatures: Option<_> = Some(|mcg: &mut MCG| -> Result<(), LoadError> {
+let info_read = info.read().unwrap();
+mcg.feed_signatures(info_read.signatures.clone())
+.map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
+Ok(())
+});
+let mut feed_mcg_info: Option<_> = Some(
+|mcg: &mut MCG,
+ns_builder: StringTableBuilder<NamespaceIndex>,
+name_builder: StringTableBuilder<NameIndex>|
+-> Result<(), LoadError> {
+{
+let mut info_write = info.write().unwrap();
+info_write.namespace_table = ns_builder.finish();
+info_write.name_table = name_builder.finish();
+}
+let info_read = info.read().unwrap();
+mcg.feed_function_signatures(info_read.func_assoc.clone())
+.map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
+mcg.check_precondition(&info_read)
+.map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
+Ok(())
+},
+);
 loop {
 use wasmparser::ParserState;
 let state = parser.read();
 match *state {
 ParserState::Error(ref err) => return Err(err.clone().into()),
 ParserState::TypeSectionEntry(ref ty) => {
@@ -124,6 +149,10 @@ pub fn read_module<
 .push(func_type_to_func_sig(ty)?);
 }
 ParserState::ImportSectionEntry { module, field, ty } => {
+if let Some(f) = feed_mcg_signatures.take() {
+f(mcg)?;
+}
 let namespace_index = namespace_builder.as_mut().unwrap().register(module);
 let name_index = name_builder.as_mut().unwrap().register(field);
 let import_name = ImportName {
@@ -136,7 +165,7 @@ pub fn read_module<
 let sigindex = SigIndex::new(sigindex as usize);
 info.write().unwrap().imported_functions.push(import_name);
 info.write().unwrap().func_assoc.push(sigindex);
-mcg.feed_import_function()
+mcg.feed_import_function(sigindex)
 .map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
 }
 ImportSectionEntryType::Table(table_ty) => {
@@ -217,23 +246,17 @@ pub fn read_module<
 info.write().unwrap().start_func = Some(FuncIndex::new(start_index as usize));
 }
 ParserState::BeginFunctionBody { range } => {
+if let Some(f) = feed_mcg_signatures.take() {
+f(mcg)?;
+}
+if let Some(f) = feed_mcg_info.take() {
+f(
+mcg,
+namespace_builder.take().unwrap(),
+name_builder.take().unwrap(),
+)?;
+}
 let id = func_count;
-if !mcg_info_fed {
-mcg_info_fed = true;
-{
-let mut info_write = info.write().unwrap();
-info_write.namespace_table = namespace_builder.take().unwrap().finish();
-info_write.name_table = name_builder.take().unwrap().finish();
-}
-let info_read = info.read().unwrap();
-mcg.feed_signatures(info_read.signatures.clone())
-.map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
-mcg.feed_function_signatures(info_read.func_assoc.clone())
-.map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
-mcg.check_precondition(&info_read)
-.map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
-}
 let fcg = mcg
 .next_function(
 Arc::clone(&info),
@@ -432,17 +455,15 @@ pub fn read_module<
 info.write().unwrap().globals.push(global_init);
 }
 ParserState::EndWasm => {
-// TODO Consolidate with BeginFunction body if possible
-if !mcg_info_fed {
-info.write().unwrap().namespace_table =
-namespace_builder.take().unwrap().finish();
-info.write().unwrap().name_table = name_builder.take().unwrap().finish();
-mcg.feed_signatures(info.read().unwrap().signatures.clone())
-.map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
-mcg.feed_function_signatures(info.read().unwrap().func_assoc.clone())
-.map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
-mcg.check_precondition(&info.read().unwrap())
-.map_err(|x| LoadError::Codegen(format!("{:?}", x)))?;
+if let Some(f) = feed_mcg_signatures.take() {
+f(mcg)?;
+}
+if let Some(f) = feed_mcg_info.take() {
+f(
+mcg,
+namespace_builder.take().unwrap(),
+name_builder.take().unwrap(),
+)?;
 }
 break;
 }

View File

@@ -480,10 +480,11 @@ impl InstanceImage {
 }
 }
-/// Declarations for x86-64 registers.
+/// X64-specific structures and methods that do not depend on an x64 machine to run.
 #[cfg(unix)]
 pub mod x64_decl {
 use super::*;
+use crate::types::Type;
 /// General-purpose registers.
 #[repr(u8)]
@@ -610,9 +611,88 @@ pub mod x64_decl {
 _ => return None,
 })
 }
+/// Returns the instruction prefix for `movq %this_reg, ?(%rsp)`.
+///
+/// To build an instruction, append the memory location as a 32-bit
+/// offset to the stack pointer to this prefix.
+pub fn prefix_mov_to_stack(&self) -> Option<&'static [u8]> {
+Some(match *self {
+X64Register::GPR(gpr) => match gpr {
+GPR::RDI => &[0x48, 0x89, 0xbc, 0x24],
+GPR::RSI => &[0x48, 0x89, 0xb4, 0x24],
+GPR::RDX => &[0x48, 0x89, 0x94, 0x24],
+GPR::RCX => &[0x48, 0x89, 0x8c, 0x24],
+GPR::R8 => &[0x4c, 0x89, 0x84, 0x24],
+GPR::R9 => &[0x4c, 0x89, 0x8c, 0x24],
+_ => return None,
+},
+X64Register::XMM(xmm) => match xmm {
+XMM::XMM0 => &[0x66, 0x0f, 0xd6, 0x84, 0x24],
+XMM::XMM1 => &[0x66, 0x0f, 0xd6, 0x8c, 0x24],
+XMM::XMM2 => &[0x66, 0x0f, 0xd6, 0x94, 0x24],
+XMM::XMM3 => &[0x66, 0x0f, 0xd6, 0x9c, 0x24],
+XMM::XMM4 => &[0x66, 0x0f, 0xd6, 0xa4, 0x24],
+XMM::XMM5 => &[0x66, 0x0f, 0xd6, 0xac, 0x24],
+XMM::XMM6 => &[0x66, 0x0f, 0xd6, 0xb4, 0x24],
+XMM::XMM7 => &[0x66, 0x0f, 0xd6, 0xbc, 0x24],
+_ => return None,
+},
+})
+}
+}
+/// An allocator that allocates registers for function arguments according to the System V ABI.
+#[derive(Default)]
+pub struct ArgumentRegisterAllocator {
+n_gprs: usize,
+n_xmms: usize,
+}
+impl ArgumentRegisterAllocator {
+/// Allocates a register for argument type `ty`. Returns `None` if no register is available for this type.
+pub fn next(&mut self, ty: Type) -> Option<X64Register> {
+static GPR_SEQ: &'static [GPR] =
+&[GPR::RDI, GPR::RSI, GPR::RDX, GPR::RCX, GPR::R8, GPR::R9];
+static XMM_SEQ: &'static [XMM] = &[
+XMM::XMM0,
+XMM::XMM1,
+XMM::XMM2,
+XMM::XMM3,
+XMM::XMM4,
+XMM::XMM5,
+XMM::XMM6,
+XMM::XMM7,
+];
+match ty {
+Type::I32 | Type::I64 => {
+if self.n_gprs < GPR_SEQ.len() {
+let gpr = GPR_SEQ[self.n_gprs];
+self.n_gprs += 1;
+Some(X64Register::GPR(gpr))
+} else {
+None
+}
+}
+Type::F32 | Type::F64 => {
+if self.n_xmms < XMM_SEQ.len() {
+let xmm = XMM_SEQ[self.n_xmms];
+self.n_xmms += 1;
+Some(X64Register::XMM(xmm))
+} else {
+None
+}
+}
+_ => todo!(
+"ArgumentRegisterAllocator::next: Unsupported type: {:?}",
+ty
+),
+}
+}
 }
 }
+/// X64-specific structures and methods that only work on an x64 machine.
 #[cfg(unix)]
 pub mod x64 {
 //! The x64 state module contains functions to generate state and code for x64 targets.

View File

@@ -7,6 +7,8 @@
 //! Variadic functions are not supported because `rax` is used by the trampoline code.
 use crate::loader::CodeMemory;
+use crate::state::x64_decl::ArgumentRegisterAllocator;
+use crate::types::Type;
 use crate::vm::Ctx;
 use std::collections::BTreeMap;
 use std::fmt;
@@ -246,44 +248,50 @@ impl TrampolineBufferBuilder {
 &mut self,
 target: unsafe extern "C" fn(*const CallContext, *const u64) -> u64,
 context: *const CallContext,
-num_params: u32,
+params: &[Type],
+_returns: &[Type],
 ) -> usize {
 let idx = self.offsets.len();
 self.offsets.push(self.code.len());
-let mut stack_offset: u32 = num_params.checked_mul(8).unwrap();
+let mut stack_offset: u32 = params.len().checked_mul(8).unwrap() as u32;
 if stack_offset % 16 == 0 {
 stack_offset += 8;
 }
 self.code.extend_from_slice(&[0x48, 0x81, 0xec]); // sub ?, %rsp
 self.code.extend_from_slice(value_to_bytes(&stack_offset));
-for i in 0..num_params {
-match i {
-0..=5 => {
-// mov %?, ?(%rsp)
-let prefix: &[u8] = match i {
-0 => &[0x48, 0x89, 0xbc, 0x24], // rdi
-1 => &[0x48, 0x89, 0xb4, 0x24], // rsi
-2 => &[0x48, 0x89, 0x94, 0x24], // rdx
-3 => &[0x48, 0x89, 0x8c, 0x24], // rcx
-4 => &[0x4c, 0x89, 0x84, 0x24], // r8
-5 => &[0x4c, 0x89, 0x8c, 0x24], // r9
-_ => unreachable!(),
-};
+let mut allocator = ArgumentRegisterAllocator::default();
+let mut source_stack_count: u32 = 0; // # of allocated slots in the source stack.
+for (i, ty) in params.iter().enumerate() {
+match allocator.next(*ty) {
+Some(reg) => {
+// This argument is allocated to a register.
+let prefix = reg
+.prefix_mov_to_stack()
+.expect("cannot get instruction prefix for argument register");
 self.code.extend_from_slice(prefix);
-self.code.extend_from_slice(value_to_bytes(&(i * 8u32)));
+self.code
+.extend_from_slice(value_to_bytes(&((i as u32) * 8u32)));
 }
-_ => {
+None => {
+// This argument is allocated to the stack.
 self.code.extend_from_slice(&[
 0x48, 0x8b, 0x84, 0x24, // mov ?(%rsp), %rax
 ]);
 self.code.extend_from_slice(value_to_bytes(
-&((i - 6) * 8u32 + stack_offset + 8/* ret addr */),
+&(source_stack_count * 8u32 + stack_offset + 8/* ret addr */),
 ));
 // mov %rax, ?(%rsp)
 self.code.extend_from_slice(&[0x48, 0x89, 0x84, 0x24]);
-self.code.extend_from_slice(value_to_bytes(&(i * 8u32)));
+self.code
+.extend_from_slice(value_to_bytes(&((i as u32) * 8u32)));
+source_stack_count += 1;
 }
 }
 }
@@ -395,8 +403,13 @@
 }
 let mut builder = TrampolineBufferBuilder::new();
 let ctx = TestContext { value: 100 };
-let idx =
-builder.add_callinfo_trampoline(do_add, &ctx as *const TestContext as *const _, 8);
+let param_types: Vec<Type> = vec![Type::I32; 8];
+let idx = builder.add_callinfo_trampoline(
+do_add,
+&ctx as *const TestContext as *const _,
+&param_types,
+&[Type::I32],
+);
 let buf = builder.build();
 let t = buf.get_trampoline(idx);
 let ret = unsafe {
@@ -407,9 +420,49 @@
 assert_eq!(ret, 136);
 }
+#[test]
+fn test_trampolines_with_floating_point() {
+unsafe extern "C" fn inner(n: *const CallContext, args: *const u64) -> u64 {
+// `n` is not really a pointer. It is the length of the argument list, casted into the pointer type.
+let n = n as usize;
+let mut result: u64 = 0;
+for i in 0..n {
+result += *args.offset(i as _);
+}
+result
+}
+let buffer = TrampBuffer::new(4096);
+let mut builder = TrampolineBufferBuilder::new();
+builder.add_callinfo_trampoline(
+inner,
+8 as _,
+&[
+Type::I32,
+Type::I32,
+Type::I32,
+Type::F32,
+Type::I32,
+Type::I32,
+Type::I32,
+Type::I32,
+],
+&[Type::I32],
+);
+let ptr = buffer.insert(builder.code()).unwrap();
+let ret = unsafe {
+let f = std::mem::transmute::<
+_,
+extern "C" fn(i32, i32, i32, f32, i32, i32, i32, i32) -> i32,
+>(ptr);
+f(1, 2, 3, f32::from_bits(4), 5, 6, 7, 8)
+};
+assert_eq!(ret, 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8);
+}
 #[test]
 fn test_many_global_trampolines() {
 unsafe extern "C" fn inner(n: *const CallContext, args: *const u64) -> u64 {
+// `n` is not really a pointer. It is the length of the argument list, casted into the pointer type.
 let n = n as usize;
 let mut result: u64 = 0;
 for i in 0..n {
@@ -427,7 +480,8 @@
 for i in 0..5000usize {
 let mut builder = TrampolineBufferBuilder::new();
 let n = i % 8;
-builder.add_callinfo_trampoline(inner, n as _, n as _);
+let param_types: Vec<_> = (0..n).map(|_| Type::I64).collect();
+builder.add_callinfo_trampoline(inner, n as _, &param_types, &[Type::I64]);
 let ptr = buffer
 .insert(builder.code())
 .expect("cannot insert new code into global buffer");

View File

@@ -306,16 +306,15 @@ impl<'a> DynamicFunc<'a> {
 {
 use crate::trampoline_x64::{CallContext, TrampolineBufferBuilder};
 use crate::types::Value;
-use std::convert::TryFrom;
 struct PolymorphicContext {
 arg_types: Vec<Type>,
 func: Box<dyn Fn(&mut vm::Ctx, &[Value]) -> Vec<Value>>,
 }
-unsafe extern "C" fn enter_host_polymorphic(
+unsafe fn do_enter_host_polymorphic(
 ctx: *const CallContext,
 args: *const u64,
-) -> u64 {
+) -> Vec<Value> {
 let ctx = &*(ctx as *const PolymorphicContext);
 let vmctx = &mut *(*args.offset(0) as *mut vm::Ctx);
 let args: Vec<Value> = ctx
@@ -335,13 +334,40 @@ impl<'a> DynamicFunc<'a> {
 }
 })
 .collect();
-let rets = (ctx.func)(vmctx, &args);
+(ctx.func)(vmctx, &args)
+}
+unsafe extern "C" fn enter_host_polymorphic_i(
+ctx: *const CallContext,
+args: *const u64,
+) -> u64 {
+let rets = do_enter_host_polymorphic(ctx, args);
 if rets.len() == 0 {
 0
 } else if rets.len() == 1 {
-u64::try_from(rets[0].to_u128()).expect(
-"128-bit return value from polymorphic host functions is not yet supported",
-)
+match rets[0] {
+Value::I32(x) => x as u64,
+Value::I64(x) => x as u64,
+_ => panic!("enter_host_polymorphic_i: invalid return type"),
+}
+} else {
+panic!(
+"multiple return values from polymorphic host functions is not yet supported"
+);
+}
+}
+unsafe extern "C" fn enter_host_polymorphic_f(
+ctx: *const CallContext,
+args: *const u64,
+) -> f64 {
+let rets = do_enter_host_polymorphic(ctx, args);
+if rets.len() == 0 {
+0.0
+} else if rets.len() == 1 {
+match rets[0] {
+Value::F32(x) => f64::from_bits(x.to_bits() as u64),
+Value::F64(x) => x,
+_ => panic!("enter_host_polymorphic_f: invalid return type"),
+}
 } else {
 panic!(
 "multiple return values from polymorphic host functions is not yet supported"
@@ -359,11 +385,29 @@ impl<'a> DynamicFunc<'a> {
 func: Box::new(func),
 });
 let ctx = Box::into_raw(ctx);
+let mut native_param_types = vec![Type::I64]; // vm::Ctx is the first parameter.
+native_param_types.extend_from_slice(signature.params());
+match signature.returns() {
+[x] if *x == Type::F32 || *x == Type::F64 => {
 builder.add_callinfo_trampoline(
-enter_host_polymorphic,
+unsafe { std::mem::transmute(enter_host_polymorphic_f as usize) },
 ctx as *const _,
-(signature.params().len() + 1) as u32, // +vmctx
+&native_param_types,
+signature.returns(),
 );
+}
+_ => {
+builder.add_callinfo_trampoline(
+enter_host_polymorphic_i,
+ctx as *const _,
+&native_param_types,
+signature.returns(),
+);
+}
+}
 let ptr = builder
 .insert_global()
 .expect("cannot bump-allocate global trampoline memory");

View File

@@ -32,8 +32,9 @@ use wasmer_runtime_core::{
 memory::MemoryType,
 module::{ModuleInfo, ModuleInner},
 state::{
-x64::new_machine_state, x64::X64Register, FunctionStateMap, MachineState, MachineValue,
-ModuleStateMap, OffsetInfo, SuspendOffset, WasmAbstractValue,
+x64::new_machine_state, x64::X64Register, x64_decl::ArgumentRegisterAllocator,
+FunctionStateMap, MachineState, MachineValue, ModuleStateMap, OffsetInfo, SuspendOffset,
+WasmAbstractValue,
 },
 structures::{Map, TypedIndex},
 typed_func::{Trampoline, Wasm},
@@ -204,6 +205,7 @@ pub struct X64FunctionCode {
 signatures: Arc<Map<SigIndex, FuncSig>>,
 function_signatures: Arc<Map<FuncIndex, SigIndex>>,
+signature: FuncSig,
 fsm: FunctionStateMap,
 offset: usize,
@@ -712,11 +714,22 @@ impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
 machine.track_state = self.config.as_ref().unwrap().track_state;
 assembler.emit_label(begin_label);
+let signatures = self.signatures.as_ref().unwrap();
+let function_signatures = self.function_signatures.as_ref().unwrap();
+let sig_index = function_signatures
+.get(FuncIndex::new(
+self.functions.len() + self.func_import_count,
+))
+.unwrap()
+.clone();
+let sig = signatures.get(sig_index).unwrap().clone();
 let code = X64FunctionCode {
 local_function_id: self.functions.len(),
-signatures: self.signatures.as_ref().unwrap().clone(),
-function_signatures: self.function_signatures.as_ref().unwrap().clone(),
+signatures: signatures.clone(),
+function_signatures: function_signatures.clone(),
+signature: sig,
 fsm: FunctionStateMap::new(new_machine_state(), self.functions.len(), 32, vec![]), // only a placeholder; this is initialized later in `begin_body`
 offset: begin_offset.0,
@@ -869,7 +882,7 @@ impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
 Ok(())
 }
-fn feed_import_function(&mut self) -> Result<(), CodegenError> {
+fn feed_import_function(&mut self, sigindex: SigIndex) -> Result<(), CodegenError> {
 let labels = self.function_labels.as_mut().unwrap();
 let id = labels.len();
@@ -880,6 +893,92 @@ impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
 a.emit_label(label);
 labels.insert(id, (label, Some(offset)));
+// Singlepass internally treats all arguments as integers, but the standard System V calling convention requires
+// floating point arguments to be passed in XMM registers.
+//
+// FIXME: This is only a workaround. We should fix singlepass to use the standard CC.
+let sig = self
+.signatures
+.as_ref()
+.expect("signatures itself")
+.get(sigindex)
+.expect("signatures");
+// Translation is expensive, so only do it if needed.
+if sig
+.params()
+.iter()
+.find(|&&x| x == Type::F32 || x == Type::F64)
+.is_some()
+{
+let mut param_locations: Vec<Location> = vec![];
+// Allocate stack space for arguments.
+let stack_offset: i32 = if sig.params().len() > 5 {
+5 * 8
+} else {
+(sig.params().len() as i32) * 8
+};
+if stack_offset > 0 {
+a.emit_sub(
+Size::S64,
+Location::Imm32(stack_offset as u32),
+Location::GPR(GPR::RSP),
+);
+}
+// Store all arguments to the stack to prevent overwrite.
+for i in 0..sig.params().len() {
+let loc = match i {
+0..=4 => {
+static PARAM_REGS: &'static [GPR] =
+&[GPR::RSI, GPR::RDX, GPR::RCX, GPR::R8, GPR::R9];
+let loc = Location::Memory(GPR::RSP, (i * 8) as i32);
+a.emit_mov(Size::S64, Location::GPR(PARAM_REGS[i]), loc);
+loc
+}
+_ => Location::Memory(GPR::RSP, stack_offset + 8 + ((i - 5) * 8) as i32),
+};
+param_locations.push(loc);
+}
+// Copy arguments.
+let mut argalloc = ArgumentRegisterAllocator::default();
+argalloc.next(Type::I32).unwrap(); // skip vm::Ctx
+let mut caller_stack_offset: i32 = 0;
+for (i, ty) in sig.params().iter().enumerate() {
+let prev_loc = param_locations[i];
+let target = match argalloc.next(*ty) {
+Some(X64Register::GPR(gpr)) => Location::GPR(gpr),
+Some(X64Register::XMM(xmm)) => Location::XMM(xmm),
+None => {
+// No register can be allocated. Put this argument on the stack.
+//
+// Since here we never use fewer registers than by the original call, on the caller's frame
+// we always have enough space to store the rearranged arguments, and the copy "backward" between different
+// slots in the caller argument region will always work.
+a.emit_mov(Size::S64, prev_loc, Location::GPR(GPR::RAX));
+a.emit_mov(
+Size::S64,
+Location::GPR(GPR::RAX),
+Location::Memory(GPR::RSP, stack_offset + 8 + caller_stack_offset),
+);
+caller_stack_offset += 8;
+continue;
+}
+};
+a.emit_mov(Size::S64, prev_loc, target);
+}
+// Restore stack pointer.
+if stack_offset > 0 {
+a.emit_add(
+Size::S64,
+Location::Imm32(stack_offset as u32),
+Location::GPR(GPR::RSP),
+);
+}
+}
 // Emits a tail call trampoline that loads the address of the target import function
 // from Ctx and jumps to it.
@@ -6260,9 +6359,16 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
 false,
 )[0];
 self.value_stack.push(ret);
+match return_types[0] {
+WpType::F32 | WpType::F64 => {
+a.emit_mov(Size::S64, Location::XMM(XMM::XMM0), ret);
+}
+_ => {
 a.emit_mov(Size::S64, Location::GPR(GPR::RAX), ret);
 }
 }
+}
+}
 Operator::CallIndirect { index, table_index } => {
 if table_index != 0 {
 return Err(CodegenError {
@@ -6399,9 +6505,16 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
 false,
 )[0];
 self.value_stack.push(ret);
+match return_types[0] {
+WpType::F32 | WpType::F64 => {
+a.emit_mov(Size::S64, Location::XMM(XMM::XMM0), ret);
+}
+_ => {
 a.emit_mov(Size::S64, Location::GPR(GPR::RAX), ret);
 }
 }
+}
+}
 Operator::If { ty } => {
 let label_end = a.get_label();
 let label_else = a.get_label();
@@ -7614,6 +7727,18 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
 self.machine.finalize_locals(a, &self.locals);
 a.emit_mov(Size::S64, Location::GPR(GPR::RBP), Location::GPR(GPR::RSP));
 a.emit_pop(Size::S64, Location::GPR(GPR::RBP));
+// Make a copy of the return value in XMM0, as required by the SysV CC.
+match self.signature.returns() {
+[x] if *x == Type::F32 || *x == Type::F64 => {
+a.emit_mov(
+Size::S64,
+Location::GPR(GPR::RAX),
+Location::XMM(XMM::XMM0),
+);
+}
+_ => {}
+}
 a.emit_ret();
 } else {
 let released = &self.value_stack[frame.value_stack_depth..];

View File

@@ -256,6 +256,16 @@
 Memory, Table,
 };
+fn format_panic(e: &dyn std::any::Any) -> String {
+if let Some(s) = e.downcast_ref::<&str>() {
+format!("{}", s)
+} else if let Some(s) = e.downcast_ref::<String>() {
+format!("{}", s)
+} else {
+"(unknown)".into()
+}
+}
 fn parse_and_run(
 path: &PathBuf,
 file_excludes: &HashSet<String>,
@@ -342,7 +352,7 @@
 file: filename.to_string(),
 line: line,
 kind: format!("{}", "Module"),
-message: format!("caught panic {:?}", e),
+message: format!("caught panic {}", format_panic(&e)),
 },
 &test_key,
 excludes,
@@ -798,7 +808,7 @@
 file: filename.to_string(),
 line: line,
 kind: format!("{}", "AssertInvalid"),
-message: format!("caught panic {:?}", p),
+message: format!("caught panic {}", format_panic(&p)),
 },
 &test_key,
 excludes,
@@ -851,7 +861,7 @@
 file: filename.to_string(),
 line: line,
 kind: format!("{}", "AssertMalformed"),
-message: format!("caught panic {:?}", p),
+message: format!("caught panic {}", format_panic(&p)),
 },
 &test_key,
 excludes,
@@ -975,7 +985,7 @@
 file: filename.to_string(),
 line: line,
 kind: format!("{}", "AssertUnlinkable"),
-message: format!("caught panic {:?}", e),
+message: format!("caught panic {}", format_panic(&e)),
 },
 &test_key,
 excludes,