Update with list IR from walrus

This commit updates `wasm-bindgen` to the latest version of `walrus`
which transforms all internal IR representations to a list-based IR
instead of a tree-based IR. This isn't a major change for
`wasm-bindgen` itself beyond cosmetic updates, but it involves a lot of
changes to the threads/anyref passes.

This commit also updates our CI configuration to actually run all the
anyref tests on CI. This is done by downloading a nightly build of
node.js, which is expected to remain available for a while until full
support makes its way into stable releases.
This commit is contained in:
Alex Crichton 2019-08-12 10:49:00 -07:00
parent 1d0c333a2b
commit ad34fa29d8
8 changed files with 323 additions and 371 deletions

View File

@ -92,3 +92,4 @@ wasm-bindgen = { path = '.' }
wasm-bindgen-futures = { path = 'crates/futures' }
js-sys = { path = 'crates/js-sys' }
web-sys = { path = 'crates/web-sys' }
wasm-webidl-bindings = { git = 'https://github.com/alexcrichton/wasm-webidl-bindings', branch = 'update-walrus' }

View File

@ -18,8 +18,6 @@ jobs:
displayName: "Crate test suite"
- script: WASM_BINDGEN_NO_DEBUG=1 cargo test --target wasm32-unknown-unknown
displayName: "Crate test suite (no debug)"
- script: NODE_ARGS=/dev/null WASM_BINDGEN_ANYREF=1 cargo test --target wasm32-unknown-unknown --test wasm
displayName: "Anyref test suite builds"
- script: cargo test --target wasm32-unknown-unknown --features serde-serialize
displayName: "Crate test suite (with serde)"
- script: cargo test --target wasm32-unknown-unknown --features enable-interning
@ -30,6 +28,19 @@ jobs:
displayName: "Futures test suite on native"
- script: cargo test -p wasm-bindgen-futures --target wasm32-unknown-unknown
displayName: "Futures test suite on wasm"
- script: |
set -e
curl https://nodejs.org/download/nightly/v13.0.0-nightly2019081215b2d13310/node-v13.0.0-nightly2019081215b2d13310-linux-x64.tar.xz | tar xJf -
echo "##vso[task.prependpath]$PWD/node-v13.0.0-nightly2019081215b2d13310-linux-x64/bin"
echo "##vso[task.setvariable variable=NODE_ARGS]--experimental-wasm-anyref,--experimental-wasm-bulk_memory"
echo "##vso[task.setvariable variable=WASM_BINDGEN_ANYREF]1"
displayName: "Install a custom node.js and configure anyref"
- script: cargo test --target wasm32-unknown-unknown --test wasm
displayName: "(anyref) Crate test suite"
- script: WASM_BINDGEN_NO_DEBUG=1 cargo test --target wasm32-unknown-unknown --test wasm
displayName: "(anyref) Crate test suite (no debug)"
- script: cargo test --target wasm32-unknown-unknown --features serde-serialize --test wasm
displayName: "(anyref) Crate test suite (with serde)"
- job: test_wasm_bindgen_windows
displayName: "Run wasm-bindgen crate tests (Windows)"

View File

@ -18,9 +18,8 @@
use failure::{bail, format_err, Error};
use std::cmp;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::mem;
use walrus::ir::*;
use walrus::{ExportId, ImportId};
use walrus::{ExportId, ImportId, TypeId};
use walrus::{FunctionId, GlobalId, InitExpr, Module, TableId, ValType};
// must be kept in sync with src/lib.rs and ANYREF_HEAP_START
@ -198,19 +197,20 @@ impl Context {
// (tee_local 1 (call $heap_alloc))
// (table.get (local.get 0)))
// (local.get 1))
let mut builder = walrus::FunctionBuilder::new();
let mut builder =
walrus::FunctionBuilder::new(&mut module.types, &[ValType::I32], &[ValType::I32]);
let arg = module.locals.add(ValType::I32);
let local = module.locals.add(ValType::I32);
let alloc = builder.call(heap_alloc, Box::new([]));
let tee = builder.local_tee(local, alloc);
let get_arg = builder.local_get(arg);
let get_table = builder.table_get(table, get_arg);
let set_table = builder.table_set(table, tee, get_table);
let get_local = builder.local_get(local);
let mut body = builder.func_body();
body.call(heap_alloc)
.local_tee(local)
.local_get(arg)
.table_get(table)
.table_set(table)
.local_get(local);
let ty = module.types.add(&[ValType::I32], &[ValType::I32]);
let clone_ref = builder.finish(ty, vec![arg], vec![set_table, get_local], module);
let clone_ref = builder.finish(vec![arg], &mut module.funcs);
let name = "__wbindgen_object_clone_ref".to_string();
module.funcs.get_mut(clone_ref).name = Some(name);
@ -309,7 +309,7 @@ impl Transform<'_> {
None => continue,
};
let shim = self.append_shim(
let (shim, anyref_ty) = self.append_shim(
f,
&import.name,
func,
@ -318,6 +318,10 @@ impl Transform<'_> {
&mut module.locals,
);
self.import_map.insert(f, shim);
match &mut module.funcs.get_mut(f).kind {
walrus::FunctionKind::Import(f) => f.ty = anyref_ty,
_ => unreachable!(),
}
}
}
@ -332,7 +336,7 @@ impl Transform<'_> {
Some(s) => s,
None => continue,
};
let shim = self.append_shim(
let (shim, _anyref_ty) = self.append_shim(
f,
&export.name,
function,
@ -362,7 +366,7 @@ impl Transform<'_> {
// which places elements at the end.
while let Some((idx, function)) = self.cx.elements.remove(&(kind.elements.len() as u32)) {
let target = kind.elements[idx as usize].unwrap();
let shim = self.append_shim(
let (shim, _anyref_ty) = self.append_shim(
target,
&format!("closure{}", idx),
function,
@ -390,15 +394,17 @@ impl Transform<'_> {
types: &mut walrus::ModuleTypes,
funcs: &mut walrus::ModuleFunctions,
locals: &mut walrus::ModuleLocals,
) -> FunctionId {
) -> (FunctionId, TypeId) {
let target = funcs.get_mut(shim_target);
let (is_export, ty) = match &mut target.kind {
walrus::FunctionKind::Import(f) => (false, &mut f.ty),
walrus::FunctionKind::Local(f) => (true, &mut f.ty),
let (is_export, ty) = match &target.kind {
walrus::FunctionKind::Import(f) => (false, f.ty),
walrus::FunctionKind::Local(f) => (true, f.ty()),
_ => unreachable!(),
};
let target_ty = types.get(*ty);
let target_ty = types.get(ty);
let target_ty_params = target_ty.params().to_vec();
let target_ty_results = target_ty.results().to_vec();
// Learn about the various operations we're doing up front. Afterwards
// we'll have a better idea bout what sort of code we're gonna be
@ -452,14 +458,22 @@ impl Transform<'_> {
// If we're an import, then our shim is what the Rust code calls, which
// means it'll have the original signature. The existing import's
// signature, however, is transformed to be an anyref signature.
let shim_ty = if is_export {
anyref_ty
} else {
mem::replace(ty, anyref_ty)
};
let shim_ty = if is_export { anyref_ty } else { ty };
let mut builder = walrus::FunctionBuilder::new();
let mut before = Vec::new();
let mut builder = walrus::FunctionBuilder::new(
types,
if is_export {
&param_tys
} else {
&target_ty_params
},
if is_export {
&new_ret
} else {
&target_ty_results
},
);
let mut body = builder.func_body();
let params = types
.get(shim_ty)
.params()
@ -476,60 +490,63 @@ impl Transform<'_> {
// Update our stack pointer if there's any borrowed anyref objects.
if anyref_stack > 0 {
let sp = builder.global_get(self.stack_pointer);
let size = builder.const_(Value::I32(anyref_stack));
let new_sp = builder.binop(BinaryOp::I32Sub, sp, size);
let tee = builder.local_tee(fp, new_sp);
before.push(builder.global_set(self.stack_pointer, tee));
body.global_get(self.stack_pointer)
.const_(Value::I32(anyref_stack))
.binop(BinaryOp::I32Sub)
.local_tee(fp)
.global_set(self.stack_pointer);
}
let mut next_stack_offset = 0;
let mut args = Vec::new();
for (i, convert) in param_convert.iter().enumerate() {
let local = builder.local_get(params[i]);
args.push(match *convert {
Convert::None => local,
match *convert {
Convert::None => {
body.local_get(params[i]);
}
Convert::Load { owned: true } => {
// load the anyref onto the stack, then afterwards
// deallocate our index, leaving the anyref on the stack.
let get = builder.table_get(self.table, local);
let free = builder.call(self.heap_dealloc, Box::new([local]));
builder.with_side_effects(Vec::new(), get, vec![free])
body.local_get(params[i])
.table_get(self.table)
.local_get(params[i])
.call(self.heap_dealloc);
}
Convert::Load { owned: false } => {
body.local_get(params[i]).table_get(self.table);
}
Convert::Load { owned: false } => builder.table_get(self.table, local),
Convert::Store { owned: true } => {
// Allocate space for the anyref, store it, and then leave
// the index of the allocated anyref on the stack.
let alloc = builder.call(self.heap_alloc, Box::new([]));
let tee = builder.local_tee(scratch_i32, alloc);
let store = builder.table_set(self.table, tee, local);
let get = builder.local_get(scratch_i32);
builder.with_side_effects(vec![store], get, Vec::new())
body.call(self.heap_alloc)
.local_tee(scratch_i32)
.local_get(params[i])
.table_set(self.table)
.local_get(scratch_i32);
}
Convert::Store { owned: false } => {
// Store an anyref at an offset from our function's stack
// pointer frame.
let get_fp = builder.local_get(fp);
let (index, idx_local) = if next_stack_offset == 0 {
(get_fp, fp)
body.local_get(fp);
let idx_local = if next_stack_offset == 0 {
fp
} else {
let rhs = builder.i32_const(next_stack_offset);
let add = builder.binop(BinaryOp::I32Add, get_fp, rhs);
(builder.local_tee(scratch_i32, add), scratch_i32)
body.i32_const(next_stack_offset)
.binop(BinaryOp::I32Add)
.local_tee(scratch_i32);
scratch_i32
};
next_stack_offset += 1;
let store = builder.table_set(self.table, index, local);
let get = builder.local_get(idx_local);
builder.with_side_effects(vec![store], get, Vec::new())
body.local_get(params[i])
.table_set(self.table)
.local_get(idx_local);
}
}
});
}
// Now that we've converted all the arguments, call the original
// function. This may be either an import or an export which we're
// wrapping.
let mut result = builder.call(shim_target, args.into_boxed_slice());
let mut after = Vec::new();
body.call(shim_target);
// If an anyref value is returned, then we need to be sure to apply
// special treatment to convert it to an i32 as well. Note that only
@ -540,20 +557,20 @@ impl Transform<'_> {
// We're an export so we have an i32 on the stack and need to
// convert it to an anyref, basically by doing the same as an
// owned load above: get the value then deallocate our slot.
let tee = builder.local_tee(scratch_i32, result);
result = builder.table_get(self.table, tee);
let get_local = builder.local_get(scratch_i32);
after.push(builder.call(self.heap_dealloc, Box::new([get_local])));
body.local_tee(scratch_i32)
.table_get(self.table)
.local_get(scratch_i32)
.call(self.heap_dealloc);
} else {
// Imports are the opposite, we have any anyref on the stack
// and convert it to an i32 by allocating space for it and
// storing it there.
before.push(builder.local_set(scratch_anyref, result));
let alloc = builder.call(self.heap_alloc, Box::new([]));
let tee = builder.local_tee(scratch_i32, alloc);
let get = builder.local_get(scratch_anyref);
before.push(builder.table_set(self.table, tee, get));
result = builder.local_get(scratch_i32);
body.local_set(scratch_anyref)
.call(self.heap_alloc)
.local_tee(scratch_i32)
.local_get(scratch_anyref)
.table_set(self.table)
.local_get(scratch_i32);
}
}
@ -567,32 +584,28 @@ impl Transform<'_> {
// TODO: use `table.fill` once that's spec'd
if anyref_stack > 0 {
for i in 0..anyref_stack {
let get_fp = builder.local_get(fp);
let index = if i > 0 {
let offset = builder.i32_const(i);
builder.binop(BinaryOp::I32Add, get_fp, offset)
} else {
get_fp
};
let null = builder.ref_null();
after.push(builder.table_set(self.table, index, null));
body.local_get(fp);
if i > 0 {
body.i32_const(i).binop(BinaryOp::I32Add);
}
body.ref_null();
body.table_set(self.table);
}
let get_fp = builder.local_get(fp);
let size = builder.i32_const(anyref_stack);
let new_sp = builder.binop(BinaryOp::I32Add, get_fp, size);
after.push(builder.global_set(self.stack_pointer, new_sp));
body.local_get(fp)
.i32_const(anyref_stack)
.binop(BinaryOp::I32Add)
.global_set(self.stack_pointer);
}
// Create the final expression node and then finish the function builder
// with a fresh type we've been calculating so far. Give the function a
// nice name for debugging and then we're good to go!
let expr = builder.with_side_effects(before, result, after);
let id = builder.finish_parts(shim_ty, params, vec![expr], types, funcs);
let id = builder.finish(params, funcs);
let name = format!("{}_anyref_shim", name);
funcs.get_mut(id).name = Some(name);
self.shims.insert(id);
return id;
(id, anyref_ty)
}
fn rewrite_calls(&mut self, module: &mut Module) {
@ -600,67 +613,59 @@ impl Transform<'_> {
if self.shims.contains(&id) {
continue;
}
let mut entry = func.entry_block();
Rewrite {
func,
xform: self,
replace: None,
}
.visit_block_id_mut(&mut entry);
let entry = func.entry_block();
dfs_pre_order_mut(&mut Rewrite { xform: self }, func, entry);
}
struct Rewrite<'a, 'b> {
func: &'a mut walrus::LocalFunction,
xform: &'a Transform<'b>,
replace: Option<ExprId>,
}
impl VisitorMut for Rewrite<'_, '_> {
fn local_function_mut(&mut self) -> &mut walrus::LocalFunction {
self.func
}
fn visit_expr_id_mut(&mut self, expr: &mut ExprId) {
expr.visit_mut(self);
if let Some(id) = self.replace.take() {
*expr = id;
}
}
fn visit_call_mut(&mut self, e: &mut Call) {
e.visit_mut(self);
let intrinsic = match self.xform.intrinsic_map.get(&e.func) {
fn start_instr_seq_mut(&mut self, seq: &mut InstrSeq) {
for i in (0..seq.instrs.len()).rev() {
let call = match &mut seq.instrs[i] {
Instr::Call(call) => call,
_ => continue,
};
let intrinsic = match self.xform.intrinsic_map.get(&call.func) {
Some(f) => f,
None => {
// If this wasn't a call of an intrinsic, but it was a
// call of one of our old import functions then we
// switch the functions we're calling here.
if let Some(f) = self.xform.import_map.get(&e.func) {
e.func = *f;
if let Some(f) = self.xform.import_map.get(&call.func) {
call.func = *f;
}
return;
continue;
}
};
let builder = self.func.builder_mut();
match intrinsic {
Intrinsic::TableGrow => {
assert_eq!(e.args.len(), 1);
let delta = e.args[0];
let null = builder.ref_null();
let grow = builder.table_grow(self.xform.table, delta, null);
self.replace = Some(grow);
// Switch this to a `table.grow` instruction...
seq.instrs[i] = TableGrow {
table: self.xform.table,
}
.into();
// ... and then insert a `ref.null` before the
// preceding instruction as the value to grow the
// table with.
seq.instrs.insert(i - 1, RefNull {}.into());
}
Intrinsic::TableSetNull => {
assert_eq!(e.args.len(), 1);
let index = e.args[0];
let null = builder.ref_null();
let set = builder.table_set(self.xform.table, index, null);
self.replace = Some(set);
// Switch this to a `table.set` instruction...
seq.instrs[i] = TableSet {
table: self.xform.table,
}
.into();
// ... and then insert a `ref.null` as the
// preceding instruction
seq.instrs.insert(i, RefNull {}.into());
}
Intrinsic::DropRef => call.func = self.xform.heap_dealloc,
Intrinsic::CloneRef => call.func = self.xform.clone_ref,
}
Intrinsic::DropRef => e.func = self.xform.heap_dealloc,
Intrinsic::CloneRef => e.func = self.xform.clone_ref,
}
}
}

View File

@ -18,9 +18,9 @@ log = "0.4"
rustc-demangle = "0.1.13"
serde_json = "1.0"
tempfile = "3.0"
walrus = "0.10.0"
walrus = "0.11.0"
wasm-bindgen-anyref-xform = { path = '../anyref-xform', version = '=0.2.48' }
wasm-bindgen-shared = { path = "../shared", version = '=0.2.48' }
wasm-bindgen-threads-xform = { path = '../threads-xform', version = '=0.2.48' }
wasm-bindgen-wasm-interpreter = { path = "../wasm-interpreter", version = '=0.2.48' }
wasm-webidl-bindings = "0.3.0"
wasm-webidl-bindings = "0.4.0"

View File

@ -14,9 +14,8 @@ use crate::descriptor::{Closure, Descriptor};
use failure::Error;
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::mem;
use walrus::ImportId;
use walrus::{CustomSection, FunctionId, LocalFunction, Module, TypedCustomSectionId};
use walrus::{CustomSection, FunctionId, Module, TypedCustomSectionId};
use wasm_bindgen_wasm_interpreter::Interpreter;
#[derive(Default, Debug)]
@ -112,19 +111,16 @@ impl WasmBindgenDescriptorsSection {
let mut element_removal_list = HashSet::new();
let mut func_to_descriptor = HashMap::new();
for (id, local) in module.funcs.iter_local() {
let entry = local.entry_block();
let mut find = FindDescribeClosure {
func: local,
wbindgen_describe_closure,
cur: entry.into(),
call: None,
found: false,
};
find.visit_block_id(&entry);
if let Some(call) = find.call {
dfs_in_order(&mut find, local, local.entry_block());
if find.found {
let descriptor = interpreter
.interpret_closure_descriptor(id, module, &mut element_removal_list)
.unwrap();
func_to_descriptor.insert(id, (call, Descriptor::decode(descriptor)));
func_to_descriptor.insert(id, Descriptor::decode(descriptor));
}
}
@ -150,7 +146,7 @@ impl WasmBindgenDescriptorsSection {
// freshly manufactured import. Save off the type of this import in
// ourselves, and then we're good to go.
let ty = module.funcs.get(wbindgen_describe_closure).ty();
for (func, (call_instr, descriptor)) in func_to_descriptor {
for (func, descriptor) in func_to_descriptor {
let import_name = format!("__wbindgen_closure_wrapper{}", func.index());
let (id, import_id) =
module.add_import_func("__wbindgen_placeholder__", &import_name, ty);
@ -160,37 +156,42 @@ impl WasmBindgenDescriptorsSection {
walrus::FunctionKind::Local(l) => l,
_ => unreachable!(),
};
let call = local.get_mut(call_instr).unwrap_call_mut();
assert_eq!(call.func, wbindgen_describe_closure);
call.func = id;
let entry = local.entry_block();
dfs_pre_order_mut(
&mut UpdateDescribeClosure {
wbindgen_describe_closure,
replacement: id,
},
local,
entry,
);
self.closure_imports
.insert(import_id, descriptor.unwrap_closure());
}
return Ok(());
struct FindDescribeClosure<'a> {
func: &'a LocalFunction,
struct FindDescribeClosure {
wbindgen_describe_closure: FunctionId,
cur: ExprId,
call: Option<ExprId>,
}
impl<'a> Visitor<'a> for FindDescribeClosure<'a> {
fn local_function(&self) -> &'a LocalFunction {
self.func
}
fn visit_expr_id(&mut self, id: &ExprId) {
let prev = mem::replace(&mut self.cur, *id);
id.visit(self);
self.cur = prev;
found: bool,
}
impl<'a> Visitor<'a> for FindDescribeClosure {
fn visit_call(&mut self, call: &Call) {
call.visit(self);
if call.func == self.wbindgen_describe_closure {
assert!(self.call.is_none());
self.call = Some(self.cur);
self.found = true;
}
}
}
struct UpdateDescribeClosure {
wbindgen_describe_closure: FunctionId,
replacement: FunctionId,
}
impl<'a> VisitorMut for UpdateDescribeClosure {
fn visit_call_mut(&mut self, call: &mut Call) {
if call.func == self.wbindgen_describe_closure {
call.func = self.replacement;
}
}
}

View File

@ -644,10 +644,9 @@ impl<'a> Context<'a> {
self.module.start = Some(match self.module.start {
Some(prev_start) => {
let mut builder = walrus::FunctionBuilder::new();
let call_init = builder.call(import, Box::new([]));
let call_prev = builder.call(prev_start, Box::new([]));
builder.finish(ty, Vec::new(), vec![call_init, call_prev], self.module)
let mut builder = walrus::FunctionBuilder::new(&mut self.module.types, &[], &[]);
builder.func_body().call(import).call(prev_start);
builder.finish(Vec::new(), &mut self.module.funcs)
}
None => import,
});
@ -827,11 +826,9 @@ impl<'a> Context<'a> {
// because the start function currently only shows up when it's injected
// through thread/anyref transforms. These injected start functions need
// to happen before user code, so we always schedule them first.
let mut builder = walrus::FunctionBuilder::new();
let call1 = builder.call(prev_start, Box::new([]));
let call2 = builder.call(id, Box::new([]));
let ty = self.module.funcs.get(id).ty();
let new_start = builder.finish(ty, Vec::new(), vec![call1, call2], self.module);
let mut builder = walrus::FunctionBuilder::new(&mut self.module.types, &[], &[]);
builder.func_body().call(prev_start).call(id);
let new_start = builder.finish(Vec::new(), &mut self.module.funcs);
self.module.start = Some(new_start);
Ok(())
}

View File

@ -5,7 +5,7 @@ use std::mem;
use failure::{bail, format_err, Error};
use walrus::ir::Value;
use walrus::{DataId, FunctionId, InitExpr, LocalFunction, ValType};
use walrus::{DataId, FunctionId, InitExpr, ValType};
use walrus::{ExportItem, GlobalId, GlobalKind, ImportKind, MemoryId, Module};
const PAGE_SIZE: u32 = 1 << 16;
@ -365,13 +365,13 @@ fn inject_start(
use walrus::ir::*;
assert!(stack_size % PAGE_SIZE == 0);
let mut builder = walrus::FunctionBuilder::new();
let mut exprs = Vec::new();
let mut builder = walrus::FunctionBuilder::new(&mut module.types, &[], &[]);
let local = module.locals.add(ValType::I32);
let mut body = builder.func_body();
let addr = builder.i32_const(addr as i32);
let one = builder.i32_const(1);
let thread_id = builder.atomic_rmw(
body.i32_const(addr as i32)
.i32_const(1)
.atomic_rmw(
memory,
AtomicOp::Add,
AtomicWidth::I32,
@ -379,92 +379,72 @@ fn inject_start(
align: 4,
offset: 0,
},
addr,
one,
);
let thread_id = builder.local_tee(local, thread_id);
let global_set = builder.global_set(globals.thread_id, thread_id);
exprs.push(global_set);
)
.local_tee(local)
.global_set(globals.thread_id);
// Perform an if/else based on whether we're the first thread or not. Our
// thread ID will be zero if we're the first thread, otherwise it'll be
// nonzero (assuming we don't overflow...)
//
let thread_id_is_nonzero = builder.local_get(local);
body.local_get(local);
body.if_else(
Box::new([]),
Box::new([]),
// If our thread id is nonzero then we're the second or greater thread, so
// we give ourselves a stack via memory.grow and we update our stack
// pointer as the default stack pointer is surely wrong for us.
let mut block = builder.if_else_block(Box::new([]), Box::new([]));
|body| {
if let Some(stack_pointer) = stack_pointer {
// local0 = grow_memory(stack_size);
let grow_amount = block.i32_const((stack_size / PAGE_SIZE) as i32);
let memory_growth = block.memory_grow(memory, grow_amount);
let set_local = block.local_set(local, memory_growth);
block.expr(set_local);
body.i32_const((stack_size / PAGE_SIZE) as i32)
.memory_grow(memory)
.local_set(local);
// if local0 == -1 then trap
let if_negative_trap = {
let mut block = block.block(Box::new([]), Box::new([]));
let lhs = block.local_get(local);
let rhs = block.i32_const(-1);
let condition = block.binop(BinaryOp::I32Ne, lhs, rhs);
let id = block.id();
let br_if = block.br_if(condition, id, Box::new([]));
block.expr(br_if);
let unreachable = block.unreachable();
block.expr(unreachable);
id
};
block.expr(if_negative_trap.into());
body.block(Box::new([]), Box::new([]), |body| {
let target = body.id();
body.local_get(local)
.i32_const(-1)
.binop(BinaryOp::I32Ne)
.br_if(target)
.unreachable();
});
// stack_pointer = local0 + stack_size
let get_local = block.local_get(local);
let page_size = block.i32_const(PAGE_SIZE as i32);
let sp_base = block.binop(BinaryOp::I32Mul, get_local, page_size);
let stack_size = block.i32_const(stack_size as i32);
let sp = block.binop(BinaryOp::I32Add, sp_base, stack_size);
let set_stack_pointer = block.global_set(stack_pointer, sp);
block.expr(set_stack_pointer);
body.local_get(local)
.i32_const(PAGE_SIZE as i32)
.binop(BinaryOp::I32Mul)
.i32_const(stack_size as i32)
.binop(BinaryOp::I32Add)
.global_set(stack_pointer);
}
let if_nonzero_block = block.id();
drop(block);
},
// If the thread ID is zero then we can skip the update of the stack
// pointer as we know our stack pointer is valid. We need to initialize
// memory, however, so do that here.
let if_zero_block = {
let mut block = builder.if_else_block(Box::new([]), Box::new([]));
|body| {
match &memory_init {
InitMemory::Segments(segments) => {
for segment in segments {
let zero = block.i32_const(0);
let offset = match segment.offset {
InitExpr::Global(id) => block.global_get(id),
InitExpr::Value(v) => block.const_(v),
// let zero = block.i32_const(0);
match segment.offset {
InitExpr::Global(id) => body.global_get(id),
InitExpr::Value(v) => body.const_(v),
};
let len = block.i32_const(segment.len as i32);
let init = block.memory_init(memory, segment.id, offset, zero, len);
block.expr(init);
let drop = block.data_drop(segment.id);
block.expr(drop);
body.i32_const(0)
.i32_const(segment.len as i32)
.memory_init(memory, segment.id)
.data_drop(segment.id);
}
}
InitMemory::Call {
wasm_init_memory, ..
} => {
let call = block.call(*wasm_init_memory, Box::new([]));
block.expr(call);
body.call(*wasm_init_memory);
}
}
block.id()
};
let block = builder.if_else(thread_id_is_nonzero, if_nonzero_block, if_zero_block);
exprs.push(block);
},
);
// If we have these globals then we're using the new thread local system
// implemented in LLVM, which means that `__wasm_init_tls` needs to be
@ -477,21 +457,19 @@ fn inject_start(
} = memory_init
{
let malloc = find_wbindgen_malloc(module)?;
let size = builder.i32_const(tls_size as i32);
let ptr = builder.call(malloc, Box::new([size]));
let block = builder.call(wasm_init_tls, Box::new([ptr]));
exprs.push(block);
body.i32_const(tls_size as i32)
.call(malloc)
.call(wasm_init_tls);
}
// If a start function previously existed we're done with our own
// initialization so delegate to them now.
if let Some(id) = module.start.take() {
exprs.push(builder.call(id, Box::new([])));
body.call(id);
}
// Finish off our newly generated function.
let ty = module.types.add(&[], &[]);
let id = builder.finish(ty, Vec::new(), exprs, module);
let id = builder.finish(Vec::new(), &mut module.funcs);
// ... and finally flag it as the new start function
module.start = Some(id);
@ -559,54 +537,39 @@ fn implement_thread_intrinsics(module: &mut Module, globals: &Globals) -> Result
struct Visitor<'a> {
map: &'a HashMap<FunctionId, Intrinsic>,
globals: &'a Globals,
func: &'a mut LocalFunction,
}
module.funcs.iter_local_mut().for_each(|(_id, func)| {
let mut entry = func.entry_block();
Visitor {
map: &map,
globals,
func,
}
.visit_block_id_mut(&mut entry);
let entry = func.entry_block();
dfs_pre_order_mut(&mut Visitor { map: &map, globals }, func, entry);
});
impl VisitorMut for Visitor<'_> {
fn local_function_mut(&mut self) -> &mut LocalFunction {
self.func
}
fn visit_expr_mut(&mut self, expr: &mut Expr) {
let call = match expr {
Expr::Call(e) => e,
other => return other.visit_mut(self),
fn visit_instr_mut(&mut self, instr: &mut Instr) {
let call = match instr {
Instr::Call(e) => e,
_ => return,
};
match self.map.get(&call.func) {
Some(Intrinsic::GetThreadId) => {
assert!(call.args.is_empty());
*expr = GlobalGet {
*instr = GlobalGet {
global: self.globals.thread_id,
}
.into();
}
Some(Intrinsic::GetTcb) => {
assert!(call.args.is_empty());
*expr = GlobalGet {
*instr = GlobalGet {
global: self.globals.thread_tcb,
}
.into();
}
Some(Intrinsic::SetTcb) => {
assert_eq!(call.args.len(), 1);
call.args[0].visit_mut(self);
*expr = GlobalSet {
*instr = GlobalSet {
global: self.globals.thread_tcb,
value: call.args[0],
}
.into();
}
None => call.visit_mut(self),
None => {}
}
}
}

View File

@ -19,8 +19,8 @@
#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet};
use walrus::ir::ExprId;
use walrus::{FunctionId, LocalFunction, LocalId, Module, TableId};
use walrus::ir::Instr;
use walrus::{FunctionId, LocalId, Module, TableId};
/// A ready-to-go interpreter of a wasm module.
///
@ -46,6 +46,7 @@ pub struct Interpreter {
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
@ -235,7 +236,6 @@ impl Interpreter {
let mut frame = Frame {
module,
local,
interp: self,
locals: BTreeMap::new(),
done: false,
@ -246,121 +246,96 @@ impl Interpreter {
frame.locals.insert(*arg, *val);
}
if block.exprs.len() > 0 {
for expr in block.exprs[..block.exprs.len() - 1].iter() {
let ret = frame.eval(*expr);
for instr in block.instrs.iter() {
frame.eval(instr);
if frame.done {
return ret;
break;
}
}
}
block.exprs.last().and_then(|e| frame.eval(*e))
self.scratch.last().cloned()
}
}
struct Frame<'a> {
module: &'a Module,
local: &'a LocalFunction,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn local(&self, id: LocalId) -> i32 {
self.locals.get(&id).cloned().unwrap_or(0)
}
fn eval(&mut self, expr: ExprId) -> Option<i32> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
match self.local.get(expr) {
Expr::Const(c) => match c.value {
Value::I32(n) => Some(n),
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Expr::LocalGet(e) => Some(self.local(e.local)),
Expr::LocalSet(e) => {
let val = self.eval(e.value).expect("must eval to i32");
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
None
}
// Blindly assume all globals are the stack pointer
Expr::GlobalGet(_) => Some(self.interp.sp),
Expr::GlobalSet(e) => {
let val = self.eval(e.value).expect("must eval to i32");
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
None
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Expr::Binop(e) => {
let lhs = self.eval(e.lhs).expect("must eval to i32");
let rhs = self.eval(e.rhs).expect("must eval to i32");
match e.op {
BinaryOp::I32Sub => Some(lhs - rhs),
BinaryOp::I32Add => Some(lhs + rhs),
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
}
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Expr::Load(e) => {
let address = self.eval(e.address).expect("must eval to i32");
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
Some(self.interp.mem[address as usize / 4])
stack.push(self.interp.mem[address as usize / 4])
}
Expr::Store(e) => {
let address = self.eval(e.address).expect("must eval to i32");
let value = self.eval(e.value).expect("must eval to i32");
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
None
}
Expr::Return(e) => {
Instr::Return(_) => {
log::debug!("return");
self.done = true;
assert!(e.values.len() <= 1);
e.values.get(0).and_then(|id| self.eval(*id))
}
Expr::Drop(e) => {
Instr::Drop(_) => {
log::debug!("drop");
self.eval(e.expr);
None
stack.pop().unwrap();
}
Expr::WithSideEffects(e) => {
log::debug!("side effects");
for x in e.before.iter() {
self.eval(*x);
}
let ret = self.eval(e.value);
for x in e.after.iter() {
self.eval(*x);
}
return ret;
}
Expr::Call(e) => {
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
assert_eq!(e.args.len(), 1);
let val = self.eval(e.args[0]).expect("must eval to i32");
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
None
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
@ -368,21 +343,20 @@ impl Frame<'_> {
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
assert_eq!(e.args.len(), 3);
let val = self.eval(e.args[2]).expect("must eval to i32");
let val = stack.pop().unwrap();
drop(stack.pop());
drop(stack.pop());
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
Some(0)
stack.push(0)
// ... otherwise this is a normal call so we recurse.
} else {
let args = e
.args
.iter()
.map(|e| self.eval(*e).expect("must eval to i32"))
let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
let args = (0..ty.params().len())
.map(|_| stack.pop().unwrap())
.collect::<Vec<_>>();
self.interp.call(e.func, self.module, &args);
None
}
}