360: Single-pass fixes and optimizations. r=syrusakbary a=losfair



Co-authored-by: losfair <zhy20000919@hotmail.com>
Co-authored-by: Lachlan Sneff <lachlan.sneff@gmail.com>
This commit is contained in:
bors[bot] 2019-04-18 15:04:09 +00:00
commit ccad8874e9
4 changed files with 374 additions and 324 deletions

View File

@ -8,22 +8,40 @@ use wasmparser::{Operator, Type as WpType};
/// Module-scope code generator: receives module-level information
/// (signatures, imports) and produces function-scope generators,
/// ultimately yielding a runnable module.
pub trait ModuleCodeGenerator<FCG: FunctionCodeGenerator, RM: RunnableModule> {
/// Checks a backend-specific precondition against the module before compilation.
fn check_precondition(&mut self, module_info: &ModuleInfo) -> Result<(), CodegenError>;
/// Creates a new function and returns the function-scope code generator for it.
fn next_function(&mut self) -> Result<&mut FCG, CodegenError>;
/// Finalizes code generation, consuming the generator and producing the runnable module.
fn finalize(self, module_info: &ModuleInfo) -> Result<RM, CodegenError>;
/// Sets the module's signature table.
fn feed_signatures(&mut self, signatures: Map<SigIndex, FuncSig>) -> Result<(), CodegenError>;
/// Sets function signatures.
fn feed_function_signatures(
&mut self,
assoc: Map<FuncIndex, SigIndex>,
) -> Result<(), CodegenError>;
/// Adds an import function.
fn feed_import_function(&mut self) -> Result<(), CodegenError>;
}
/// The function-scope code generator trait.
pub trait FunctionCodeGenerator {
/// Sets the return type.
fn feed_return(&mut self, ty: WpType) -> Result<(), CodegenError>;
/// Adds a parameter to the function.
fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError>;
/// Adds `n` locals to the function.
fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError>;
/// Called before the first call to `feed_opcode`.
fn begin_body(&mut self) -> Result<(), CodegenError>;
// NOTE(review): duplicate `feed_opcode` declaration — this by-value form looks
// like diff residue of the pre-change signature; the by-reference version just
// below supersedes it. A trait cannot declare both — confirm and remove one.
fn feed_opcode(&mut self, op: Operator, module_info: &ModuleInfo) -> Result<(), CodegenError>;
/// Called for each operator.
fn feed_opcode(&mut self, op: &Operator, module_info: &ModuleInfo) -> Result<(), CodegenError>;
/// Finalizes the function.
fn finalize(&mut self) -> Result<(), CodegenError>;
}

View File

@ -26,6 +26,7 @@ use wasmer_runtime_core::{
use wasmparser::{Operator, Type as WpType};
lazy_static! {
/// Performs a System V call to `target` with [stack_top..stack_base] as the argument list, from right to left.
static ref CONSTRUCT_STACK_AND_CALL_WASM: unsafe extern "C" fn (stack_top: *const u64, stack_base: *const u64, ctx: *mut vm::Ctx, target: *const vm::Func) -> u64 = {
let mut assembler = Assembler::new().unwrap();
let offset = assembler.offset();
@ -403,6 +404,9 @@ impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext> for X64ModuleCode
a.emit_label(label);
labels.insert(id, (label, Some(offset)));
// Emits a tail call trampoline that loads the address of the target import function
// from Ctx and jumps to it.
a.emit_mov(
Size::S64,
Location::Memory(GPR::RDI, vm::Ctx::offset_imported_funcs() as i32),
@ -426,6 +430,7 @@ impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext> for X64ModuleCode
}
impl X64FunctionCode {
/// Moves `loc` to a valid location for `div`/`idiv`.
fn emit_relaxed_xdiv(
a: &mut Assembler,
_m: &mut Machine,
@ -444,6 +449,7 @@ impl X64FunctionCode {
}
}
/// Moves `src` and `dst` to valid locations for `movzx`/`movsx`.
fn emit_relaxed_zx_sx(
a: &mut Assembler,
m: &mut Machine,
@ -481,6 +487,7 @@ impl X64FunctionCode {
m.release_temp_gpr(tmp_src);
}
/// Moves `src` and `dst` to valid locations for generic instructions.
fn emit_relaxed_binop(
a: &mut Assembler,
m: &mut Machine,
@ -546,6 +553,7 @@ impl X64FunctionCode {
}
}
/// Moves `src1` and `src2` to valid locations and possibly adds a layer of indirection for `dst` for AVX instructions.
fn emit_relaxed_avx(
a: &mut Assembler,
m: &mut Machine,
@ -564,6 +572,7 @@ impl X64FunctionCode {
)
}
/// Moves `src1` and `src2` to valid locations and possibly adds a layer of indirection for `dst` for AVX instructions.
fn emit_relaxed_avx_base<F: FnOnce(&mut Assembler, &mut Machine, XMM, XMMOrMemory, XMM)>(
a: &mut Assembler,
m: &mut Machine,
@ -633,6 +642,7 @@ impl X64FunctionCode {
m.release_temp_xmm(tmp1);
}
/// I32 binary operation with both operands popped from the virtual stack.
fn emit_binop_i32(
a: &mut Assembler,
m: &mut Machine,
@ -671,6 +681,7 @@ impl X64FunctionCode {
value_stack.push((ret, LocalOrTemp::Temp));
}
/// I64 binary operation with both operands popped from the virtual stack.
fn emit_binop_i64(
a: &mut Assembler,
m: &mut Machine,
@ -709,6 +720,7 @@ impl X64FunctionCode {
value_stack.push((ret, LocalOrTemp::Temp));
}
/// I32 comparison with `loc_b` from input.
fn emit_cmpop_i32_dynamic_b(
a: &mut Assembler,
m: &mut Machine,
@ -739,6 +751,7 @@ impl X64FunctionCode {
value_stack.push((ret, LocalOrTemp::Temp));
}
/// I32 comparison with both operands popped from the virtual stack.
fn emit_cmpop_i32(
a: &mut Assembler,
m: &mut Machine,
@ -749,6 +762,7 @@ impl X64FunctionCode {
Self::emit_cmpop_i32_dynamic_b(a, m, value_stack, c, loc_b);
}
/// I64 comparison with `loc_b` from input.
fn emit_cmpop_i64_dynamic_b(
a: &mut Assembler,
m: &mut Machine,
@ -779,6 +793,7 @@ impl X64FunctionCode {
value_stack.push((ret, LocalOrTemp::Temp));
}
/// I64 comparison with both operands popped from the virtual stack.
fn emit_cmpop_i64(
a: &mut Assembler,
m: &mut Machine,
@ -789,6 +804,7 @@ impl X64FunctionCode {
Self::emit_cmpop_i64_dynamic_b(a, m, value_stack, c, loc_b);
}
/// I32 `lzcnt`/`tzcnt`/`popcnt` with operand popped from the virtual stack.
fn emit_xcnt_i32(
a: &mut Assembler,
m: &mut Machine,
@ -827,6 +843,7 @@ impl X64FunctionCode {
value_stack.push((ret, LocalOrTemp::Temp));
}
/// I64 `lzcnt`/`tzcnt`/`popcnt` with operand popped from the virtual stack.
fn emit_xcnt_i64(
a: &mut Assembler,
m: &mut Machine,
@ -865,6 +882,7 @@ impl X64FunctionCode {
value_stack.push((ret, LocalOrTemp::Temp));
}
/// I32 shift with both operands popped from the virtual stack.
fn emit_shift_i32(
a: &mut Assembler,
m: &mut Machine,
@ -885,6 +903,7 @@ impl X64FunctionCode {
value_stack.push((ret, LocalOrTemp::Temp));
}
/// I64 shift with both operands popped from the virtual stack.
fn emit_shift_i64(
a: &mut Assembler,
m: &mut Machine,
@ -905,6 +924,7 @@ impl X64FunctionCode {
value_stack.push((ret, LocalOrTemp::Temp));
}
/// Floating point (AVX) binary operation with both operands popped from the virtual stack.
fn emit_fp_binop_avx(
a: &mut Assembler,
m: &mut Machine,
@ -919,6 +939,7 @@ impl X64FunctionCode {
Self::emit_relaxed_avx(a, m, f, loc_a, loc_b, ret);
}
/// Floating point (AVX) comparison with both operands popped from the virtual stack.
fn emit_fp_cmpop_avx(
a: &mut Assembler,
m: &mut Machine,
@ -934,6 +955,7 @@ impl X64FunctionCode {
a.emit_and(Size::S32, Location::Imm32(1), ret); // FIXME: Why?
}
/// Floating point (AVX) binary operation with both operands popped from the virtual stack.
fn emit_fp_unop_avx(
a: &mut Assembler,
m: &mut Machine,
@ -947,7 +969,9 @@ impl X64FunctionCode {
Self::emit_relaxed_avx(a, m, f, loc, loc, ret);
}
// This function must not use RAX before `cb` is called.
/// Emits a System V call sequence.
///
/// This function must not use RAX before `cb` is called.
fn emit_call_sysv<I: Iterator<Item = Location>, F: FnOnce(&mut Assembler)>(
a: &mut Assembler,
m: &mut Machine,
@ -1103,6 +1127,7 @@ impl X64FunctionCode {
}
}
/// Emits a System V call sequence, specialized for labels as the call target.
fn emit_call_sysv_label<I: Iterator<Item = Location>>(
a: &mut Assembler,
m: &mut Machine,
@ -1112,6 +1137,7 @@ impl X64FunctionCode {
Self::emit_call_sysv(a, m, |a| a.emit_call_label(label), params)
}
/// Emits a memory operation.
fn emit_memory_op<F: FnOnce(&mut Assembler, &mut Machine, GPR)>(
module_info: &ModuleInfo,
a: &mut Assembler,
@ -1125,6 +1151,7 @@ impl X64FunctionCode {
let tmp_base = m.acquire_temp_gpr().unwrap();
let tmp_bound = m.acquire_temp_gpr().unwrap();
// Loads both base and bound into temporary registers.
a.emit_mov(
Size::S64,
Location::Memory(
@ -1151,8 +1178,11 @@ impl X64FunctionCode {
Location::Memory(tmp_base, LocalMemory::offset_base() as i32),
Location::GPR(tmp_base),
);
// Adds base to bound so `tmp_bound` now holds the end of linear memory.
a.emit_add(Size::S64, Location::GPR(tmp_base), Location::GPR(tmp_bound));
// If the memory is dynamic, we need to do bound checking at runtime.
let mem_desc = match MemoryIndex::new(0).local_or_import(module_info) {
LocalOrImport::Local(local_mem_index) => &module_info.memories[local_mem_index],
LocalOrImport::Import(import_mem_index) => {
@ -1166,11 +1196,27 @@ impl X64FunctionCode {
if need_check {
a.emit_mov(Size::S32, addr, Location::GPR(tmp_addr));
a.emit_add(
Size::S64,
Location::Imm32((offset + value_size) as u32),
Location::GPR(tmp_addr),
);
// This branch is used for emitting "faster" code for the special case of (offset + value_size) not exceeding u32 range.
match (offset as u32).checked_add(value_size as u32) {
Some(x) => {
a.emit_add(Size::S64, Location::Imm32(x), Location::GPR(tmp_addr));
}
None => {
a.emit_add(
Size::S64,
Location::Imm32(offset as u32),
Location::GPR(tmp_addr),
);
a.emit_add(
Size::S64,
Location::Imm32(value_size as u32),
Location::GPR(tmp_addr),
);
}
}
// Trap if the end address of the requested area is above that of the linear memory.
a.emit_add(Size::S64, Location::GPR(tmp_base), Location::GPR(tmp_addr));
a.emit_cmp(Size::S64, Location::GPR(tmp_bound), Location::GPR(tmp_addr));
a.emit_conditional_trap(Condition::Above);
@ -1178,6 +1224,7 @@ impl X64FunctionCode {
m.release_temp_gpr(tmp_bound);
// Calculates the real address, and loads from it.
a.emit_mov(Size::S32, addr, Location::GPR(tmp_addr));
a.emit_add(
Size::S64,
@ -1192,6 +1239,7 @@ impl X64FunctionCode {
m.release_temp_gpr(tmp_addr);
}
// Checks for underflow/overflow/nan before IxxTrunc{U/S}F32.
fn emit_f32_int_conv_check(
a: &mut Assembler,
m: &mut Machine,
@ -1211,18 +1259,18 @@ impl X64FunctionCode {
// Underflow.
a.emit_mov(Size::S32, Location::Imm32(lower_bound), Location::GPR(tmp));
a.emit_mov(Size::S32, Location::GPR(tmp), Location::XMM(tmp_x));
a.emit_vcmpltss(tmp_x, XMMOrMemory::XMM(reg), tmp_x);
a.emit_vcmpless(reg, XMMOrMemory::XMM(tmp_x), tmp_x);
a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp));
a.emit_cmp(Size::S32, Location::Imm32(1), Location::GPR(tmp));
a.emit_jmp(Condition::Equal, trap);
a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp));
a.emit_jmp(Condition::NotEqual, trap);
// Overflow.
a.emit_mov(Size::S32, Location::Imm32(upper_bound), Location::GPR(tmp));
a.emit_mov(Size::S32, Location::GPR(tmp), Location::XMM(tmp_x));
a.emit_vcmpgtss(tmp_x, XMMOrMemory::XMM(reg), tmp_x);
a.emit_vcmpgess(reg, XMMOrMemory::XMM(tmp_x), tmp_x);
a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp));
a.emit_cmp(Size::S32, Location::Imm32(1), Location::GPR(tmp));
a.emit_jmp(Condition::Equal, trap);
a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp));
a.emit_jmp(Condition::NotEqual, trap);
// NaN.
a.emit_vcmpeqss(reg, XMMOrMemory::XMM(reg), tmp_x);
@ -1239,6 +1287,7 @@ impl X64FunctionCode {
m.release_temp_gpr(tmp);
}
// Checks for underflow/overflow/nan before IxxTrunc{U/S}F64.
fn emit_f64_int_conv_check(
a: &mut Assembler,
m: &mut Machine,
@ -1258,18 +1307,18 @@ impl X64FunctionCode {
// Underflow.
a.emit_mov(Size::S64, Location::Imm64(lower_bound), Location::GPR(tmp));
a.emit_mov(Size::S64, Location::GPR(tmp), Location::XMM(tmp_x));
a.emit_vcmpltsd(tmp_x, XMMOrMemory::XMM(reg), tmp_x);
a.emit_vcmplesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x);
a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp));
a.emit_cmp(Size::S32, Location::Imm32(1), Location::GPR(tmp));
a.emit_jmp(Condition::Equal, trap);
a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp));
a.emit_jmp(Condition::NotEqual, trap);
// Overflow.
a.emit_mov(Size::S64, Location::Imm64(upper_bound), Location::GPR(tmp));
a.emit_mov(Size::S64, Location::GPR(tmp), Location::XMM(tmp_x));
a.emit_vcmpgtsd(tmp_x, XMMOrMemory::XMM(reg), tmp_x);
a.emit_vcmpgesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x);
a.emit_mov(Size::S32, Location::XMM(tmp_x), Location::GPR(tmp));
a.emit_cmp(Size::S32, Location::Imm32(1), Location::GPR(tmp));
a.emit_jmp(Condition::Equal, trap);
a.emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp));
a.emit_jmp(Condition::NotEqual, trap);
// NaN.
a.emit_vcmpeqsd(reg, XMMOrMemory::XMM(reg), tmp_x);
@ -1329,13 +1378,13 @@ impl FunctionCodeGenerator for X64FunctionCode {
Ok(())
}
fn feed_opcode(&mut self, op: Operator, module_info: &ModuleInfo) -> Result<(), CodegenError> {
fn feed_opcode(&mut self, op: &Operator, module_info: &ModuleInfo) -> Result<(), CodegenError> {
//println!("{:?} {}", op, self.value_stack.len());
let was_unreachable;
if self.unreachable_depth > 0 {
was_unreachable = true;
match op {
match *op {
Operator::Block { .. } | Operator::Loop { .. } | Operator::If { .. } => {
self.unreachable_depth += 1;
}
@ -1362,7 +1411,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
}
let a = self.assembler.as_mut().unwrap();
match op {
match *op {
Operator::GetGlobal { global_index } => {
let global_index = global_index as usize;
@ -2633,16 +2682,28 @@ impl FunctionCodeGenerator for X64FunctionCode {
let tmp_out = self.machine.acquire_temp_gpr().unwrap();
let tmp_in = self.machine.acquire_temp_xmm().unwrap();
a.emit_mov(Size::S64, loc, Location::XMM(tmp_in));
let real_in = match loc {
Location::Imm32(_) | Location::Imm64(_) => {
a.emit_mov(Size::S64, loc, Location::GPR(tmp_out));
a.emit_mov(Size::S64, Location::GPR(tmp_out), Location::XMM(tmp_in));
tmp_in
}
Location::XMM(x) => x,
_ => {
a.emit_mov(Size::S64, loc, Location::XMM(tmp_in));
tmp_in
}
};
Self::emit_f64_int_conv_check(
a,
&mut self.machine,
tmp_in,
real_in,
-2147483649.0,
2147483648.0,
);
a.emit_cvttsd2si_32(XMMOrMemory::XMM(tmp_in), tmp_out);
a.emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out);
a.emit_mov(Size::S32, Location::GPR(tmp_out), ret);
self.machine.release_temp_xmm(tmp_in);
@ -3252,7 +3313,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
self.value_stack.push((ret, LocalOrTemp::Temp));
a.emit_mov(Size::S64, Location::GPR(GPR::RAX), ret);
}
Operator::I32Load { memarg } => {
Operator::I32Load { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I32], false)[0];
@ -3277,7 +3338,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::F32Load { memarg } => {
Operator::F32Load { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::F32], false)[0];
@ -3302,7 +3363,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I32Load8U { memarg } => {
Operator::I32Load8U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I32], false)[0];
@ -3328,7 +3389,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I32Load8S { memarg } => {
Operator::I32Load8S { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I32], false)[0];
@ -3354,7 +3415,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I32Load16U { memarg } => {
Operator::I32Load16U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I32], false)[0];
@ -3380,7 +3441,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I32Load16S { memarg } => {
Operator::I32Load16S { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I32], false)[0];
@ -3406,7 +3467,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I32Store { memarg } => {
Operator::I32Store { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
@ -3431,7 +3492,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::F32Store { memarg } => {
Operator::F32Store { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
@ -3456,7 +3517,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I32Store8 { memarg } => {
Operator::I32Store8 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
@ -3481,7 +3542,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I32Store16 { memarg } => {
Operator::I32Store16 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
@ -3506,7 +3567,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Load { memarg } => {
Operator::I64Load { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I64], false)[0];
@ -3531,7 +3592,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::F64Load { memarg } => {
Operator::F64Load { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::F64], false)[0];
@ -3556,7 +3617,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Load8U { memarg } => {
Operator::I64Load8U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I64], false)[0];
@ -3582,7 +3643,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Load8S { memarg } => {
Operator::I64Load8S { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I64], false)[0];
@ -3608,7 +3669,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Load16U { memarg } => {
Operator::I64Load16U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I64], false)[0];
@ -3634,7 +3695,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Load16S { memarg } => {
Operator::I64Load16S { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I64], false)[0];
@ -3660,7 +3721,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Load32U { memarg } => {
Operator::I64Load32U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I64], false)[0];
@ -3691,7 +3752,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Load32S { memarg } => {
Operator::I64Load32S { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(a, &[WpType::I64], false)[0];
@ -3717,7 +3778,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Store { memarg } => {
Operator::I64Store { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
@ -3742,7 +3803,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::F64Store { memarg } => {
Operator::F64Store { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
@ -3767,7 +3828,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Store8 { memarg } => {
Operator::I64Store8 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
@ -3792,7 +3853,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Store16 { memarg } => {
Operator::I64Store16 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
@ -3817,7 +3878,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
},
);
}
Operator::I64Store32 { memarg } => {
Operator::I64Store32 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
@ -3917,7 +3978,7 @@ impl FunctionCodeGenerator for X64FunctionCode {
a.emit_label(after);
}
Operator::BrTable { table } => {
Operator::BrTable { ref table } => {
let (targets, default_target) = table.read_table().unwrap();
let cond =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());

View File

@ -502,18 +502,42 @@ impl Emitter for Assembler {
(Size::S8, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; mov Rb(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S8, Location::Imm32(src), Location::GPR(dst)) => {
dynasm!(self ; mov Rb(dst as u8), src as i8);
}
(Size::S8, Location::Imm64(src), Location::GPR(dst)) => {
dynasm!(self ; mov Rb(dst as u8), src as i8);
}
(Size::S8, Location::Imm32(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov BYTE [Rq(dst as u8) + disp], src as i8);
}
(Size::S8, Location::Imm64(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov BYTE [Rq(dst as u8) + disp], src as i8);
}
(Size::S16, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov [Rq(dst as u8) + disp], Rw(src as u8));
}
(Size::S16, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; mov Rw(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S16, Location::Imm32(src), Location::GPR(dst)) => {
dynasm!(self ; mov Rw(dst as u8), src as i16);
}
(Size::S16, Location::Imm64(src), Location::GPR(dst)) => {
dynasm!(self ; mov Rw(dst as u8), src as i16);
}
(Size::S16, Location::Imm32(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov WORD [Rq(dst as u8) + disp], src as i16);
}
(Size::S16, Location::Imm64(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov WORD [Rq(dst as u8) + disp], src as i16);
}
(Size::S32, Location::Imm64(src), Location::GPR(dst)) => {
dynasm!(self ; mov Rd(dst as u8), src as i32);
}
(Size::S32, Location::Imm64(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov DWORD [Rq(dst as u8) + disp], src as i32);
}
(Size::S32, Location::GPR(src), Location::XMM(dst)) => {
dynasm!(self ; movd Rx(dst as u8), Rd(src as u8));
}

View File

@ -38,30 +38,6 @@ impl From<CodegenError> for LoadError {
}
}
fn validate(bytes: &[u8]) -> Result<(), LoadError> {
let mut parser = wasmparser::ValidatingParser::new(
bytes,
Some(wasmparser::ValidatingParserConfig {
operator_config: wasmparser::OperatorValidatorConfig {
enable_threads: false,
enable_reference_types: false,
enable_simd: false,
enable_bulk_memory: false,
},
mutable_global_imports: false,
}),
);
loop {
let state = parser.read();
match *state {
wasmparser::ParserState::EndWasm => break Ok(()),
wasmparser::ParserState::Error(err) => Err(LoadError::Parse(err))?,
_ => {}
}
}
}
pub fn read_module<
MCG: ModuleCodeGenerator<FCG, RM>,
FCG: FunctionCodeGenerator,
@ -72,7 +48,6 @@ pub fn read_module<
mcg: &mut MCG,
compiler_config: &CompilerConfig,
) -> Result<ModuleInfo, LoadError> {
validate(wasm)?;
let mut info = ModuleInfo {
memories: Map::new(),
globals: Map::new(),
@ -102,277 +77,251 @@ pub fn read_module<
custom_sections: HashMap::new(),
};
let mut reader = ModuleReader::new(wasm)?;
let mut parser = wasmparser::ValidatingParser::new(
wasm,
Some(wasmparser::ValidatingParserConfig {
operator_config: wasmparser::OperatorValidatorConfig {
enable_threads: false,
enable_reference_types: false,
enable_simd: false,
enable_bulk_memory: false,
},
mutable_global_imports: false,
}),
);
let mut namespace_builder = Some(StringTableBuilder::new());
let mut name_builder = Some(StringTableBuilder::new());
let mut func_count: usize = ::std::usize::MAX;
loop {
if reader.eof() {
return Ok(info);
}
let section = reader.read()?;
match section.code {
SectionCode::Type => {
let type_reader = section.get_type_section_reader()?;
for ty in type_reader {
let ty = ty?;
info.signatures.push(func_type_to_func_sig(ty)?);
}
mcg.feed_signatures(info.signatures.clone())?;
use wasmparser::ParserState;
let state = parser.read();
match *state {
ParserState::EndWasm => break Ok(info),
ParserState::Error(err) => Err(LoadError::Parse(err))?,
ParserState::TypeSectionEntry(ref ty) => {
info.signatures.push(func_type_to_func_sig(ty)?);
}
SectionCode::Import => {
let import_reader = section.get_import_section_reader()?;
let mut namespace_builder = StringTableBuilder::new();
let mut name_builder = StringTableBuilder::new();
ParserState::ImportSectionEntry { module, field, ty } => {
let namespace_index = namespace_builder.as_mut().unwrap().register(module);
let name_index = name_builder.as_mut().unwrap().register(field);
let import_name = ImportName {
namespace_index,
name_index,
};
for import in import_reader {
let Import { module, field, ty } = import?;
match ty {
ImportSectionEntryType::Function(sigindex) => {
let sigindex = SigIndex::new(sigindex as usize);
info.imported_functions.push(import_name);
info.func_assoc.push(sigindex);
mcg.feed_import_function()?;
}
ImportSectionEntryType::Table(table_ty) => {
assert_eq!(table_ty.element_type, WpType::AnyFunc);
let table_desc = TableDescriptor {
element: ElementType::Anyfunc,
minimum: table_ty.limits.initial,
maximum: table_ty.limits.maximum,
};
let namespace_index = namespace_builder.register(module);
let name_index = name_builder.register(field);
let import_name = ImportName {
namespace_index,
name_index,
};
match ty {
ImportSectionEntryType::Function(sigindex) => {
let sigindex = SigIndex::new(sigindex as usize);
info.imported_functions.push(import_name);
info.func_assoc.push(sigindex);
mcg.feed_import_function()?;
}
ImportSectionEntryType::Table(table_ty) => {
assert_eq!(table_ty.element_type, WpType::AnyFunc);
let table_desc = TableDescriptor {
element: ElementType::Anyfunc,
minimum: table_ty.limits.initial,
maximum: table_ty.limits.maximum,
};
info.imported_tables.push((import_name, table_desc));
}
ImportSectionEntryType::Memory(memory_ty) => {
let mem_desc = MemoryDescriptor {
minimum: Pages(memory_ty.limits.initial),
maximum: memory_ty.limits.maximum.map(|max| Pages(max)),
shared: memory_ty.shared,
};
info.imported_memories.push((import_name, mem_desc));
}
ImportSectionEntryType::Global(global_ty) => {
let global_desc = GlobalDescriptor {
mutable: global_ty.mutable,
ty: wp_type_to_type(global_ty.content_type)?,
};
info.imported_globals.push((import_name, global_desc));
}
info.imported_tables.push((import_name, table_desc));
}
ImportSectionEntryType::Memory(memory_ty) => {
let mem_desc = MemoryDescriptor {
minimum: Pages(memory_ty.limits.initial),
maximum: memory_ty.limits.maximum.map(|max| Pages(max)),
shared: memory_ty.shared,
};
info.imported_memories.push((import_name, mem_desc));
}
ImportSectionEntryType::Global(global_ty) => {
let global_desc = GlobalDescriptor {
mutable: global_ty.mutable,
ty: wp_type_to_type(global_ty.content_type)?,
};
info.imported_globals.push((import_name, global_desc));
}
}
info.namespace_table = namespace_builder.finish();
info.name_table = name_builder.finish();
}
SectionCode::Function => {
let func_decl_reader = section.get_function_section_reader()?;
for sigindex in func_decl_reader {
let sigindex = sigindex?;
let sigindex = SigIndex::new(sigindex as usize);
info.func_assoc.push(sigindex);
}
mcg.feed_function_signatures(info.func_assoc.clone())?;
ParserState::FunctionSectionEntry(sigindex) => {
let sigindex = SigIndex::new(sigindex as usize);
info.func_assoc.push(sigindex);
}
SectionCode::Table => {
let table_decl_reader = section.get_table_section_reader()?;
ParserState::TableSectionEntry(table_ty) => {
let table_desc = TableDescriptor {
element: ElementType::Anyfunc,
minimum: table_ty.limits.initial,
maximum: table_ty.limits.maximum,
};
for table_ty in table_decl_reader {
let table_ty = table_ty?;
let table_desc = TableDescriptor {
element: ElementType::Anyfunc,
minimum: table_ty.limits.initial,
maximum: table_ty.limits.maximum,
};
info.tables.push(table_desc);
}
info.tables.push(table_desc);
}
SectionCode::Memory => {
let mem_decl_reader = section.get_memory_section_reader()?;
ParserState::MemorySectionEntry(memory_ty) => {
let mem_desc = MemoryDescriptor {
minimum: Pages(memory_ty.limits.initial),
maximum: memory_ty.limits.maximum.map(|max| Pages(max)),
shared: memory_ty.shared,
};
for memory_ty in mem_decl_reader {
let memory_ty = memory_ty?;
let mem_desc = MemoryDescriptor {
minimum: Pages(memory_ty.limits.initial),
maximum: memory_ty.limits.maximum.map(|max| Pages(max)),
shared: memory_ty.shared,
};
info.memories.push(mem_desc);
}
info.memories.push(mem_desc);
}
SectionCode::Global => {
let global_decl_reader = section.get_global_section_reader()?;
ParserState::ExportSectionEntry { field, kind, index } => {
let export_index = match kind {
ExternalKind::Function => ExportIndex::Func(FuncIndex::new(index as usize)),
ExternalKind::Table => ExportIndex::Table(TableIndex::new(index as usize)),
ExternalKind::Memory => ExportIndex::Memory(MemoryIndex::new(index as usize)),
ExternalKind::Global => ExportIndex::Global(GlobalIndex::new(index as usize)),
};
for global in global_decl_reader {
let global = global?;
let desc = GlobalDescriptor {
mutable: global.ty.mutable,
ty: wp_type_to_type(global.ty.content_type)?,
};
let global_init = GlobalInit {
desc,
init: eval_init_expr(&global.init_expr)?,
};
info.globals.push(global_init);
}
info.exports.insert(field.to_string(), export_index);
}
SectionCode::Export => {
let export_reader = section.get_export_section_reader()?;
for export in export_reader {
let Export { field, kind, index } = export?;
let export_index = match kind {
ExternalKind::Function => ExportIndex::Func(FuncIndex::new(index as usize)),
ExternalKind::Table => ExportIndex::Table(TableIndex::new(index as usize)),
ExternalKind::Memory => {
ExportIndex::Memory(MemoryIndex::new(index as usize))
}
ExternalKind::Global => {
ExportIndex::Global(GlobalIndex::new(index as usize))
}
};
info.exports.insert(field.to_string(), export_index);
}
}
SectionCode::Start => {
let start_index = section.get_start_section_content()?;
ParserState::StartSectionEntry(start_index) => {
info.start_func = Some(FuncIndex::new(start_index as usize));
}
SectionCode::Element => {
let element_reader = section.get_element_section_reader()?;
ParserState::BeginFunctionBody { .. } => {
let id = func_count.wrapping_add(1);
func_count = id;
if func_count == 0 {
info.namespace_table = namespace_builder.take().unwrap().finish();
info.name_table = name_builder.take().unwrap().finish();
mcg.feed_signatures(info.signatures.clone())?;
mcg.feed_function_signatures(info.func_assoc.clone())?;
mcg.check_precondition(&info)?;
}
for element in element_reader {
let Element { kind, items } = element?;
let fcg = mcg.next_function()?;
let sig = info
.signatures
.get(
*info
.func_assoc
.get(FuncIndex::new(id as usize + info.imported_functions.len()))
.unwrap(),
)
.unwrap();
for ret in sig.returns() {
fcg.feed_return(type_to_wp_type(*ret))?;
}
for param in sig.params() {
fcg.feed_param(type_to_wp_type(*param))?;
}
match kind {
ElementKind::Active {
table_index,
init_expr,
} => {
let table_index = TableIndex::new(table_index as usize);
let base = eval_init_expr(&init_expr)?;
let items_reader = items.get_items_reader()?;
let mut body_begun = false;
let elements: Vec<_> = items_reader
.into_iter()
.map(|res| res.map(|index| FuncIndex::new(index as usize)))
.collect::<Result<_, _>>()?;
let table_init = TableInitializer {
table_index,
base,
elements,
};
info.elem_initializers.push(table_init);
}
ElementKind::Passive(_ty) => {
return Err(BinaryReaderError {
message: "passive tables are not yet supported",
offset: -1isize as usize,
loop {
let state = parser.read();
match *state {
ParserState::Error(err) => return Err(LoadError::Parse(err)),
ParserState::FunctionBodyLocals { ref locals } => {
for &(count, ty) in locals.iter() {
fcg.feed_local(ty, count as usize)?;
}
.into());
}
}
}
}
SectionCode::Code => {
let mut code_reader = section.get_code_section_reader()?;
if code_reader.get_count() as usize > info.func_assoc.len() {
return Err(BinaryReaderError {
message: "code_reader.get_count() > info.func_assoc.len()",
offset: ::std::usize::MAX,
}
.into());
}
mcg.check_precondition(&info)?;
for i in 0..code_reader.get_count() {
let item = code_reader.read()?;
let fcg = mcg.next_function()?;
let sig = info
.signatures
.get(
*info
.func_assoc
.get(FuncIndex::new(i as usize + info.imported_functions.len()))
.unwrap(),
)
.unwrap();
for ret in sig.returns() {
fcg.feed_return(type_to_wp_type(*ret))?;
}
for param in sig.params() {
fcg.feed_param(type_to_wp_type(*param))?;
}
for local in item.get_locals_reader()? {
let (count, ty) = local?;
fcg.feed_local(ty, count as usize)?;
}
fcg.begin_body()?;
for op in item.get_operators_reader()? {
let op = op?;
fcg.feed_opcode(op, &info)?;
}
fcg.finalize()?;
}
}
SectionCode::Data => {
let data_reader = section.get_data_section_reader()?;
for data in data_reader {
let Data { kind, data } = data?;
match kind {
DataKind::Active {
memory_index,
init_expr,
} => {
let memory_index = MemoryIndex::new(memory_index as usize);
let base = eval_init_expr(&init_expr)?;
let data_init = DataInitializer {
memory_index,
base,
data: data.to_vec(),
};
info.data_initializers.push(data_init);
}
DataKind::Passive => {
return Err(BinaryReaderError {
message: "passive memories are not yet supported",
offset: -1isize as usize,
ParserState::CodeOperator(ref op) => {
if !body_begun {
body_begun = true;
fcg.begin_body()?;
}
.into());
fcg.feed_opcode(op, &info)?;
}
ParserState::EndFunctionBody => break,
_ => unreachable!(),
}
}
fcg.finalize()?;
}
SectionCode::DataCount => {}
SectionCode::Custom { .. } => {}
ParserState::BeginActiveElementSectionEntry(table_index) => {
let table_index = TableIndex::new(table_index as usize);
let mut elements: Option<Vec<FuncIndex>> = None;
let mut base: Option<Initializer> = None;
loop {
let state = parser.read();
match *state {
ParserState::Error(err) => return Err(LoadError::Parse(err)),
ParserState::InitExpressionOperator(ref op) => {
base = Some(eval_init_expr(op)?)
}
ParserState::ElementSectionEntryBody(ref _elements) => {
elements = Some(
_elements
.iter()
.cloned()
.map(|index| FuncIndex::new(index as usize))
.collect(),
);
}
ParserState::BeginInitExpressionBody
| ParserState::EndInitExpressionBody => {}
ParserState::EndElementSectionEntry => break,
_ => unreachable!(),
}
}
let table_init = TableInitializer {
table_index,
base: base.unwrap(),
elements: elements.unwrap(),
};
info.elem_initializers.push(table_init);
}
ParserState::BeginActiveDataSectionEntry(memory_index) => {
let memory_index = MemoryIndex::new(memory_index as usize);
let mut base: Option<Initializer> = None;
let mut data: Vec<u8> = vec![];
loop {
let state = parser.read();
match *state {
ParserState::Error(err) => return Err(LoadError::Parse(err)),
ParserState::InitExpressionOperator(ref op) => {
base = Some(eval_init_expr(op)?)
}
ParserState::DataSectionEntryBodyChunk(chunk) => {
data = chunk.to_vec();
}
ParserState::BeginInitExpressionBody
| ParserState::EndInitExpressionBody => {}
ParserState::BeginDataSectionEntryBody(_)
| ParserState::EndDataSectionEntryBody => {}
ParserState::EndDataSectionEntry => break,
_ => unreachable!(),
}
}
let data_init = DataInitializer {
memory_index,
base: base.unwrap(),
data,
};
info.data_initializers.push(data_init);
}
ParserState::BeginGlobalSectionEntry(ty) => {
let init = loop {
let state = parser.read();
match *state {
ParserState::Error(err) => return Err(LoadError::Parse(err)),
ParserState::InitExpressionOperator(ref op) => {
break eval_init_expr(op)?;
}
ParserState::BeginInitExpressionBody => {}
_ => unreachable!(),
}
};
let desc = GlobalDescriptor {
mutable: ty.mutable,
ty: wp_type_to_type(ty.content_type)?,
};
let global_init = GlobalInit { desc, init };
info.globals.push(global_init);
}
_ => {}
}
}
}
@ -402,7 +351,7 @@ pub fn type_to_wp_type(ty: Type) -> WpType {
}
}
fn func_type_to_func_sig(func_ty: FuncType) -> Result<FuncSig, BinaryReaderError> {
fn func_type_to_func_sig(func_ty: &FuncType) -> Result<FuncSig, BinaryReaderError> {
assert_eq!(func_ty.form, WpType::Func);
Ok(FuncSig::new(
@ -421,10 +370,8 @@ fn func_type_to_func_sig(func_ty: FuncType) -> Result<FuncSig, BinaryReaderError
))
}
fn eval_init_expr(expr: &InitExpr) -> Result<Initializer, BinaryReaderError> {
let mut reader = expr.get_operators_reader();
let (op, offset) = reader.read_with_offset()?;
Ok(match op {
fn eval_init_expr(op: &Operator) -> Result<Initializer, BinaryReaderError> {
Ok(match *op {
Operator::GetGlobal { global_index } => {
Initializer::GetGlobal(ImportedGlobalIndex::new(global_index as usize))
}
@ -439,7 +386,7 @@ fn eval_init_expr(expr: &InitExpr) -> Result<Initializer, BinaryReaderError> {
_ => {
return Err(BinaryReaderError {
message: "init expr evaluation failed: unsupported opcode",
offset,
offset: -1isize as usize,
});
}
})