Mirror of https://github.com/fluencelabs/wasmer
Merge #1183

1183: Make full preemption an optional feature. r=syrusakbary a=losfair

Full preemption requires two additional memory loads on loop backedges and function calls. This PR allows disabling full preemption at code generation time, and disables it by default.

Co-authored-by: losfair <zhy20000919@hotmail.com>
Co-authored-by: Heyang Zhou <zhy20000919@hotmail.com>
commit 2c44b700c8
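For context, the new switch is a plain boolean on CompilerConfig, so an embedder that wants the old behaviour back can opt in explicitly. A minimal sketch, not taken from this commit: the field names come from the CompilerConfig hunk below, while the `wasmer_runtime_core::backend` import path is assumed from the 0.x API.

```rust
use wasmer_runtime_core::backend::CompilerConfig;

// Sketch only: re-enable the loop-backedge / call-site checkpoints,
// which this commit turns off by default.
let config = CompilerConfig {
    // State tracking drives the unwinding machinery the checkpoints rely on.
    track_state: true,
    // Opt back into full preemption despite the small runtime overhead.
    full_preemption: true,
    ..Default::default()
};
```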
@@ -109,9 +109,28 @@ impl BackendCompilerConfig {
 pub struct CompilerConfig {
     /// Symbol information generated from emscripten; used for more detailed debug messages
     pub symbol_map: Option<HashMap<u32, String>>,
+
+    /// How to make the decision whether to emit bounds checks for memory accesses.
     pub memory_bound_check_mode: MemoryBoundCheckMode,
+
+    /// Whether to generate explicit native stack checks against `stack_lower_bound` in `InternalCtx`.
+    ///
+    /// Usually it's adequate to use hardware memory protection mechanisms such as `mprotect` on Unix to
+    /// prevent stack overflow. But for low-level environments, e.g. the kernel, faults are generally
+    /// not expected and relying on hardware memory protection would add too much complexity.
     pub enforce_stack_check: bool,
+
+    /// Whether to enable state tracking. Necessary for managed mode.
     pub track_state: bool,
+
+    /// Whether to enable full preemption checkpoint generation.
+    ///
+    /// This inserts checkpoints at critical locations such as loop backedges and function calls,
+    /// allowing preemptive unwinding/task switching.
+    ///
+    /// When enabled there can be a small amount of runtime performance overhead.
+    pub full_preemption: bool,
+
     pub features: Features,

     // Target info. Presently only supported by LLVM.
@@ -639,6 +639,7 @@ struct CodegenConfig {
     memory_bound_check_mode: MemoryBoundCheckMode,
     enforce_stack_check: bool,
     track_state: bool,
+    full_preemption: bool,
 }

 impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
@@ -908,6 +909,7 @@ impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
             memory_bound_check_mode: config.memory_bound_check_mode,
             enforce_stack_check: config.enforce_stack_check,
             track_state: config.track_state,
+            full_preemption: config.full_preemption,
         }));
         Ok(())
     }
@@ -2103,6 +2105,10 @@ impl X64FunctionCode {
             true,
             value_size,
             |a, m, addr| {
+                // Memory moves with size < 32b do not zero upper bits.
+                if memory_sz < Size::S32 {
+                    a.emit_xor(Size::S32, Location::GPR(compare), Location::GPR(compare));
+                }
                 a.emit_mov(memory_sz, Location::Memory(addr, 0), Location::GPR(compare));
                 a.emit_mov(stack_sz, Location::GPR(compare), ret);
                 cb(a, m, compare, value);
@@ -2478,28 +2484,31 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
         // Check interrupt signal without branching
         let activate_offset = a.get_offset().0;

-        a.emit_mov(
-            Size::S64,
-            Location::Memory(
-                Machine::get_vmctx_reg(),
-                vm::Ctx::offset_interrupt_signal_mem() as i32,
-            ),
-            Location::GPR(GPR::RAX),
-        );
-        self.fsm.loop_offsets.insert(
-            a.get_offset().0,
-            OffsetInfo {
-                end_offset: a.get_offset().0 + 1,
-                activate_offset,
-                diff_id: state_diff_id,
-            },
-        );
-        self.fsm.wasm_function_header_target_offset = Some(SuspendOffset::Loop(a.get_offset().0));
-        a.emit_mov(
-            Size::S64,
-            Location::Memory(GPR::RAX, 0),
-            Location::GPR(GPR::RAX),
-        );
+        if self.config.full_preemption {
+            a.emit_mov(
+                Size::S64,
+                Location::Memory(
+                    Machine::get_vmctx_reg(),
+                    vm::Ctx::offset_interrupt_signal_mem() as i32,
+                ),
+                Location::GPR(GPR::RAX),
+            );
+            self.fsm.loop_offsets.insert(
+                a.get_offset().0,
+                OffsetInfo {
+                    end_offset: a.get_offset().0 + 1,
+                    activate_offset,
+                    diff_id: state_diff_id,
+                },
+            );
+            self.fsm.wasm_function_header_target_offset =
+                Some(SuspendOffset::Loop(a.get_offset().0));
+            a.emit_mov(
+                Size::S64,
+                Location::Memory(GPR::RAX, 0),
+                Location::GPR(GPR::RAX),
+            );
+        }

         if self.machine.state.wasm_inst_offset != usize::MAX {
             return Err(CodegenError {
@@ -6557,31 +6566,33 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
                 a.emit_label(label);

                 // Check interrupt signal without branching
-                a.emit_mov(
-                    Size::S64,
-                    Location::Memory(
-                        Machine::get_vmctx_reg(),
-                        vm::Ctx::offset_interrupt_signal_mem() as i32,
-                    ),
-                    Location::GPR(GPR::RAX),
-                );
-                self.fsm.loop_offsets.insert(
-                    a.get_offset().0,
-                    OffsetInfo {
-                        end_offset: a.get_offset().0 + 1,
-                        activate_offset,
-                        diff_id: state_diff_id,
-                    },
-                );
-                self.fsm.wasm_offset_to_target_offset.insert(
-                    self.machine.state.wasm_inst_offset,
-                    SuspendOffset::Loop(a.get_offset().0),
-                );
-                a.emit_mov(
-                    Size::S64,
-                    Location::Memory(GPR::RAX, 0),
-                    Location::GPR(GPR::RAX),
-                );
+                if self.config.full_preemption {
+                    a.emit_mov(
+                        Size::S64,
+                        Location::Memory(
+                            Machine::get_vmctx_reg(),
+                            vm::Ctx::offset_interrupt_signal_mem() as i32,
+                        ),
+                        Location::GPR(GPR::RAX),
+                    );
+                    self.fsm.loop_offsets.insert(
+                        a.get_offset().0,
+                        OffsetInfo {
+                            end_offset: a.get_offset().0 + 1,
+                            activate_offset,
+                            diff_id: state_diff_id,
+                        },
+                    );
+                    self.fsm.wasm_offset_to_target_offset.insert(
+                        self.machine.state.wasm_inst_offset,
+                        SuspendOffset::Loop(a.get_offset().0),
+                    );
+                    a.emit_mov(
+                        Size::S64,
+                        Location::Memory(GPR::RAX, 0),
+                        Location::GPR(GPR::RAX),
+                    );
+                }
             }
             Operator::Nop => {}
             Operator::MemorySize { reserved } => {
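Both hunks above wrap the same checkpoint sequence: one load that fetches the interrupt-signal pointer out of the vmctx, and one load through that pointer, i.e. the two extra memory loads the PR description refers to. A rough Rust rendering of what the emitted x86-64 does; this is a sketch for illustration only, and the idea that the runtime revokes access to the signal page to request preemption is an assumption about the surrounding machinery, not something shown in this diff.

```rust
/// Sketch: the two loads emitted at a checkpoint, written as Rust.
/// `interrupt_signal_mem` stands for the slot at
/// `vm::Ctx::offset_interrupt_signal_mem()` inside the vmctx.
unsafe fn preemption_checkpoint(interrupt_signal_mem: *const *const u8) {
    // Load 1: mov vmctx[offset_interrupt_signal_mem] -> RAX
    let page = core::ptr::read_volatile(interrupt_signal_mem);
    // Load 2: mov [RAX] -> RAX. If the runtime wants to preempt, this access
    // can be made to fault, and the recorded OffsetInfo/loop_offsets entries
    // describe how to unwind or switch tasks at exactly this point.
    let _ = core::ptr::read_volatile(page);
}
```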
@@ -703,6 +703,10 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
             symbol_map: em_symbol_map.clone(),
             memory_bound_check_mode: MemoryBoundCheckMode::Disable,
             enforce_stack_check: true,
+
+            // Kernel loader does not support explicit preemption checkpoints.
+            full_preemption: false,
+
             track_state,
             features: options.features.into_backend_features(),
             backend_specific_config,
@@ -717,6 +721,11 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
         CompilerConfig {
             symbol_map: em_symbol_map.clone(),
             track_state,
+
+            // Enable full preemption if state tracking is enabled.
+            // Preemption only makes sense with state information.
+            full_preemption: track_state,
+
             features: options.features.into_backend_features(),
             backend_specific_config,
             ..Default::default()
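Taken together, the two CLI hunks above choose the flag from the execution mode. A condensed sketch of that decision; `is_kernel_loader` is a hypothetical stand-in for the CLI's loader selection, and the comments restate the ones in the diff.

```rust
// Sketch: how the wasmer CLI effectively picks the flag after this commit.
let full_preemption = if is_kernel_loader {
    // Kernel loader does not support explicit preemption checkpoints.
    false
} else {
    // Preemption only makes sense when state tracking is enabled.
    track_state
};
```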
@@ -813,7 +822,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
         LoaderName::Kernel => Box::new(
             instance
                 .load(::wasmer_kernel_loader::KernelLoader)
-                .map_err(|e| format!("Can't use the local loader: {:?}", e))?,
+                .map_err(|e| format!("Can't use the kernel loader: {:?}", e))?,
         ),
     };
     println!("{:?}", ins.call(index, &args));