Mirror of https://github.com/fluencelabs/wasmer, synced 2025-04-26 19:02:13 +00:00
Merge #1183

1183: Make full preemption an optional feature. r=syrusakbary a=losfair

Full preemption requires two additional memory loads on loop backedges and function calls. This PR allows disabling full preemption at code generation time, and disables it by default.

Co-authored-by: losfair <zhy20000919@hotmail.com>
Co-authored-by: Heyang Zhou <zhy20000919@hotmail.com>

This commit is contained in: 2c44b700c8
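For embedders, opting back in is a one-field change on `CompilerConfig`. A minimal sketch, mirroring the `..Default::default()` construction used in `execute_wasm` further down this diff; the surrounding compile call and the exact `Default` values are assumed context, not part of this change:

    // Sketch only: assumes the wasmer 0.x CompilerConfig as changed in this diff.
    let config = CompilerConfig {
        // Now off by default; enable explicitly when preemptive
        // unwinding/task switching is required.
        full_preemption: true,
        // Checkpoints are only useful together with state tracking.
        track_state: true,
        ..Default::default()
    };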
@@ -109,9 +109,28 @@ impl BackendCompilerConfig {
 pub struct CompilerConfig {
     /// Symbol information generated from emscripten; used for more detailed debug messages
     pub symbol_map: Option<HashMap<u32, String>>,
+
+    /// How to make the decision whether to emit bounds checks for memory accesses.
     pub memory_bound_check_mode: MemoryBoundCheckMode,
+
+    /// Whether to generate explicit native stack checks against `stack_lower_bound` in `InternalCtx`.
+    ///
+    /// Usually it's adequate to use hardware memory protection mechanisms such as `mprotect` on Unix to
+    /// prevent stack overflow. But for low-level environments, e.g. the kernel, faults are generally
+    /// not expected and relying on hardware memory protection would add too much complexity.
     pub enforce_stack_check: bool,
+
+    /// Whether to enable state tracking. Necessary for managed mode.
     pub track_state: bool,
+
+    /// Whether to enable full preemption checkpoint generation.
+    ///
+    /// This inserts checkpoints at critical locations such as loop backedges and function calls,
+    /// allowing preemptive unwinding/task switching.
+    ///
+    /// When enabled there can be a small amount of runtime performance overhead.
+    pub full_preemption: bool,
+
     pub features: Features,

     // Target info. Presently only supported by LLVM.
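The "small amount of runtime performance overhead" in the doc comment above is concrete: each checkpoint is two dependent loads, one fetching the interrupt-signal pointer out of the VM context and one reading through it. A stand-alone Rust model of that per-backedge cost (not wasmer code; `Ctx` and its field are stand-ins for `vm::Ctx` and `offset_interrupt_signal_mem`):

    use std::sync::atomic::{AtomicU64, Ordering};

    /// Stand-in for the VM context the generated code receives.
    struct Ctx {
        interrupt_signal_mem: *const AtomicU64,
    }

    fn hot_loop(ctx: &Ctx, n: u64) -> u64 {
        let mut acc = 0u64;
        for i in 0..n {
            // The checkpoint: load the pointer, then load through it.
            // These two loads per backedge are exactly what
            // `full_preemption: false` now omits.
            let sig = unsafe { &*ctx.interrupt_signal_mem };
            let _ = sig.load(Ordering::Relaxed);

            acc = acc.wrapping_add(i);
        }
        acc
    }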
@@ -639,6 +639,7 @@ struct CodegenConfig {
     memory_bound_check_mode: MemoryBoundCheckMode,
     enforce_stack_check: bool,
     track_state: bool,
+    full_preemption: bool,
 }

 impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
@@ -908,6 +909,7 @@ impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
             memory_bound_check_mode: config.memory_bound_check_mode,
             enforce_stack_check: config.enforce_stack_check,
             track_state: config.track_state,
+            full_preemption: config.full_preemption,
         }));
         Ok(())
     }
@@ -2103,6 +2105,10 @@ impl X64FunctionCode {
             true,
             value_size,
             |a, m, addr| {
+                // Memory moves with size < 32b do not zero upper bits.
+                if memory_sz < Size::S32 {
+                    a.emit_xor(Size::S32, Location::GPR(compare), Location::GPR(compare));
+                }
                 a.emit_mov(memory_sz, Location::Memory(addr, 0), Location::GPR(compare));
                 a.emit_mov(stack_sz, Location::GPR(compare), ret);
                 cb(a, m, compare, value);
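This hunk also piggybacks a correctness fix. On x86-64, writing a 32-bit register implicitly zeroes the upper 32 bits of the full 64-bit register, but 8-bit and 16-bit moves leave the upper bits untouched, so a sub-32-bit load into `compare` could observe stale high bits from the register's previous use. The added `xor` is the standard zeroing idiom. A self-contained Rust illustration of the effect it prevents:

    fn main() {
        // A 64-bit scratch register holding stale bits from earlier use.
        let stale: u64 = 0xDEAD_BEEF_CAFE_F00D;

        // A 16-bit value loaded from memory.
        let loaded: u16 = 0x1234;

        // Without zeroing first: only the low 16 bits are replaced,
        // mirroring how a 16-bit mov leaves bits 16..64 unchanged.
        let without_zeroing = (stale & !0xFFFF) | loaded as u64;
        assert_eq!(without_zeroing, 0xDEAD_BEEF_CAFE_1234);

        // With the xor-zeroing the backend now emits, the result is a
        // clean zero-extension of the loaded value.
        let with_zeroing = 0u64 | loaded as u64;
        assert_eq!(with_zeroing, 0x0000_0000_0000_1234);
    }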
@@ -2478,28 +2484,31 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
         // Check interrupt signal without branching
         let activate_offset = a.get_offset().0;

-        a.emit_mov(
-            Size::S64,
-            Location::Memory(
-                Machine::get_vmctx_reg(),
-                vm::Ctx::offset_interrupt_signal_mem() as i32,
-            ),
-            Location::GPR(GPR::RAX),
-        );
-        self.fsm.loop_offsets.insert(
-            a.get_offset().0,
-            OffsetInfo {
-                end_offset: a.get_offset().0 + 1,
-                activate_offset,
-                diff_id: state_diff_id,
-            },
-        );
-        self.fsm.wasm_function_header_target_offset = Some(SuspendOffset::Loop(a.get_offset().0));
-        a.emit_mov(
-            Size::S64,
-            Location::Memory(GPR::RAX, 0),
-            Location::GPR(GPR::RAX),
-        );
+        if self.config.full_preemption {
+            a.emit_mov(
+                Size::S64,
+                Location::Memory(
+                    Machine::get_vmctx_reg(),
+                    vm::Ctx::offset_interrupt_signal_mem() as i32,
+                ),
+                Location::GPR(GPR::RAX),
+            );
+            self.fsm.loop_offsets.insert(
+                a.get_offset().0,
+                OffsetInfo {
+                    end_offset: a.get_offset().0 + 1,
+                    activate_offset,
+                    diff_id: state_diff_id,
+                },
+            );
+            self.fsm.wasm_function_header_target_offset =
+                Some(SuspendOffset::Loop(a.get_offset().0));
+            a.emit_mov(
+                Size::S64,
+                Location::Memory(GPR::RAX, 0),
+                Location::GPR(GPR::RAX),
+            );
+        }

         if self.machine.state.wasm_inst_offset != usize::MAX {
             return Err(CodegenError {
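Note what the guarded block emits when enabled: just the two `mov`s, with no compare or conditional jump, which is what the "without branching" comment means; the `loop_offsets` entries let the runtime map a trap at the checkpoint back to wasm state. How the host actually delivers the preemption is not shown in this diff; a plausible reading, inferred from the load-only check, is that it revokes access to the signal page so the next checkpoint load faults. A hedged libc sketch of that trick (the signal handler that performs the unwind is omitted, and the mechanism itself is an assumption, not confirmed by this diff):

    use libc::{
        mmap, mprotect, MAP_ANON, MAP_FAILED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE,
    };
    use std::ptr;

    fn main() {
        unsafe {
            // One page standing in for the memory behind
            // `vm::Ctx::offset_interrupt_signal_mem()`.
            let page = mmap(
                ptr::null_mut(),
                4096,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON,
                -1,
                0,
            );
            assert_ne!(page, MAP_FAILED);

            // While the page is readable, each checkpoint is two cheap loads.
            let _ = ptr::read_volatile(page as *const u64);

            // To preempt: revoke access. The next checkpoint load faults, and
            // a SIGSEGV handler (not shown) would unwind using the offsets
            // recorded in `fsm.loop_offsets`.
            assert_eq!(mprotect(page, 4096, PROT_NONE), 0);
        }
    }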
@@ -6557,31 +6566,33 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
                 a.emit_label(label);

                 // Check interrupt signal without branching
-                a.emit_mov(
-                    Size::S64,
-                    Location::Memory(
-                        Machine::get_vmctx_reg(),
-                        vm::Ctx::offset_interrupt_signal_mem() as i32,
-                    ),
-                    Location::GPR(GPR::RAX),
-                );
-                self.fsm.loop_offsets.insert(
-                    a.get_offset().0,
-                    OffsetInfo {
-                        end_offset: a.get_offset().0 + 1,
-                        activate_offset,
-                        diff_id: state_diff_id,
-                    },
-                );
-                self.fsm.wasm_offset_to_target_offset.insert(
-                    self.machine.state.wasm_inst_offset,
-                    SuspendOffset::Loop(a.get_offset().0),
-                );
-                a.emit_mov(
-                    Size::S64,
-                    Location::Memory(GPR::RAX, 0),
-                    Location::GPR(GPR::RAX),
-                );
+                if self.config.full_preemption {
+                    a.emit_mov(
+                        Size::S64,
+                        Location::Memory(
+                            Machine::get_vmctx_reg(),
+                            vm::Ctx::offset_interrupt_signal_mem() as i32,
+                        ),
+                        Location::GPR(GPR::RAX),
+                    );
+                    self.fsm.loop_offsets.insert(
+                        a.get_offset().0,
+                        OffsetInfo {
+                            end_offset: a.get_offset().0 + 1,
+                            activate_offset,
+                            diff_id: state_diff_id,
+                        },
+                    );
+                    self.fsm.wasm_offset_to_target_offset.insert(
+                        self.machine.state.wasm_inst_offset,
+                        SuspendOffset::Loop(a.get_offset().0),
+                    );
+                    a.emit_mov(
+                        Size::S64,
+                        Location::Memory(GPR::RAX, 0),
+                        Location::GPR(GPR::RAX),
+                    );
+                }
             }
             Operator::Nop => {}
             Operator::MemorySize { reserved } => {
|
@ -703,6 +703,10 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
|
|||||||
symbol_map: em_symbol_map.clone(),
|
symbol_map: em_symbol_map.clone(),
|
||||||
memory_bound_check_mode: MemoryBoundCheckMode::Disable,
|
memory_bound_check_mode: MemoryBoundCheckMode::Disable,
|
||||||
enforce_stack_check: true,
|
enforce_stack_check: true,
|
||||||
|
|
||||||
|
// Kernel loader does not support explicit preemption checkpoints.
|
||||||
|
full_preemption: false,
|
||||||
|
|
||||||
track_state,
|
track_state,
|
||||||
features: options.features.into_backend_features(),
|
features: options.features.into_backend_features(),
|
||||||
backend_specific_config,
|
backend_specific_config,
|
||||||
@@ -717,6 +721,11 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
         CompilerConfig {
             symbol_map: em_symbol_map.clone(),
             track_state,
+
+            // Enable full preemption if state tracking is enabled.
+            // Preemption only makes sense with state information.
+            full_preemption: track_state,
+
             features: options.features.into_backend_features(),
             backend_specific_config,
             ..Default::default()
@@ -813,7 +822,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
         LoaderName::Kernel => Box::new(
             instance
                 .load(::wasmer_kernel_loader::KernelLoader)
-                .map_err(|e| format!("Can't use the local loader: {:?}", e))?,
+                .map_err(|e| format!("Can't use the kernel loader: {:?}", e))?,
         ),
     };
     println!("{:?}", ins.call(index, &args));