Make full preemption an optional feature.

losfair 2020-02-01 01:12:06 +08:00
parent a6c55ea548
commit 455783aa21
3 changed files with 79 additions and 48 deletions

View File

@@ -109,9 +109,24 @@ impl BackendCompilerConfig {
 pub struct CompilerConfig {
     /// Symbol information generated from emscripten; used for more detailed debug messages
     pub symbol_map: Option<HashMap<u32, String>>,
+
+    /// Optionally override the automatically determined memory bound check mode.
     pub memory_bound_check_mode: MemoryBoundCheckMode,
+
+    /// Whether to generate explicit stack checks against a field in `InternalCtx`.
     pub enforce_stack_check: bool,
+
+    /// Whether to enable state tracking. Necessary for managed mode.
     pub track_state: bool,
+
+    /// Whether to enable full preemption checkpoint generation.
+    ///
+    /// This inserts checkpoints at critical locations such as loop backedges and function calls,
+    /// allowing non-cooperative unwinding/task switching.
+    ///
+    /// Enabling this can add a small amount of runtime overhead.
+    pub full_preemption: bool,
+
     pub features: Features,
     // Target info. Presently only supported by LLVM.
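
For embedders, turning the new flag on is a one-field change. A minimal sketch (the derived `Default` is implied by the `..Default::default()` usage later in this commit; everything outside the two flags is illustrative, not part of the diff):

    // Opt into full preemption; checkpoints are only useful together
    // with state tracking (see the wasmer.rs hunk further down).
    let config = CompilerConfig {
        track_state: true,
        full_preemption: true,
        ..Default::default()
    };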

View File

@@ -639,6 +639,7 @@ struct CodegenConfig {
     memory_bound_check_mode: MemoryBoundCheckMode,
     enforce_stack_check: bool,
     track_state: bool,
+    full_preemption: bool,
 }

 impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
@@ -908,6 +909,7 @@ impl ModuleCodeGenerator<X64FunctionCode, X64ExecutionContext, CodegenError>
         memory_bound_check_mode: config.memory_bound_check_mode,
         enforce_stack_check: config.enforce_stack_check,
         track_state: config.track_state,
+        full_preemption: config.full_preemption,
     }));
     Ok(())
 }
@@ -2478,28 +2480,31 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
 // Check interrupt signal without branching
 let activate_offset = a.get_offset().0;
-a.emit_mov(
-    Size::S64,
-    Location::Memory(
-        Machine::get_vmctx_reg(),
-        vm::Ctx::offset_interrupt_signal_mem() as i32,
-    ),
-    Location::GPR(GPR::RAX),
-);
-self.fsm.loop_offsets.insert(
-    a.get_offset().0,
-    OffsetInfo {
-        end_offset: a.get_offset().0 + 1,
-        activate_offset,
-        diff_id: state_diff_id,
-    },
-);
-self.fsm.wasm_function_header_target_offset = Some(SuspendOffset::Loop(a.get_offset().0));
-a.emit_mov(
-    Size::S64,
-    Location::Memory(GPR::RAX, 0),
-    Location::GPR(GPR::RAX),
-);
+if self.config.full_preemption {
+    a.emit_mov(
+        Size::S64,
+        Location::Memory(
+            Machine::get_vmctx_reg(),
+            vm::Ctx::offset_interrupt_signal_mem() as i32,
+        ),
+        Location::GPR(GPR::RAX),
+    );
+    self.fsm.loop_offsets.insert(
+        a.get_offset().0,
+        OffsetInfo {
+            end_offset: a.get_offset().0 + 1,
+            activate_offset,
+            diff_id: state_diff_id,
+        },
+    );
+    self.fsm.wasm_function_header_target_offset =
+        Some(SuspendOffset::Loop(a.get_offset().0));
+    a.emit_mov(
+        Size::S64,
+        Location::Memory(GPR::RAX, 0),
+        Location::GPR(GPR::RAX),
+    );
+}

 if self.machine.state.wasm_inst_offset != usize::MAX {
     return Err(CodegenError {
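
The checkpoint emitted above is branch-free: the first `mov` loads the pointer stored at `vm::Ctx::offset_interrupt_signal_mem()`, and the second dereferences it. To preempt, the host only has to revoke access to that page; the dereference then faults at one of the offsets recorded in `fsm.loop_offsets`, where a fault handler can unwind or switch tasks. A hedged sketch of that host-side idea (not Wasmer's actual runtime code; the `libc` crate and the 4 KiB page size are assumptions):

    // Sketch only: make the signal page unreadable so the next
    // checkpoint's `mov rax, [rax]` raises SIGSEGV at a recorded offset.
    unsafe fn request_preemption(interrupt_signal_mem: *mut u8) {
        let rc = libc::mprotect(
            interrupt_signal_mem as *mut libc::c_void,
            4096, // assumed page size
            libc::PROT_NONE,
        );
        assert_eq!(rc, 0, "mprotect failed");
    }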
@@ -6557,31 +6562,33 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
 a.emit_label(label);
 // Check interrupt signal without branching
-a.emit_mov(
-    Size::S64,
-    Location::Memory(
-        Machine::get_vmctx_reg(),
-        vm::Ctx::offset_interrupt_signal_mem() as i32,
-    ),
-    Location::GPR(GPR::RAX),
-);
-self.fsm.loop_offsets.insert(
-    a.get_offset().0,
-    OffsetInfo {
-        end_offset: a.get_offset().0 + 1,
-        activate_offset,
-        diff_id: state_diff_id,
-    },
-);
-self.fsm.wasm_offset_to_target_offset.insert(
-    self.machine.state.wasm_inst_offset,
-    SuspendOffset::Loop(a.get_offset().0),
-);
-a.emit_mov(
-    Size::S64,
-    Location::Memory(GPR::RAX, 0),
-    Location::GPR(GPR::RAX),
-);
+if self.config.full_preemption {
+    a.emit_mov(
+        Size::S64,
+        Location::Memory(
+            Machine::get_vmctx_reg(),
+            vm::Ctx::offset_interrupt_signal_mem() as i32,
+        ),
+        Location::GPR(GPR::RAX),
+    );
+    self.fsm.loop_offsets.insert(
+        a.get_offset().0,
+        OffsetInfo {
+            end_offset: a.get_offset().0 + 1,
+            activate_offset,
+            diff_id: state_diff_id,
+        },
+    );
+    self.fsm.wasm_offset_to_target_offset.insert(
+        self.machine.state.wasm_inst_offset,
+        SuspendOffset::Loop(a.get_offset().0),
+    );
+    a.emit_mov(
+        Size::S64,
+        Location::Memory(GPR::RAX, 0),
+        Location::GPR(GPR::RAX),
+    );
+}
 }
 Operator::Nop => {}
 Operator::MemorySize { reserved } => {
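
Both sites, the function header above and the loop backedge here, emit the same two-instruction sequence. An illustrative Rust equivalent of what the generated code does (hypothetical types; `Ctx` is reduced to the single field the checkpoint touches):

    // Hypothetical stand-in for vm::Ctx, modeling only the field behind
    // offset_interrupt_signal_mem() in the hunks above.
    struct Ctx {
        interrupt_signal_mem: *mut u8,
    }

    // The two emitted `mov`s amount to: load the signal-page pointer,
    // then touch the page. The loaded value is discarded; the page
    // fault, once the host revokes access, is the whole point.
    unsafe fn preemption_checkpoint(ctx: *const Ctx) {
        let page = (*ctx).interrupt_signal_mem as *const u64;
        let _ = std::ptr::read_volatile(page);
    }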

View File

@@ -703,6 +703,10 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
 symbol_map: em_symbol_map.clone(),
 memory_bound_check_mode: MemoryBoundCheckMode::Disable,
 enforce_stack_check: true,
+
+// The kernel loader does not support explicit preemption checkpoints.
+full_preemption: false,
+
 track_state,
 features: options.features.into_backend_features(),
 backend_specific_config,
@@ -717,6 +721,11 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
 CompilerConfig {
     symbol_map: em_symbol_map.clone(),
     track_state,
+
+    // Enable full preemption only when state tracking is enabled;
+    // preemption only makes sense with state information.
+    full_preemption: track_state,
+
     features: options.features.into_backend_features(),
     backend_specific_config,
     ..Default::default()
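
The two config sites above encode an implicit invariant: `full_preemption` should imply `track_state`, since unwinding at a checkpoint relies on the tracked state. A hypothetical guard (not in this commit) would make the dependency explicit:

    // Hypothetical validation, not part of the commit: reject configs
    // that enable checkpoints without the state tracking they need.
    fn check_preemption_config(config: &CompilerConfig) -> Result<(), String> {
        if config.full_preemption && !config.track_state {
            return Err("full_preemption requires track_state".into());
        }
        Ok(())
    }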
@@ -813,7 +822,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
 LoaderName::Kernel => Box::new(
     instance
         .load(::wasmer_kernel_loader::KernelLoader)
-        .map_err(|e| format!("Can't use the local loader: {:?}", e))?,
+        .map_err(|e| format!("Can't use the kernel loader: {:?}", e))?,
 ),
 };
 println!("{:?}", ins.call(index, &args));