From fb04ba0bce8c87e1f85a1b74405fb03b0959345e Mon Sep 17 00:00:00 2001 From: Lachlan Sneff Date: Mon, 24 Dec 2018 23:06:19 -0500 Subject: [PATCH] Replace webassembly folder with master --- src/webassembly/instance.rs | 294 +++++++++++++++++++++++++---------- src/webassembly/memory.rs | 207 ++++++++++++++++++++++++ src/webassembly/mod.rs | 31 ++-- src/webassembly/module.rs | 4 +- src/webassembly/vmcontext.rs | 151 ++++++++++++++++++ src/webassembly/vmoffsets.rs | 9 ++ 6 files changed, 593 insertions(+), 103 deletions(-) create mode 100644 src/webassembly/memory.rs create mode 100644 src/webassembly/vmcontext.rs create mode 100644 src/webassembly/vmoffsets.rs diff --git a/src/webassembly/instance.rs b/src/webassembly/instance.rs index fee7d6577..a958f27c2 100644 --- a/src/webassembly/instance.rs +++ b/src/webassembly/instance.rs @@ -20,6 +20,7 @@ use std::iter::FromIterator; use std::iter::Iterator; use std::mem::size_of; use std::ptr::write_unaligned; +use std::sync::Arc; use std::{fmt, mem, slice}; use super::super::common::slice::{BoundedSlice, UncheckedSlice}; @@ -29,8 +30,6 @@ use super::libcalls; use super::memory::LinearMemory; use super::module::{Export, ImportableExportable, Module}; use super::relocation::{Reloc, RelocSink, RelocationType}; -use super::vm; -use super::backing::{LocalBacking, ImportsBacking}; type TablesSlice = UncheckedSlice>; // TODO: this should be `type MemoriesSlice = UncheckedSlice>;`, but that crashes for some reason. @@ -75,6 +74,81 @@ pub struct EmscriptenData { pub stack_alloc: extern "C" fn(u32, &Instance) -> u32, } +impl EmscriptenData { + pub fn new(module: &Module, instance: &Instance) -> Self { + unsafe { + debug!("emscripten::new"); + let malloc_export = module.info.exports.get("_malloc"); + let free_export = module.info.exports.get("_free"); + let memalign_export = module.info.exports.get("_memalign"); + let memset_export = module.info.exports.get("_memset"); + let stack_alloc_export = module.info.exports.get("stackAlloc"); + + let mut malloc_addr = 0 as *const u8; + let mut free_addr = 0 as *const u8; + let mut memalign_addr = 0 as *const u8; + let mut memset_addr = 0 as *const u8; + let mut stack_alloc_addr = 0 as _; + + if let Some(Export::Function(malloc_index)) = malloc_export { + malloc_addr = instance.get_function_pointer(*malloc_index); + } + + if let Some(Export::Function(free_index)) = free_export { + free_addr = instance.get_function_pointer(*free_index); + } + + if let Some(Export::Function(memalign_index)) = memalign_export { + memalign_addr = instance.get_function_pointer(*memalign_index); + } + + if let Some(Export::Function(memset_index)) = memset_export { + memset_addr = instance.get_function_pointer(*memset_index); + } + + if let Some(Export::Function(stack_alloc_index)) = stack_alloc_export { + stack_alloc_addr = instance.get_function_pointer(*stack_alloc_index); + } + + EmscriptenData { + malloc: mem::transmute(malloc_addr), + free: mem::transmute(free_addr), + memalign: mem::transmute(memalign_addr), + memset: mem::transmute(memset_addr), + stack_alloc: mem::transmute(stack_alloc_addr), + } + } + } + + // Emscripten __ATINIT__ + pub fn atinit(&self, module: &Module, instance: &Instance) -> Result<(), String> { + debug!("emscripten::atinit"); + if let Some(&Export::Function(environ_constructor_index)) = + module.info.exports.get("___emscripten_environ_constructor") + { + debug!("emscripten::___emscripten_environ_constructor"); + let ___emscripten_environ_constructor: extern "C" fn(&Instance) = + get_instance_function!(instance, 
environ_constructor_index); + call_protected!(___emscripten_environ_constructor(&instance)) + .map_err(|err| format!("{}", err))?; + }; + // TODO: We also need to handle TTY.init() and SOCKFS.root = FS.mount(SOCKFS, {}, null) + Ok(()) + } + + // Emscripten __ATEXIT__ + pub fn atexit(&self, _module: &Module, _instance: &Instance) -> Result<(), String> { + debug!("emscripten::atexit"); + use libc::fflush; + use std::ptr; + // Flush all open streams + unsafe { + fflush(ptr::null_mut()); + }; + Ok(()) + } +} + impl fmt::Debug for EmscriptenData { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("EmscriptenData") @@ -96,16 +170,25 @@ pub enum InstanceABI { #[derive(Debug)] #[repr(C)] pub struct Instance { - pub vmctx: vm::Ctx, // C-like pointers to data (heaps, globals, tables) pub data_pointers: DataPointers, + /// WebAssembly table data + // pub tables: Arc>>>, + pub tables: Arc>>, + + /// WebAssembly linear memory data + pub memories: Arc>, + + /// WebAssembly global variable data + pub globals: Vec, + /// Webassembly functions - finalized_funcs: Box<[*const vm::Func]>, + // functions: Vec, + functions: Vec>, - backing: LocalBacking, - - imports: ImportsBacking, + /// Imported functions + import_functions: Vec<*const u8>, /// The module start function pub start_func: Option, @@ -114,14 +197,6 @@ pub struct Instance { pub emscripten_data: Option, } -impl Instance { - /// Shortcut for converting from a `vm::Ctx` pointer to a reference to the `Instance`. - /// This works because of the `vm::Ctx` is the first field of the `Instance`. - pub unsafe fn from_vmctx<'a>(ctx: *mut vm::Ctx) -> &'a mut Instance { - &mut *(ctx as *mut Instance) - } -} - /// Contains pointers to data (heaps, globals, tables) needed /// by Cranelift. /// NOTE: Rearranging the fields will break the memory arrangement model @@ -421,13 +496,114 @@ impl Instance { debug!("Instance - Instantiating tables"); // Instantiate tables { - + // Reserve space for tables + tables.reserve_exact(module.info.tables.len()); + + // Get tables in module + for table in &module.info.tables { + let table: Vec = match table.import_name.as_ref() { + Some((module_name, field_name)) => { + let imported = + import_object.get(&module_name.as_str(), &field_name.as_str()); + match imported { + Some(ImportValue::Table(t)) => t.to_vec(), + None => { + if options.mock_missing_tables { + debug!( + "The Imported table {}.{} is not provided, therefore will be mocked.", + module_name, field_name + ); + let len = table.entity.minimum as usize; + let mut v = Vec::with_capacity(len); + v.resize(len, 0); + v + } else { + panic!( + "Imported table value was not provided ({}.{})", + module_name, field_name + ) + } + } + _ => panic!( + "Expected global table, but received {:?} ({}.{})", + imported, module_name, field_name + ), + } + } + None => { + let len = table.entity.minimum as usize; + let mut v = Vec::with_capacity(len); + v.resize(len, 0); + v + } + }; + tables.push(table); + } + + // instantiate tables + for table_element in &module.info.table_elements { + let base = match table_element.base { + Some(global_index) => globals_data[global_index.index()] as usize, + None => 0, + }; + + let table = &mut tables[table_element.table_index.index()]; + for (i, func_index) in table_element.elements.iter().enumerate() { + // since the table just contains functions in the MVP + // we get the address of the specified function indexes + // to populate the table. 
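+                    //
+                    // For example, element `i` of a segment with offset `off` ends up in
+                    // `table[base + off + i]` as the raw address of the resolved function.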
+ + // let func_index = *elem_index - module.info.imported_funcs.len() as u32; + // let func_addr = functions[func_index.index()].as_ptr(); + let func_addr = get_function_addr(&func_index, &import_functions, &functions); + table[base + table_element.offset + i] = func_addr as _; + } + } } debug!("Instance - Instantiating memories"); // Instantiate memories { - + // Reserve space for memories + memories.reserve_exact(module.info.memories.len()); + + // Get memories in module + for memory in &module.info.memories { + let memory = memory.entity; + // If we use emscripten, we set a fixed initial and maximum + debug!( + "Instance - init memory ({}, {:?})", + memory.minimum, memory.maximum + ); + let memory = if options.abi == InstanceABI::Emscripten { + // We use MAX_PAGES, so at the end the result is: + // (initial * LinearMemory::PAGE_SIZE) == LinearMemory::DEFAULT_HEAP_SIZE + // However, it should be: (initial * LinearMemory::PAGE_SIZE) == 16777216 + LinearMemory::new(LinearMemory::MAX_PAGES, None) + } else { + LinearMemory::new(memory.minimum, memory.maximum.map(|m| m as u32)) + }; + memories.push(memory); + } + + for init in &module.info.data_initializers { + debug_assert!(init.base.is_none(), "globalvar base not supported yet"); + let offset = init.offset; + let mem = &mut memories[init.memory_index.index()]; + let end_of_init = offset + init.data.len(); + if end_of_init > mem.current_size() { + let grow_pages = (end_of_init / LinearMemory::PAGE_SIZE as usize) + 1; + mem.grow(grow_pages as u32) + .expect("failed to grow memory for data initializers"); + } + let to_init = &mut mem[offset..offset + init.data.len()]; + to_init.copy_from_slice(&init.data); + } + if options.abi == InstanceABI::Emscripten { + debug!("emscripten::setup memory"); + crate::apis::emscripten::emscripten_set_up_memory(&mut memories[0]); + debug!("emscripten::finish setup memory"); + } } let start_func: Option = @@ -453,83 +629,37 @@ impl Instance { tables: tables_pointer[..].into(), }; - let emscripten_data = if options.abi == InstanceABI::Emscripten { - unsafe { - debug!("emscripten::initiating data"); - let malloc_export = module.info.exports.get("_malloc"); - let free_export = module.info.exports.get("_free"); - let memalign_export = module.info.exports.get("_memalign"); - let memset_export = module.info.exports.get("_memset"); - let stack_alloc_export = module.info.exports.get("stackAlloc"); - - let mut malloc_addr = 0 as *const u8; - let mut free_addr = 0 as *const u8; - let mut memalign_addr = 0 as *const u8; - let mut memset_addr = 0 as *const u8; - let mut stack_alloc_addr = 0 as _; - - if malloc_export.is_none() - && free_export.is_none() - && memalign_export.is_none() - && memset_export.is_none() - { - None - } else { - if let Some(Export::Function(malloc_index)) = malloc_export { - malloc_addr = - get_function_addr(&malloc_index, &import_functions, &functions); - } - - if let Some(Export::Function(free_index)) = free_export { - free_addr = get_function_addr(&free_index, &import_functions, &functions); - } - - if let Some(Export::Function(memalign_index)) = memalign_export { - memalign_addr = - get_function_addr(&memalign_index, &import_functions, &functions); - } - - if let Some(Export::Function(memset_index)) = memset_export { - memset_addr = - get_function_addr(&memset_index, &import_functions, &functions); - } - - if let Some(Export::Function(stack_alloc_index)) = stack_alloc_export { - stack_alloc_addr = - get_function_addr(&stack_alloc_index, &import_functions, &functions); - } - - Some(EmscriptenData 
{ - malloc: mem::transmute(malloc_addr), - free: mem::transmute(free_addr), - memalign: mem::transmute(memalign_addr), - memset: mem::transmute(memset_addr), - stack_alloc: mem::transmute(stack_alloc_addr), - }) - } - } - } else { - None - }; - - Ok(Instance { + let mut instance = Instance { data_pointers, - tables: tables.into_iter().collect(), - memories: memories.into_iter().collect(), + tables: Arc::new(tables.into_iter().collect()), // tables.into_iter().map(|table| RwLock::new(table)).collect()), + memories: Arc::new(memories.into_iter().collect()), globals, functions, import_functions, start_func, - emscripten_data, - }) + emscripten_data: None, + }; + + if options.abi == InstanceABI::Emscripten { + instance.emscripten_data = Some(EmscriptenData::new(module, &instance)); + } + + Ok(instance) } pub fn memory_mut(&mut self, memory_index: usize) -> &mut LinearMemory { - self.memories + let memories = Arc::get_mut(&mut self.memories).unwrap_or_else(|| { + panic!("Can't get memories as a mutable pointer (there might exist more mutable pointers to the memories)") + }); + memories .get_mut(memory_index) .unwrap_or_else(|| panic!("no memory for index {}", memory_index)) } + pub fn memories(&self) -> Arc> { + self.memories.clone() + } + pub fn get_function_pointer(&self, func_index: FuncIndex) -> *const u8 { get_function_addr(&func_index, &self.import_functions, &self.functions) } diff --git a/src/webassembly/memory.rs b/src/webassembly/memory.rs new file mode 100644 index 000000000..cc69a66f2 --- /dev/null +++ b/src/webassembly/memory.rs @@ -0,0 +1,207 @@ +//! The webassembly::Memory() constructor creates a new Memory object which is +//! a structure that holds the raw bytes of memory accessed by a +//! webassembly::Instance. +//! A memory created by Rust or in WebAssembly code will be accessible and +//! mutable from both Rust and WebAssembly. +use region; +use std::ops::{Deref, DerefMut}; +use std::slice; + +use crate::common::mmap::Mmap; + +/// A linear memory instance. +#[derive(Debug)] +pub struct LinearMemory { + // The mmap allocation + mmap: Mmap, + + // current number of wasm pages + current: u32, + + // The maximum size the WebAssembly Memory is allowed to grow + // to, in units of WebAssembly pages. When present, the maximum + // parameter acts as a hint to the engine to reserve memory up + // front. However, the engine may ignore or clamp this reservation + // request. In general, most WebAssembly modules shouldn't need + // to set a maximum. + maximum: Option, + + // The size of the extra guard pages after the end. + // Is used to optimize loads and stores with constant offsets. + offset_guard_size: usize, +} + +/// It holds the raw bytes of memory accessed by a WebAssembly Instance +impl LinearMemory { + pub const PAGE_SIZE: u32 = 65536; + pub const MAX_PAGES: u32 = 65536; + pub const DEFAULT_HEAP_SIZE: usize = 1 << 32; // 4 GiB + pub const DEFAULT_GUARD_SIZE: usize = 1 << 31; // 2 GiB + pub const DEFAULT_SIZE: usize = Self::DEFAULT_HEAP_SIZE + Self::DEFAULT_GUARD_SIZE; // 6 GiB + + /// Create a new linear memory instance with specified initial and maximum number of pages. + /// + /// `maximum` cannot be set to more than `65536` pages. 
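+    ///
+    /// A minimal usage sketch: `LinearMemory::new(1, None)` reserves the full
+    /// 6 GiB region and maps a single 64 KiB wasm page as read-write.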
+ pub fn new(initial: u32, maximum: Option) -> Self { + assert!(initial <= Self::MAX_PAGES); + assert!(maximum.is_none() || maximum.unwrap() <= Self::MAX_PAGES); + debug!( + "Instantiate LinearMemory(initial={:?}, maximum={:?})", + initial, maximum + ); + + let mut mmap = Mmap::with_size(Self::DEFAULT_SIZE).expect("Can't create mmap"); + + let base = mmap.as_mut_ptr(); + + // map initial pages as readwrite since the inital mmap is mapped as not accessible. + if initial != 0 { + unsafe { + region::protect( + base, + initial as usize * Self::PAGE_SIZE as usize, + region::Protection::ReadWrite, + ) + } + .expect("unable to make memory inaccessible"); + } + + debug!("LinearMemory instantiated"); + debug!( + " - usable: {:#x}..{:#x}", + base as usize, + (base as usize) + LinearMemory::DEFAULT_HEAP_SIZE + ); + debug!( + " - guard: {:#x}..{:#x}", + (base as usize) + LinearMemory::DEFAULT_HEAP_SIZE, + (base as usize) + LinearMemory::DEFAULT_SIZE + ); + Self { + mmap, + current: initial, + offset_guard_size: LinearMemory::DEFAULT_GUARD_SIZE, + maximum, + } + } + + /// Returns an base address of this linear memory. + pub fn base(&mut self) -> *mut u8 { + self.mmap.as_mut_ptr() as _ + } + + /// Returns a number of allocated wasm pages. + pub fn current_size(&self) -> usize { + self.current as usize * Self::PAGE_SIZE as usize + } + + pub fn current_pages(&self) -> u32 { + self.current + } + + /// Returns the maximum number of wasm pages allowed. + pub fn maximum_size(&self) -> u32 { + self.maximum.unwrap_or(Self::MAX_PAGES) + } + + /// Grow memory by the specified amount of pages. + /// + /// Returns `None` if memory can't be grown by the specified amount + /// of pages. + pub fn grow(&mut self, add_pages: u32) -> Option { + debug!("grow_memory called!"); + if add_pages == 0 { + return Some(self.current as _); + } + + let prev_pages = self.current; + + let new_pages = match self.current.checked_add(add_pages) { + Some(new_pages) => new_pages, + None => return None, + }; + + if let Some(val) = self.maximum { + if new_pages > val { + return None; + } + // Wasm linear memories are never allowed to grow beyond what is + // indexable. If the memory has no maximum, enforce the greatest + // limit here. + } else if new_pages >= Self::MAX_PAGES { + return None; + } + + let prev_bytes = (prev_pages * Self::PAGE_SIZE) as usize; + let new_bytes = (new_pages * Self::PAGE_SIZE) as usize; + + // if new_bytes > self.mmap.len() - self.offset_guard_size { + unsafe { + region::protect( + self.mmap.as_ptr().add(prev_bytes) as _, + new_bytes - prev_bytes, + region::Protection::ReadWrite, + ) + } + .expect("unable to make memory inaccessible"); + // }; + // if new_bytes > self.mmap.len() - self.offset_guard_size { + // // If we have no maximum, this is a "dynamic" heap, and it's allowed to move. + // assert!(self.maximum.is_none()); + // let guard_bytes = self.offset_guard_size; + // let request_bytes = new_bytes.checked_add(guard_bytes)?; + + // let mut new_mmap = Mmap::with_size(request_bytes).ok()?; + + // // Make the offset-guard pages inaccessible. 
+ // unsafe { + // region::protect( + // new_mmap.as_ptr().add(new_bytes), + // guard_bytes, + // region::Protection::Read | region::Protection::Write, + // // region::Protection::None, + // ) + // } + // .expect("unable to make memory inaccessible"); + + // let copy_len = self.mmap.len() - self.offset_guard_size; + // new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.mmap.as_slice()[..copy_len]); + + // self.mmap = new_mmap; + // } + + self.current = new_pages; + + Some(prev_pages as i32) + } +} + +// Not comparing based on memory content. That would be inefficient. +impl PartialEq for LinearMemory { + fn eq(&self, other: &LinearMemory) -> bool { + self.current == other.current && self.maximum == other.maximum + } +} + +impl Deref for LinearMemory { + type Target = [u8]; + fn deref(&self) -> &[u8] { + unsafe { + slice::from_raw_parts( + self.mmap.as_ptr() as _, + self.current as usize * Self::PAGE_SIZE as usize, + ) + } + } +} + +impl DerefMut for LinearMemory { + fn deref_mut(&mut self) -> &mut [u8] { + unsafe { + slice::from_raw_parts_mut( + self.mmap.as_mut_ptr() as _, + self.current as usize * Self::PAGE_SIZE as usize, + ) + } + } +} diff --git a/src/webassembly/mod.rs b/src/webassembly/mod.rs index 4036628c8..2183c8f62 100644 --- a/src/webassembly/mod.rs +++ b/src/webassembly/mod.rs @@ -6,15 +6,15 @@ pub mod memory; pub mod module; pub mod relocation; pub mod utils; -pub mod vm; -pub mod table; -pub mod backing; +pub mod vmcontext; +pub mod vmoffsets; use cranelift_codegen::{ isa, settings::{self, Configurable}, }; use cranelift_wasm::ModuleEnvironment; +use std::io::{self, Write}; use std::panic; use std::str::FromStr; use target_lexicon; @@ -154,7 +154,7 @@ pub fn get_isa() -> Box { isa::lookup(triple!("x86_64")).unwrap().finish(flags) } -fn store_module_arguments(path: &str, args: Vec<&str>, instance: &mut Instance) -> (u32, u32) { +fn store_module_arguments(path: &str, args: Vec<&str>, instance: &Instance) -> (u32, u32) { let argc = args.len() + 1; let (argv_offset, argv_slice): (_, &mut [u32]) = @@ -212,19 +212,9 @@ pub fn start_instance( path: &str, args: Vec<&str>, ) -> Result<(), String> { - if is_emscripten_module(&module) { - // Emscripten __ATINIT__ - if let Some(&Export::Function(environ_constructor_index)) = - module.info.exports.get("___emscripten_environ_constructor") - { - debug!("emscripten::___emscripten_environ_constructor"); - let ___emscripten_environ_constructor: extern "C" fn(&Instance) = - get_instance_function!(instance, environ_constructor_index); - call_protected!(___emscripten_environ_constructor(&instance)) - .map_err(|err| format!("{}", err))?; - }; + if let Some(ref emscripten_data) = &instance.emscripten_data { + emscripten_data.atinit(module, instance)?; - // TODO: We also need to handle TTY.init() and SOCKFS.root = FS.mount(SOCKFS, {}, null) let func_index = match module.info.exports.get("_main") { Some(&Export::Function(index)) => index, _ => panic!("_main emscripten function not found"), @@ -233,7 +223,7 @@ pub fn start_instance( let sig_index = module.get_func_type(func_index); let signature = module.get_signature(sig_index); let num_params = signature.params.len(); - match num_params { + let result = match num_params { 2 => { let main: extern "C" fn(u32, u32, &Instance) = get_instance_function!(instance, func_index); @@ -249,8 +239,11 @@ pub fn start_instance( num_params ), } - .map_err(|err| format!("{}", err)) - // TODO: We should implement emscripten __ATEXIT__ + .map_err(|err| format!("{}", err)); + + emscripten_data.atexit(module, 
instance)?; + + result } else { let func_index = instance diff --git a/src/webassembly/module.rs b/src/webassembly/module.rs index 84440a137..5a1b9f951 100644 --- a/src/webassembly/module.rs +++ b/src/webassembly/module.rs @@ -6,7 +6,7 @@ use std::string::String; use std::vec::Vec; use cranelift_codegen::cursor::FuncCursor; -use cranelift_codegen::ir::immediates::{Offset32, Uimm64}; +use cranelift_codegen::ir::immediates::{Imm64, Offset32, Uimm64}; use cranelift_codegen::ir::types::*; use cranelift_codegen::ir::{ self, AbiParam, ArgumentPurpose, ExtFuncData, ExternalName, FuncRef, InstBuilder, Signature, @@ -199,7 +199,7 @@ pub struct DataInitializer { #[derive(Clone, Debug)] pub enum TableElement { /// A element that, if called, produces a trap. - Trap, + Trap(), /// A function. Function(FuncIndex), } diff --git a/src/webassembly/vmcontext.rs b/src/webassembly/vmcontext.rs new file mode 100644 index 000000000..cb5e97a2a --- /dev/null +++ b/src/webassembly/vmcontext.rs @@ -0,0 +1,151 @@ +use crate::webassembly::vmoffsets::VMOffsets; +use cranelift_wasm::{ + DefinedFuncIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex, + GlobalIndex, MemoryIndex, SignatureIndex, TableIndex, +}; + +#[repr(C)] +pub struct VMContext { + /// A pointer to an array of imported functions, indexed by `FuncIndex`. + imported_functions: *const *const VMFunctionBody, + + /// A pointer to an array of imported tables, indexed by `TableIndex`. + imported_tables: *mut VMTableImport, + + /// A pointer to an array of imported memories, indexed by `MemoryIndex, + imported_memories: *mut VMMemoryImport, + + /// A pointer to an array of imported globals, indexed by `GlobalIndex`. + imported_globals: *mut VMGlobalImport, + + /// A pointer to an array of locally-defined tables, indexed by `DefinedTableIndex`. + tables: *mut VMTableDefinition, + + /// A pointer to an array of locally-defined memories, indexed by `DefinedMemoryIndex`. + memories: *mut VMMemoryDefinition, + + /// A pointer to an array of locally-defined globals, indexed by ``DefinedGlobalIndex`. + globals: *mut VMGlobalDefinition, + + /// Signature identifiers for signature-checked indirect calls. + signature_ids: *mut VMSharedSigIndex, +} + +/// Used to provide type safety for passing around function pointers. +/// The typesystem ensures this cannot be dereferenced. +pub enum VMFunctionBody {} + +/// Definition of a table used by the VM. (obviously) +#[repr(C)] +pub struct VMTableDefinition { + /// pointer to the elements in the table. + pub base: *mut u8, + /// Number of elements in the table (NOT necessarily the size of the table in bytes!). + pub current_elements: usize, +} + +impl VMTableDefinition { + pub fn offset_base(offsets: &VMOffsets) -> u8 { + 0 * offsets.ptr_size + } + + pub fn offset_current_elements(offsets: &VMOffsets) -> u8 { + 1 * offsets.ptr_size + } +} + +#[repr(C)] +pub struct VMTableImport { + /// A pointer to the table definition. + pub table: *mut VMTableDefinition, + /// A pointer to the vmcontext that owns this table definition. + pub vmctx: *mut VMContext, +} + +impl VMTableImport { + pub fn offset_table(offsets: &VMOffsets) -> u8 { + 0 * offsets.ptr_size + } + + pub fn offset_vmctx(offsets: &VMOffsets) -> u8 { + 1 * offsets.ptr_size + } +} + +/// Definition of a memory used by the VM. +#[repr(C)] +pub struct VMMemoryDefinition { + /// Pointer to the bottom of linear memory. + pub base: *mut u8, + /// Current logical size of this linear memory in bytes. 
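+    ///
+    /// The `offset_*` helpers below mirror this `#[repr(C)]` layout; e.g. for
+    /// `VMOffsets::new(8)`, `offset_base` is 0 and `offset_size` is 8.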
+ pub size: usize, +} + +impl VMMemoryDefinition { + pub fn offset_base(offsets: &VMOffsets) -> u8 { + 0 * offsets.ptr_size + } + + pub fn offset_size(offsets: &VMOffsets) -> u8 { + 1 * offsets.ptr_size + } +} + +#[repr(C)] +pub struct VMMemoryImport { + /// A pointer to the memory definition. + pub memory: *mut VMMemoryDefinition, + /// A pointer to the vmcontext that owns this memory definition. + pub vmctx: *mut VMContext, +} + +impl VMMemoryImport { + pub fn offset_memory(offsets: &VMOffsets) -> u8 { + 0 * offsets.ptr_size + } + + pub fn offset_vmctx(offsets: &VMOffsets) -> u8 { + 1 * offsets.ptr_size + } +} + +/// Definition of a global used by the VM. +#[repr(C, align(8))] +pub struct VMGlobalDefinition { + pub data: [u8; 8], +} + +#[repr(C)] +pub struct VMGlobalImport { + pub globals: *mut VMGlobalDefinition, +} + +impl VMGlobalImport { + pub fn offset_globals(offsets: &VMOffsets) -> u8 { + 0 * offsets.ptr_size + } +} + +#[repr(C)] +pub struct VMSharedSigIndex(u32); + +#[repr(C)] +pub struct VMCallerCheckedAnyfunc { + pub func: *const VMFunctionBody, + pub type_index: VMSharedSigIndex, + pub vmctx: *mut VMContext, +} + +impl VMCallerCheckedAnyfunc { + pub fn offset_func(offsets: &VMOffsets) -> u8 { + 0 * offsets.ptr_size + } + + pub fn offset_type_index(offsets: &VMOffsets) -> u8 { + 1 * offsets.ptr_size + } + + pub fn offset_vmctx(offsets: &VMOffsets) -> u8 { + 2 * offsets.ptr_size + } +} diff --git a/src/webassembly/vmoffsets.rs b/src/webassembly/vmoffsets.rs new file mode 100644 index 000000000..267787142 --- /dev/null +++ b/src/webassembly/vmoffsets.rs @@ -0,0 +1,9 @@ +pub struct VMOffsets { + pub(in crate::webassembly) ptr_size: u8, +} + +impl VMOffsets { + pub fn new(ptr_size: u8) -> Self { + Self { ptr_size } + } +}