Add touch-ups to new memory api

This commit is contained in:
Lachlan Sneff 2019-02-05 10:20:04 -08:00
parent f75006c062
commit 8a8290b155
5 changed files with 55 additions and 43 deletions

View File

@ -97,9 +97,7 @@ impl LocalBacking {
assert!(memory_desc.minimum.bytes().0 >= data_top); assert!(memory_desc.minimum.bytes().0 >= data_top);
let mem = &memories[local_memory_index]; let mem = &memories[local_memory_index];
for (mem_byte, data_byte) in mem for (mem_byte, data_byte) in mem.view()[init_base..init_base + init.data.len()]
.view(init_base..init_base + init.data.len())
.unwrap()
.iter() .iter()
.zip(init.data.iter()) .zip(init.data.iter())
{ {

View File

@ -29,22 +29,22 @@ pub trait IntCast:
macro_rules! intcast { macro_rules! intcast {
($($type:ident)+) => { ($($type:ident)+) => {
$( $(
impl IntCast for Wrapping<$type> { impl IntCast for $type {
type Public = $type; type Public = $type;
fn from(u: usize) -> Self { fn from(u: usize) -> Self {
Wrapping(u as $type) u as $type
} }
fn to(self) -> usize { fn to(self) -> usize {
self.0 as usize self as usize
} }
fn new(p: $type) -> Self { fn new(p: $type) -> Self {
Wrapping(p) p
} }
fn unwrap(self) -> $type { fn unwrap(self) -> $type {
self.0 self
} }
} }
)+ )+

View File

@ -9,14 +9,9 @@ use crate::{
vm, vm,
}; };
use std::{ use std::{
cell::{Cell, Ref, RefCell, RefMut}, cell::{Cell, RefCell},
fmt, fmt, mem, ptr,
marker::PhantomData,
mem,
ops::{Bound, Deref, DerefMut, Index, RangeBounds},
ptr,
rc::Rc, rc::Rc,
slice,
}; };
pub use self::atomic::Atomic; pub use self::atomic::Atomic;
@ -35,6 +30,9 @@ enum MemoryVariant {
Shared(SharedMemory), Shared(SharedMemory),
} }
/// A shared or unshared wasm linear memory.
///
/// A `Memory` represents the memory used by a wasm instance.
#[derive(Clone)] #[derive(Clone)]
pub struct Memory { pub struct Memory {
desc: MemoryDescriptor, desc: MemoryDescriptor,
@ -98,36 +96,53 @@ impl Memory {
} }
} }
pub fn view<T: ValueType, R: RangeBounds<usize>>(&self, range: R) -> Option<MemoryView<T>> { /// Return a "view" of the currently accessible memory. By
/// default, the view is unsynchronized, using regular memory
/// accesses. You can force a memory view to use atomic accesses
/// by calling the [`atomically`] method.
///
/// [`atomically`]: memory/struct.MemoryView.html#method.atomically
///
/// # Notes:
///
/// This method is safe (as in, it won't cause the host to crash or have UB),
/// but it doesn't obey Rust's rules against data races.
/// Therefore, if this memory is shared between multiple threads, a single memory
/// location can be mutated concurrently without synchronization.
///
/// # Usage:
///
/// ```
/// # use wasmer_runtime_core::memory::{Memory, MemoryView};
/// # use std::sync::atomic::Ordering;
/// # fn view_memory(memory: Memory) {
/// // Without synchronization.
/// let view: MemoryView<u8> = memory.view();
/// for byte in view[0x1000 .. 0x1010].iter().map(|cell| cell.get()) {
/// println!("byte: {}", byte);
/// }
///
/// // With synchronization.
/// let atomic_view = view.atomically();
/// for byte in atomic_view[0x1000 .. 0x1010].iter().map(|atom| atom.load(Ordering::SeqCst)) {
/// println!("byte: {}", byte);
/// }
/// # }
/// ```
pub fn view<T: ValueType>(&self) -> MemoryView<T> {
let vm::LocalMemory { let vm::LocalMemory {
base, base,
bound, bound: _,
memory: _, memory: _,
} = unsafe { *self.vm_local_memory() }; } = unsafe { *self.vm_local_memory() };
let range_start = match range.start_bound() { let length = self.size().bytes().0 / mem::size_of::<T>();
Bound::Included(start) => *start,
Bound::Excluded(start) => *start + 1,
Bound::Unbounded => 0,
};
let range_end = match range.end_bound() { unsafe { MemoryView::new(base as _, length as u32) }
Bound::Included(end) => *end + 1,
Bound::Excluded(end) => *end,
Bound::Unbounded => bound as usize,
};
let length = range_end - range_start;
let size_in_bytes = mem::size_of::<T>() * length;
if range_end < range_start || range_start + size_in_bytes >= bound {
return None;
}
Some(unsafe { MemoryView::new(base as _, length as u32) })
} }
/// Convert this memory to a shared memory if the shared flag
/// is present in the description used to create it.
pub fn shared(self) -> Option<SharedMemory> { pub fn shared(self) -> Option<SharedMemory> {
if self.desc.shared { if self.desc.shared {
Some(SharedMemory { desc: self.desc }) Some(SharedMemory { desc: self.desc })
@ -139,7 +154,7 @@ impl Memory {
pub(crate) fn vm_local_memory(&self) -> *mut vm::LocalMemory { pub(crate) fn vm_local_memory(&self) -> *mut vm::LocalMemory {
match &self.variant { match &self.variant {
MemoryVariant::Unshared(unshared_mem) => unshared_mem.vm_local_memory(), MemoryVariant::Unshared(unshared_mem) => unshared_mem.vm_local_memory(),
MemoryVariant::Shared(shared_mem) => unimplemented!(), MemoryVariant::Shared(_) => unimplemented!(),
} }
} }
} }

View File

@ -28,8 +28,8 @@ where
} }
} }
impl<'a, T> MemoryView<'a, T, NonAtomically> { impl<'a, T: IntCast> MemoryView<'a, T, NonAtomically> {
pub fn atomically(self) -> MemoryView<'a, T, Atomically> { pub fn atomically(&self) -> MemoryView<'a, T, Atomically> {
MemoryView { MemoryView {
ptr: self.ptr, ptr: self.ptr,
length: self.length, length: self.length,

View File

@ -106,12 +106,11 @@ impl Ctx {
/// ``` /// ```
/// # use wasmer_runtime_core::{ /// # use wasmer_runtime_core::{
/// # vm::Ctx, /// # vm::Ctx,
/// # memory::Memory,
/// # }; /// # };
/// fn read_memory(ctx: &Ctx) -> u8 { /// fn read_memory(ctx: &Ctx) -> u8 {
/// let first_memory: &Memory = ctx.memory(0); /// let first_memory = ctx.memory(0);
/// // Read the first byte of that linear memory. /// // Read the first byte of that linear memory.
/// first_memory.access()[0] /// first_memory.view()[0].get()
/// } /// }
/// ``` /// ```
pub fn memory(&self, mem_index: u32) -> &Memory { pub fn memory(&self, mem_index: u32) -> &Memory {