wiggle: add initial support for shared memory (bytecodealliance#5225)
This change is the first in a series of changes to support shared memory
in Wiggle. Since Wiggle was written under the assumption of
single-threaded guest-side access, this change introduces a `shared`
field to guest memories in order to flag when that assumption does not
hold. For now, `shared` is always set to `false`; once a few more pieces
are in place, it will be set dynamically when a shared memory is
detected, e.g., in a change like bytecodealliance#5054.

Using the `shared` field, we can now decide to load Wiggle values
differently under the new assumptions. This change turns the guest
`T::read` and `T::write` calls into `Relaxed` atomic loads and stores in
order to maintain WebAssembly's expected memory consistency guarantees.
We choose Rust's `Relaxed` ordering here to match the `Unordered` memory
consistency described in the [memory model] section of the ECMAScript
specification. These relaxed accesses are performed unconditionally,
since we expect the performance difference between a relaxed access and
an added is-this-shared branch to be negligible.

[memory model]: https://tc39.es/ecma262/multipage/memory-model.html#sec-memory-model
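
As a standalone sketch of the pattern this change adopts (not the actual
Wiggle code — the buffer, offsets, and function names here are made up),
a validated, aligned location in guest memory is reinterpreted as an
atomic and accessed with `Relaxed` ordering:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Force 4-byte alignment so the casts below are sound; real linear
// memory is page-aligned, and Wiggle validates size and alignment
// before every access.
#[repr(C, align(4))]
struct Mem([u8; 16]);

fn read_u32_relaxed(buf: &[u8], offset: usize) -> u32 {
    assert!(offset % 4 == 0 && offset + 4 <= buf.len());
    // Reinterpret the validated location as an atomic and load it with
    // `Relaxed` ordering, mirroring ECMAScript's `Unordered` accesses.
    let atomic: &AtomicU32 = unsafe { &*buf.as_ptr().add(offset).cast::<AtomicU32>() };
    u32::from_le(atomic.load(Ordering::Relaxed))
}

fn write_u32_relaxed(buf: &mut [u8], offset: usize, val: u32) {
    assert!(offset % 4 == 0 && offset + 4 <= buf.len());
    let atomic: &AtomicU32 = unsafe { &*buf.as_mut_ptr().add(offset).cast::<AtomicU32>() };
    atomic.store(val.to_le(), Ordering::Relaxed);
}

fn main() {
    let mut mem = Mem([0; 16]);
    write_u32_relaxed(&mut mem.0, 4, 0xdead_beef);
    assert_eq!(read_u32_relaxed(&mem.0, 4), 0xdead_beef);
}
```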

Since the 128-bit scalar types have no `Atomic*` equivalents, we remove
their `T::read` and `T::write` implementations here; no WASI
implementation in this project uses them.
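
To see why these were dropped rather than ported: Rust has no stable
`AtomicU128`, and composing a 128-bit access from two 64-bit halves is
not a single atomic access. A hypothetical sketch of that (rejected)
approach:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Hypothetical and deliberately wrong for shared memory: a concurrent
// writer updating both halves between the two loads can be observed
// "torn", yielding a u128 value that was never stored as a whole.
fn torn_read_u128(lo: &AtomicU64, hi: &AtomicU64) -> u128 {
    let low = lo.load(Ordering::Relaxed) as u128;
    let high = hi.load(Ordering::Relaxed) as u128;
    (high << 64) | low
}

fn main() {
    let (lo, hi) = (AtomicU64::new(1), AtomicU64::new(2));
    assert_eq!(torn_read_u128(&lo, &hi), (2u128 << 64) | 1);
}
```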

abrown authored Nov 8, 2022
1 parent 50cffad commit f026d95

Showing 4 changed files with 122 additions and 15 deletions.
2 changes: 1 addition & 1 deletion crates/wiggle/generate/src/wasmtime.rs
@@ -118,7 +118,7 @@ fn generate_func(
};
let (mem, ctx) = mem.data_and_store_mut(&mut caller);
let ctx = get_cx(ctx);
-let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem);
+let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem, false);
Ok(<#ret_ty>::from(#abi_func(ctx, &mem #(, #arg_names)*) #await_ ?))
};

Expand Down
122 changes: 109 additions & 13 deletions crates/wiggle/src/guest_type.rs
@@ -1,5 +1,8 @@
use crate::{region::Region, GuestError, GuestPtr};
use std::mem;
use std::sync::atomic::{
AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicU16, AtomicU32, AtomicU64, AtomicU8, Ordering,
};

/// A trait for types which are used to report errors. Each type used in the
/// first result position of an interface function is used, by convention, to
@@ -60,9 +63,9 @@ pub unsafe trait GuestTypeTransparent<'a>: GuestType<'a> {
fn validate(ptr: *mut Self) -> Result<(), GuestError>;
}

-macro_rules! primitives {
-($($i:ident)*) => ($(
-impl<'a> GuestType<'a> for $i {
+macro_rules! integer_primitives {
+($([$ty:ident, $ty_atomic:ident],)*) => ($(
+impl<'a> GuestType<'a> for $ty {
fn guest_size() -> u32 { mem::size_of::<Self>() as u32 }
fn guest_align() -> usize { mem::align_of::<Self>() }

@@ -88,7 +91,14 @@ macro_rules! primitives {
if ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
-Ok(unsafe { <$i>::from_le_bytes(*host_ptr.cast::<[u8; mem::size_of::<Self>()]>()) })
+// If the accessed memory is shared, we need to load the bytes
+// with the correct memory consistency. We could check if the
+// memory is shared each time, but we expect little performance
+// difference between an additional branch and a relaxed memory
+// access and thus always do the relaxed access here.
+let atomic_value_ref: &$ty_atomic =
+unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+Ok($ty::from_le(atomic_value_ref.load(Ordering::Relaxed)))
}

#[inline]
@@ -107,16 +117,100 @@ macro_rules! primitives {
if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
-unsafe {
-*host_ptr.cast::<[u8; mem::size_of::<Self>()]>() = <$i>::to_le_bytes(val);
+// If the accessed memory is shared, we need to store the bytes
+// with the correct memory consistency. We could check if the
+// memory is shared each time, but we expect little performance
+// difference between an additional branch and a relaxed memory
+// access and thus always do the relaxed access here.
+let atomic_value_ref: &$ty_atomic =
+unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+atomic_value_ref.store(val.to_le(), Ordering::Relaxed);
Ok(())
}
}

unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
#[inline]
fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
// All bit patterns are safe, nothing to do here
Ok(())
}
}

)*)
}

macro_rules! float_primitives {
($([$ty:ident, $ty_unsigned:ident, $ty_atomic:ident],)*) => ($(
impl<'a> GuestType<'a> for $ty {
fn guest_size() -> u32 { mem::size_of::<Self>() as u32 }
fn guest_align() -> usize { mem::align_of::<Self>() }

#[inline]
fn read(ptr: &GuestPtr<'a, Self>) -> Result<Self, GuestError> {
// Any bit pattern for any primitive implemented with this
// macro is safe, so our `validate_size_align` method will
// guarantee that if we are given a pointer it's valid for the
// size of our type as well as properly aligned. Consequently we
// should be able to safely read the pointer just after we
// validated it, returning the value read.
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
offset,
Self::guest_align(),
size,
)?;
let region = Region {
start: offset,
len: size,
};
if ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
// If the accessed memory is shared, we need to load the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
let value = $ty_unsigned::from_le(atomic_value_ref.load(Ordering::Relaxed));
Ok($ty::from_bits(value))
}

#[inline]
fn write(ptr: &GuestPtr<'_, Self>, val: Self) -> Result<(), GuestError> {
let offset = ptr.offset();
let size = Self::guest_size();
let host_ptr = ptr.mem().validate_size_align(
offset,
Self::guest_align(),
size,
)?;
let region = Region {
start: offset,
len: size,
};
if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
return Err(GuestError::PtrBorrowed(region));
}
// If the accessed memory is shared, we need to store the bytes
// with the correct memory consistency. We could check if the
// memory is shared each time, but we expect little performance
// difference between an additional branch and a relaxed memory
// access and thus always do the relaxed access here.
let atomic_value_ref: &$ty_atomic =
unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
let le_value = $ty_unsigned::to_le(val.to_bits());
atomic_value_ref.store(le_value, Ordering::Relaxed);
Ok(())
}
}

-unsafe impl<'a> GuestTypeTransparent<'a> for $i {
+unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
#[inline]
-fn validate(_ptr: *mut $i) -> Result<(), GuestError> {
+fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
// All bit patterns are safe, nothing to do here
Ok(())
}
@@ -125,13 +219,15 @@ macro_rules! primitives {
)*)
}

-primitives! {
+integer_primitives! {
// signed
-i8 i16 i32 i64 i128
+[i8, AtomicI8], [i16, AtomicI16], [i32, AtomicI32], [i64, AtomicI64],
// unsigned
-u8 u16 u32 u64 u128
-// floats
-f32 f64
+[u8, AtomicU8], [u16, AtomicU16], [u32, AtomicU32], [u64, AtomicU64],
}

+float_primitives! {
+[f32, u32, AtomicU32], [f64, u64, AtomicU64],
+}

// Support pointers-to-pointers where pointers are always 32-bits in wasm land
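
The float accessors above round-trip through the raw bit pattern, since
there is no atomic float type. A minimal standalone sketch of that
conversion chain (a single `AtomicU64` stands in for the guest memory
slot):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Store an f64 as its little-endian u64 bit pattern with a relaxed
// store; the load reverses the chain.
fn store_f64(slot: &AtomicU64, val: f64) {
    slot.store(u64::to_le(val.to_bits()), Ordering::Relaxed);
}

fn load_f64(slot: &AtomicU64) -> f64 {
    f64::from_bits(u64::from_le(slot.load(Ordering::Relaxed)))
}

fn main() {
    let slot = AtomicU64::new(0);
    store_f64(&slot, 6.28);
    assert_eq!(load_f64(&slot), 6.28);
}
```
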
6 changes: 6 additions & 0 deletions crates/wiggle/src/lib.rs
@@ -192,6 +192,12 @@ pub unsafe trait GuestMemory: Send + Sync {
/// `GuestStr` are implemented correctly, a shared `BorrowHandle` should only be
/// unborrowed once.
fn shared_unborrow(&self, h: BorrowHandle);

/// Check if the underlying memory is shared across multiple threads; e.g.,
/// with a WebAssembly shared memory.
fn is_shared_memory(&self) -> bool {
false
}
}

/// A handle to a borrow on linear memory. It is produced by `{mut, shared}_borrow` and
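
The default implementation returns `false`, so existing `GuestMemory`
implementations are unaffected. A hypothetical caller (not part of
wiggle) might consult the flag before handing out long-lived raw views
of guest memory:

```rust
use wiggle::GuestMemory;

// Hypothetical helper: a shared memory's bytes may be mutated by
// another thread at any time, so refuse long-lived raw views of it.
fn check_unshared(mem: &dyn GuestMemory) -> Result<(), String> {
    if mem.is_shared_memory() {
        Err("refusing long-lived raw view of a shared memory".to_string())
    } else {
        Ok(())
    }
}
```
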
7 changes: 6 additions & 1 deletion crates/wiggle/src/wasmtime.rs
@@ -6,10 +6,11 @@ use crate::{BorrowHandle, GuestError, GuestMemory, Region};
pub struct WasmtimeGuestMemory<'a> {
mem: &'a mut [u8],
bc: BorrowChecker,
shared: bool,
}

impl<'a> WasmtimeGuestMemory<'a> {
-pub fn new(mem: &'a mut [u8]) -> Self {
+pub fn new(mem: &'a mut [u8], shared: bool) -> Self {
Self {
mem,
// Wiggle does not expose any methods for functions to re-enter
@@ -22,6 +23,7 @@ impl<'a> WasmtimeGuestMemory<'a> {
// integrated fully with wasmtime:
// https://github.com/bytecodealliance/wasmtime/issues/1917
bc: BorrowChecker::new(),
shared,
}
}
}
@@ -51,4 +53,7 @@ unsafe impl GuestMemory for WasmtimeGuestMemory<'_> {
fn mut_unborrow(&self, h: BorrowHandle) {
self.bc.mut_unborrow(h)
}
fn is_shared_memory(&self) -> bool {
self.shared
}
}
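
Putting the pieces together, a host-side sketch of the new constructor
signature and flag (assuming wiggle's in-tree `wasmtime` integration;
the buffer and offset are made up, and alignment is forced so the u32
access validates):

```rust
use wiggle::{wasmtime::WasmtimeGuestMemory, GuestMemory, GuestPtr};

// Real linear memories are page-aligned; force enough alignment here
// for the 4-byte access below to pass Wiggle's validation.
#[repr(C, align(8))]
struct Buf([u8; 64]);

fn main() -> Result<(), wiggle::GuestError> {
    let mut buf = Buf([0; 64]);
    // The constructor now takes a `shared` flag; the generated glue
    // above passes `false` until shared-memory detection lands.
    let mem = WasmtimeGuestMemory::new(&mut buf.0, false);
    assert!(!mem.is_shared_memory());

    // The `GuestPtr` API is unchanged; reads and writes now go through
    // relaxed atomics internally.
    let ptr: GuestPtr<'_, u32> = GuestPtr::new(&mem, 8);
    ptr.write(0x1234_5678)?;
    assert_eq!(ptr.read()?, 0x1234_5678);
    Ok(())
}
```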
