From 6d88167b9f717791cbfafb9aac12b666ed51fd79 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Fri, 12 Jun 2020 17:22:54 -0700 Subject: [PATCH] externref: Address review feedback --- Cargo.lock | 22 +- crates/environ/src/cranelift.rs | 2 +- crates/runtime/src/externref.rs | 321 +++++++++---------------- crates/wasmtime/src/func.rs | 45 ++-- crates/wasmtime/src/instance.rs | 2 +- crates/wasmtime/src/ref.rs | 0 crates/wasmtime/src/runtime.rs | 7 +- crates/wasmtime/src/trampoline/func.rs | 2 +- 8 files changed, 150 insertions(+), 251 deletions(-) mode change 100755 => 100644 crates/wasmtime/src/ref.rs diff --git a/Cargo.lock b/Cargo.lock index fcfb241d34f9..a0060f6117db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -107,7 +107,7 @@ dependencies = [ "cfg-if", "libc", "miniz_oxide", - "object 0.19.0", + "object 0.19.0 (git+https://github.com/gimli-rs/object.git)", "rustc-demangle", ] @@ -499,7 +499,7 @@ dependencies = [ "anyhow", "cranelift-codegen", "cranelift-module", - "object 0.18.0", + "object 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", "target-lexicon", ] @@ -1241,6 +1241,11 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a" +[[package]] +name = "object" +version = "0.19.0" +source = "git+https://github.com/gimli-rs/object.git#2ad508810e5470f5bcb7ede8d866c8b1eda4c4d3" + [[package]] name = "object" version = "0.19.0" @@ -1251,11 +1256,6 @@ dependencies = [ "indexmap", ] -[[package]] -name = "object" -version = "0.19.0" -source = "git+https://github.com/gimli-rs/object.git#2ad508810e5470f5bcb7ede8d866c8b1eda4c4d3" - [[package]] name = "once_cell" version = "1.3.1" @@ -2374,7 +2374,7 @@ dependencies = [ "libc", "log", "more-asserts", - "object 0.18.0", + "object 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_env_logger", "rayon", "structopt", @@ -2401,7 +2401,7 @@ dependencies = [ "anyhow", "gimli", "more-asserts", - "object 0.18.0", + "object 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", "target-lexicon", "thiserror", "wasmparser 0.57.0", @@ -2502,7 +2502,7 @@ version = "0.18.0" dependencies = [ "anyhow", "more-asserts", - "object 0.18.0", + "object 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", "wasmtime-environ", ] @@ -2516,7 +2516,7 @@ dependencies = [ "ittapi-rs", "lazy_static", "libc", - "object 0.18.0", + "object 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)", "scroll", "serde", "target-lexicon", diff --git a/crates/environ/src/cranelift.rs b/crates/environ/src/cranelift.rs index d261200848dc..89a7dfd6f6fb 100644 --- a/crates/environ/src/cranelift.rs +++ b/crates/environ/src/cranelift.rs @@ -221,7 +221,7 @@ impl binemit::StackmapSink for StackMapSink { impl StackMapSink { fn finish(mut self) -> Vec { - self.infos.sort_by_key(|info| info.code_offset); + self.infos.sort_unstable_by_key(|info| info.code_offset); self.infos } } diff --git a/crates/runtime/src/externref.rs b/crates/runtime/src/externref.rs index 92d7105a8a55..603cf8bbc376 100644 --- a/crates/runtime/src/externref.rs +++ b/crates/runtime/src/externref.rs @@ -105,7 +105,7 @@ use std::cell::{Cell, RefCell, UnsafeCell}; use std::cmp::Ordering; use std::collections::BTreeMap; use std::collections::HashSet; -use std::hash::Hasher; +use std::hash::{Hash, Hasher}; use std::mem; use std::ops::Deref; use std::ptr::{self, NonNull}; @@ -337,37 +337,6 @@ impl VMExternRef { } } - // /// Turn this `VMExternRef` into a raw, 
untyped pointer. - // /// - // /// This forgets `self` and does *not* decrement the reference count on the - // /// pointed-to data. - // /// - // /// This `VMExternRef` may be recovered with `VMExternRef::from_raw`. - // pub fn into_raw(self) -> *mut u8 { - // let ptr = self.as_raw(); - // mem::forget(self); - // ptr - // } - - // /// Recreate a `VMExternRef` from a pointer returned from a previous call to - // /// `VMExternRef::into_raw`. - // /// - // /// # Safety - // /// - // /// Wildly unsafe to use with anything other than the result of a previous - // /// `into_raw` call! - // /// - // /// This method does *not* increment the reference count on the pointed-to - // /// data, so `from_raw` must be called at most *once* on the result of a - // /// previous `into_raw` call. (Ideally, every `into_raw` is later followed - // /// by a `from_raw`, but it is technically memory safe to never call - // /// `from_raw` after `into_raw`: it will leak the pointed-to value, which is - // /// memory safe). - // pub unsafe fn from_raw(ptr: *mut u8) -> Self { - // debug_assert!(!ptr.is_null()); - // VMExternRef(NonNull::new_unchecked(ptr).cast()) - // } - /// Turn this `VMExternRef` into a raw, untyped pointer. /// /// Unlike `into_raw`, this does not consume and forget `self`. It is *not* @@ -379,7 +348,6 @@ impl VMExternRef { /// `clone_from_raw` is called. pub fn as_raw(&self) -> *mut u8 { let ptr = self.0.cast::().as_ptr(); - mem::forget(self); ptr } @@ -461,6 +429,31 @@ impl Deref for VMExternRef { } } +/// A wrapper around a `VMExternRef` that implements `Eq` and `Hash` with +/// pointer semantics. +/// +/// We use this so that we can morally put `VMExternRef`s inside of `HashSet`s +/// even though they don't implement `Eq` and `Hash` to avoid foot guns. +#[derive(Clone)] +struct VMExternRefWithTraits(VMExternRef); + +impl Hash for VMExternRefWithTraits { + fn hash(&self, hasher: &mut H) + where + H: Hasher, + { + VMExternRef::hash(&self.0, hasher) + } +} + +impl PartialEq for VMExternRefWithTraits { + fn eq(&self, other: &Self) -> bool { + VMExternRef::eq(&self.0, &other.0) + } +} + +impl Eq for VMExternRefWithTraits {} + type TableElem = UnsafeCell>; /// A table that over-approximizes the set of `VMExternRef`s that any Wasm @@ -470,35 +463,37 @@ type TableElem = UnsafeCell>; /// entries. Deduplication happens at GC time. #[repr(C)] pub struct VMExternRefActivationsTable { - /// Bump-allocation finger within the current chunk. + /// Bump-allocation finger within the `chunk`. /// - /// NB: this is an `UnsafeCell` because it is read from and written to by - /// compiled Wasm code. + /// NB: this is an `UnsafeCell` because it is written to by compiled Wasm + /// code. next: UnsafeCell>, - /// Pointer to just after the current chunk. + /// Pointer to just after the `chunk`. /// /// This is *not* within the current chunk and therefore is not a valid /// place to insert a reference! - /// - /// This is only updated from host code. - end: Cell>, + end: NonNull, - /// The chunks within which we are bump allocating. + /// Bump allocation chunk that stores fast-path insertions. + chunk: Box<[TableElem]>, + + /// When unioned with `chunk`, this is an over-approximation of the GC roots + /// on the stack, inside Wasm frames. /// - /// This is only updated from host code. 
- chunks: RefCell>>, + /// This is used by slow-path insertion, and when a GC cycle finishes, is + /// re-initialized to the just-discovered precise set of stack roots (which + /// immediately becomes an over-approximation again as soon as Wasm runs and + /// potentially drops references). + over_approximated_stack_roots: RefCell>, /// The precise set of on-stack, inside-Wasm GC roots that we discover via /// walking the stack and interpreting stack maps. /// - /// That is, this is the precise set that the bump allocation table is - /// over-approximating. - /// /// This is *only* used inside the `gc` function, and is empty otherwise. It /// is just part of this struct so that we can reuse the allocation, rather /// than create a new hash set every GC. - precise_stack_roots: RefCell>>, + precise_stack_roots: RefCell>, /// A pointer to a `u8` on the youngest host stack frame before we called /// into Wasm for the first time. When walking the stack in garbage @@ -510,119 +505,82 @@ pub struct VMExternRefActivationsTable { } impl VMExternRefActivationsTable { - const INITIAL_CHUNK_SIZE: usize = 4096 / mem::size_of::(); + const CHUNK_SIZE: usize = 4096 / mem::size_of::(); /// Create a new `VMExternRefActivationsTable`. pub fn new() -> Self { - let chunk = Self::new_chunk(Self::INITIAL_CHUNK_SIZE); + let chunk = Self::new_chunk(Self::CHUNK_SIZE); let next = chunk.as_ptr() as *mut TableElem; let end = unsafe { next.add(chunk.len()) }; VMExternRefActivationsTable { next: UnsafeCell::new(NonNull::new(next).unwrap()), - end: Cell::new(NonNull::new(end).unwrap()), - chunks: RefCell::new(vec![chunk]), - precise_stack_roots: RefCell::new(HashSet::with_capacity(Self::INITIAL_CHUNK_SIZE)), + end: NonNull::new(end).unwrap(), + chunk, + over_approximated_stack_roots: RefCell::new(HashSet::with_capacity(Self::CHUNK_SIZE)), + precise_stack_roots: RefCell::new(HashSet::with_capacity(Self::CHUNK_SIZE)), stack_canary: Cell::new(None), } } fn new_chunk(size: usize) -> Box<[UnsafeCell>]> { - assert!(size >= Self::INITIAL_CHUNK_SIZE); - let mut chunk = Vec::with_capacity(size); - for _ in 0..size { - chunk.push(UnsafeCell::new(None)); - } - chunk.into_boxed_slice() + assert!(size >= Self::CHUNK_SIZE); + (0..size).map(|_| UnsafeCell::new(None)).collect() } /// Try and insert a `VMExternRef` into this table. /// - /// This is a fast path that only succeeds when the current chunk has the + /// This is a fast path that only succeeds when the bump chunk has the /// capacity for the requested insertion. /// /// If the insertion fails, then the `VMExternRef` is given back. Callers /// may attempt a GC to free up space and try again, or may call - /// `insert_slow_path` to allocate a new bump chunk for this insertion. + /// `insert_slow_path` to infallibly insert the reference (potentially + /// allocating additional space in the table to hold it). #[inline] pub fn try_insert(&self, externref: VMExternRef) -> Result<(), VMExternRef> { unsafe { let next = *self.next.get(); - let end = self.end.get(); - if next == end { + if next == self.end { return Err(externref); } debug_assert!((*next.as_ref().get()).is_none()); ptr::write(next.as_ptr(), UnsafeCell::new(Some(externref))); + let next = NonNull::new_unchecked(next.as_ptr().add(1)); - debug_assert!(next <= end); + debug_assert!(next <= self.end); *self.next.get() = next; + Ok(()) } } - /// This is a slow path for inserting a reference into the table when the - /// current bump chunk is full. 
- /// - /// This method is infallible, and will allocate an additional bump chunk if - /// necessary. + /// This is a slow path for infallibly inserting a reference into the table. #[inline(never)] pub fn insert_slow_path(&self, externref: VMExternRef) { - let externref = match self.try_insert(externref) { - Ok(()) => return, - Err(x) => x, - }; - - { - let mut chunks = self.chunks.borrow_mut(); - - let new_size = chunks.last().unwrap().len() * 2; - let new_chunk = Self::new_chunk(new_size); - - unsafe { - let next = new_chunk.as_ptr() as *mut TableElem; - debug_assert!(!next.is_null()); - *self.next.get() = NonNull::new_unchecked(next); - - let end = next.add(new_chunk.len()); - debug_assert!(!end.is_null()); - self.end.set(NonNull::new_unchecked(end)); - } - - chunks.push(new_chunk); - } - - self.try_insert(externref) - .expect("insertion should always succeed after we allocate a new chunk"); + let mut roots = self.over_approximated_stack_roots.borrow_mut(); + roots.insert(VMExternRefWithTraits(externref)); } - fn num_filled_in_last_chunk(&self, chunks: &[Box<[TableElem]>]) -> usize { - let last_chunk = chunks.last().unwrap(); + fn num_filled_in_bump_chunk(&self) -> usize { let next = unsafe { *self.next.get() }; - let end = self.end.get(); - let num_unused_in_last_chunk = - ((end.as_ptr() as usize) - (next.as_ptr() as usize)) / mem::size_of::(); - last_chunk.len().saturating_sub(num_unused_in_last_chunk) + let bytes_unused = (self.end.as_ptr() as usize) - (next.as_ptr() as usize); + let slots_unused = bytes_unused / mem::size_of::(); + self.chunk.len().saturating_sub(slots_unused) } fn elements(&self, mut f: impl FnMut(&VMExternRef)) { - // Every chunk except the last one is full, so we can simply iterate - // over all of their elements. - let chunks = self.chunks.borrow(); - for chunk in chunks.iter().take(chunks.len() - 1) { - for elem in chunk.iter() { - if let Some(elem) = unsafe { &*elem.get() } { - f(elem); - } - } + let roots = self.over_approximated_stack_roots.borrow(); + for elem in roots.iter() { + f(&elem.0); } - // The last chunk is not all the way full, so we only iterate over its - // full parts. - let num_filled_in_last_chunk = self.num_filled_in_last_chunk(&chunks); - for elem in chunks.last().unwrap().iter().take(num_filled_in_last_chunk) { - if let Some(elem) = unsafe { &*elem.get() } { + // The bump chunk is not all the way full, so we only iterate over its + // filled-in slots. + let num_filled = self.num_filled_in_bump_chunk(); + for slot in self.chunk.iter().take(num_filled) { + if let Some(elem) = unsafe { &*slot.get() } { f(elem); } } @@ -630,94 +588,43 @@ impl VMExternRefActivationsTable { fn insert_precise_stack_root(&self, root: NonNull) { let mut precise_stack_roots = self.precise_stack_roots.borrow_mut(); - if precise_stack_roots.insert(root) { - // If this root was not already in the set, then we need to - // increment its reference count, so that it doesn't get freed in - // `reset` when we're overwriting old bump allocation table entries - // with new ones. - unsafe { - root.as_ref().increment_ref_count(); - } - } + let root = unsafe { VMExternRef::clone_from_raw(root.as_ptr() as *mut _) }; + precise_stack_roots.insert(VMExternRefWithTraits(root)); } - /// Refill the bump allocation table with our precise stack roots, and sweep - /// away everything else. 
- fn reset(&self) { - let mut chunks = self.chunks.borrow_mut(); - - let mut precise_roots = self.precise_stack_roots.borrow_mut(); - if precise_roots.is_empty() { - // Get rid of all but our first bump chunk, and set our `next` and - // `end` bump allocation fingers into it. + /// Sweep the bump allocation table after we've discovered our precise stack + /// roots. + fn sweep(&self) { + // Sweep our bump chunk. + let num_filled = self.num_filled_in_bump_chunk(); + for slot in self.chunk.iter().take(num_filled) { unsafe { - let chunk = chunks.first().unwrap(); - - let next = chunk.as_ptr() as *mut TableElem; - debug_assert!(!next.is_null()); - *self.next.get() = NonNull::new_unchecked(next); - - let end = next.add(chunk.len()); - debug_assert!(!end.is_null()); - self.end.set(NonNull::new_unchecked(end)); - } - chunks.truncate(1); - } else { - // Drain our precise stack roots into the bump allocation table. - // - // This overwrites old entries, which drops them and decrements their - // reference counts. Safety relies on the reference count increment in - // `insert_precise_stack_root` to avoid over-eagerly dropping references - // that are in `self.precise_stack_roots` but haven't been inserted into - // the bump allocation table yet. - let mut precise_roots = precise_roots.drain(); - 'outer: for (chunk_index, chunk) in chunks.iter().enumerate() { - for (slot_index, slot) in chunk.iter().enumerate() { - if let Some(root) = precise_roots.next() { - unsafe { - // NB: there is no reference count increment here - // because everything in `self.precise_stack_roots` - // already had its reference count incremented for us, - // and this is logically a move out from there, rather - // than a clone. - *slot.get() = Some(VMExternRef(root)); - } - } else { - // We've inserted all of our precise, on-stack roots back - // into the bump allocation table. Update our `next` and - // `end` bump pointer members for the new current chunk, and - // free any excess chunks. - let start = chunk.as_ptr() as *mut TableElem; - unsafe { - let next = start.add(slot_index + 1); - debug_assert!(!next.is_null()); - *self.next.get() = NonNull::new_unchecked(next); - - let end = start.add(chunk.len()); - debug_assert!(!end.is_null()); - self.end.set(NonNull::new_unchecked(end)); - } - chunks.truncate(chunk_index + 1); - break 'outer; - } - } + *slot.get() = None; } - - debug_assert!( - precise_roots.next().is_none(), - "should always have enough capacity in the bump allocations table \ - to hold all of our precise, on-stack roots" - ); } + debug_assert!( + self.chunk + .iter() + .all(|slot| unsafe { (*slot.get()).as_ref().is_none() }), + "after sweeping the bump chunk, all slots should be `None`" + ); - // Finally, sweep away excess capacity within our new last/current - // chunk, so that old, no-longer-live roots get dropped. - let num_filled_in_last_chunk = self.num_filled_in_last_chunk(&chunks); - for slot in chunks.last().unwrap().iter().skip(num_filled_in_last_chunk) { - unsafe { - *slot.get() = None; - } + // Reset our `next` bump allocation finger. + unsafe { + let next = self.chunk.as_ptr() as *mut TableElem; + debug_assert!(!next.is_null()); + *self.next.get() = NonNull::new_unchecked(next); } + + // The current `precise_roots` becomes our new over-appoximated set for + // the next GC cycle. 
+ let mut precise_roots = self.precise_stack_roots.borrow_mut(); + let mut over_approximated = self.over_approximated_stack_roots.borrow_mut(); + mem::swap(&mut *precise_roots, &mut *over_approximated); + + // And finally, the new `precise_roots` should be cleared and remain + // empty until the next GC cycle. + precise_roots.clear(); } /// Set the stack canary around a call into Wasm. @@ -804,8 +711,8 @@ struct StackMapRegistryInner { #[derive(Debug)] struct ModuleStackMaps { - /// The range of PCs that this module covers. Different modules should - /// always have distinct ranges. + /// The range of PCs that this module covers. Different modules must always + /// have distinct ranges. range: std::ops::Range, /// A map from a PC in this module (that is a GC safepoint) to its @@ -1068,7 +975,7 @@ pub unsafe fn gc( true }); } - externref_activations_table.reset(); + externref_activations_table.sweep(); log::debug!("end GC"); return; } @@ -1086,7 +993,7 @@ pub unsafe fn gc( // * resetting our bump-allocated table's over-approximation to the // newly-discovered precise set. - // The SP of the previous frame we processed. + // The SP of the previous (younger) frame we processed. let mut last_sp = None; // Whether we have found our stack canary or not yet. @@ -1109,6 +1016,8 @@ pub unsafe fn gc( let sp = frame.sp() as usize; if let Some(stack_map) = stack_maps_registry.lookup_stack_map(pc) { + debug_assert!(sp != 0, "we should always get a valid SP for Wasm frames"); + for i in 0..(stack_map.mapped_words() as usize) { if stack_map.get_bit(i) { // Stack maps have one bit per word in the frame, and the @@ -1144,14 +1053,16 @@ pub unsafe fn gc( !found_canary }); - // Only reset the table if we found the stack canary, and therefore know - // that we discovered all the on-stack, inside-a-Wasm-frame roots. If we did - // *not* find the stack canary, then `libunwind` failed to walk the whole - // stack, and we might be missing roots. Reseting the table would free those - // missing roots while they are still in use, leading to use-after-free. + // Only sweep and reset the table if we found the stack canary, and + // therefore know that we discovered all the on-stack, inside-a-Wasm-frame + // roots. If we did *not* find the stack canary, then `libunwind` failed to + // walk the whole stack, and we might be missing roots. Reseting the table + // would free those missing roots while they are still in use, leading to + // use-after-free. if found_canary { - externref_activations_table.reset(); + externref_activations_table.sweep(); } else { + log::warn!("did not find stack canary; skipping GC sweep"); let mut roots = externref_activations_table.precise_stack_roots.borrow_mut(); roots.clear(); } diff --git a/crates/wasmtime/src/func.rs b/crates/wasmtime/src/func.rs index 9fb58c743dfa..53c7e5ab01d7 100644 --- a/crates/wasmtime/src/func.rs +++ b/crates/wasmtime/src/func.rs @@ -195,16 +195,9 @@ macro_rules! getters { let mut ret = None; $(let $args = $args.into_abi();)* - { - let canary = 0; - let _auto_reset = instance - .store - .externref_activations_table() - .set_stack_canary(&canary); - catch_traps(export.vmctx, &instance.store, || { - ret = Some(fnptr(export.vmctx, ptr::null_mut(), $($args,)*)); - })?; - } + invoke_wasm_and_catch_traps(export.vmctx, &instance.store, || { + ret = Some(fnptr(export.vmctx, ptr::null_mut(), $($args,)*)); + })?; Ok(ret.unwrap()) } @@ -560,23 +553,14 @@ impl Func { } // Call the trampoline. 
- { - let canary = 0; - let _auto_reset = self - .instance - .store - .externref_activations_table() - .set_stack_canary(&canary); - - catch_traps(self.export.vmctx, &self.instance.store, || unsafe { - (self.trampoline)( - self.export.vmctx, - ptr::null_mut(), - self.export.address, - values_vec.as_mut_ptr(), - ) - })?; - } + invoke_wasm_and_catch_traps(self.export.vmctx, &self.instance.store, || unsafe { + (self.trampoline)( + self.export.vmctx, + ptr::null_mut(), + self.export.address, + values_vec.as_mut_ptr(), + ) + })?; // Load the return values out of `values_vec`. let mut results = Vec::with_capacity(my_ty.results().len()); @@ -746,13 +730,18 @@ impl fmt::Debug for Func { } } -pub(crate) fn catch_traps( +pub(crate) fn invoke_wasm_and_catch_traps( vmctx: *mut VMContext, store: &Store, closure: impl FnMut(), ) -> Result<(), Trap> { let signalhandler = store.signal_handler(); unsafe { + let canary = 0; + let _auto_reset_canary = store + .externref_activations_table() + .set_stack_canary(&canary); + wasmtime_runtime::catch_traps( vmctx, store.engine().config().max_wasm_stack, diff --git a/crates/wasmtime/src/instance.rs b/crates/wasmtime/src/instance.rs index 0e15dcdc97dc..ff9e39435fb2 100644 --- a/crates/wasmtime/src/instance.rs +++ b/crates/wasmtime/src/instance.rs @@ -97,7 +97,7 @@ fn instantiate( }; let vmctx_ptr = instance.handle.vmctx_ptr(); unsafe { - super::func::catch_traps(vmctx_ptr, store, || { + super::func::invoke_wasm_and_catch_traps(vmctx_ptr, store, || { mem::transmute::< *const VMFunctionBody, unsafe extern "C" fn(*mut VMContext, *mut VMContext), diff --git a/crates/wasmtime/src/ref.rs b/crates/wasmtime/src/ref.rs old mode 100755 new mode 100644 diff --git a/crates/wasmtime/src/runtime.rs b/crates/wasmtime/src/runtime.rs index 6e3f95b5ff86..234e175896d6 100644 --- a/crates/wasmtime/src/runtime.rs +++ b/crates/wasmtime/src/runtime.rs @@ -195,6 +195,7 @@ impl Config { self.validating_config .operator_config .enable_reference_types = enable; + self.flags .set("enable_safepoints", if enable { "true" } else { "false" }) .unwrap(); @@ -730,7 +731,6 @@ pub struct Engine { struct EngineInner { config: Config, compiler: Compiler, - stack_map_registry: Arc, } impl Engine { @@ -742,7 +742,6 @@ impl Engine { inner: Arc::new(EngineInner { config: config.clone(), compiler: config.build_compiler(), - stack_map_registry: Arc::new(StackMapRegistry::default()), }), } } @@ -843,7 +842,7 @@ impl Store { jit_code_ranges: RefCell::new(Vec::new()), host_info: RefCell::new(HashMap::new()), externref_activations_table: Rc::new(VMExternRefActivationsTable::new()), - stack_map_registry: engine.inner.stack_map_registry.clone(), + stack_map_registry: Arc::new(StackMapRegistry::default()), }), } } @@ -1092,7 +1091,7 @@ impl Store { } pub(crate) fn stack_map_registry(&self) -> &Arc { - &self.inner.engine.inner.stack_map_registry + &self.inner.stack_map_registry } /// Perform garbage collection of `ExternRef`s. diff --git a/crates/wasmtime/src/trampoline/func.rs b/crates/wasmtime/src/trampoline/func.rs index 6ccc700c2431..ae3dc3f7179f 100644 --- a/crates/wasmtime/src/trampoline/func.rs +++ b/crates/wasmtime/src/trampoline/func.rs @@ -217,7 +217,7 @@ pub fn create_handle_with_function( if ft.params().iter().any(|p| *p == ValType::ExternRef) || ft.results().iter().any(|r| *r == ValType::ExternRef) { - flag_builder.set("enable_safepoints", "true").unwrap(); + flag_builder.enable("enable_safepoints").unwrap(); } isa_builder.finish(settings::Flags::new(flag_builder))
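
The core data-structure change in crates/runtime/src/externref.rs above is easier to see in isolation: fast-path insertions bump-allocate into a fixed-size chunk, overflow goes into a deduplicating hash set keyed by pointer identity, and a GC sweep empties the chunk and adopts the precisely discovered roots as the next over-approximation. The following self-contained sketch illustrates that scheme under simplifying assumptions (plain `Rc` values stand in for `VMExternRef`; the names `ExternRef`, `ByIdentity`, and `ActivationsTable` are illustrative and not part of the patch):

use std::cell::RefCell;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::rc::Rc;

// Illustrative stand-in for `VMExternRef`: a reference-counted host value.
type ExternRef = Rc<String>;

// Mirrors the role of `VMExternRefWithTraits`: equality and hashing by pointer
// identity, so the hash set deduplicates entries that point at the same allocation.
#[derive(Clone)]
struct ByIdentity(ExternRef);

impl PartialEq for ByIdentity {
    fn eq(&self, other: &Self) -> bool {
        Rc::ptr_eq(&self.0, &other.0)
    }
}

impl Eq for ByIdentity {}

impl Hash for ByIdentity {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (Rc::as_ptr(&self.0) as usize).hash(state);
    }
}

// A toy activations table: a fixed-capacity "bump chunk" for fast-path
// insertions (duplicates allowed) plus a deduplicating set that serves both as
// the slow path and as the over-approximated root set between GCs.
struct ActivationsTable {
    chunk: RefCell<Vec<ExternRef>>,
    capacity: usize,
    over_approximated: RefCell<HashSet<ByIdentity>>,
}

impl ActivationsTable {
    fn new(capacity: usize) -> Self {
        ActivationsTable {
            chunk: RefCell::new(Vec::with_capacity(capacity)),
            capacity,
            over_approximated: RefCell::new(HashSet::new()),
        }
    }

    // Fast path: bump-insert into the chunk, handing the value back if full.
    fn try_insert(&self, r: ExternRef) -> Result<(), ExternRef> {
        let mut chunk = self.chunk.borrow_mut();
        if chunk.len() == self.capacity {
            return Err(r);
        }
        chunk.push(r);
        Ok(())
    }

    // Slow path: infallible, deduplicated insertion into the hash set, instead
    // of growing the bump storage.
    fn insert_slow(&self, r: ExternRef) {
        self.over_approximated.borrow_mut().insert(ByIdentity(r));
    }

    // After the precise live roots have been discovered, empty the bump chunk
    // and adopt the precise set as the new over-approximation.
    fn sweep(&self, precise_roots: HashSet<ByIdentity>) {
        self.chunk.borrow_mut().clear();
        *self.over_approximated.borrow_mut() = precise_roots;
    }
}

fn main() {
    let table = ActivationsTable::new(2);
    let a = Rc::new("a".to_string());
    let b = Rc::new("b".to_string());
    let c = Rc::new("c".to_string());

    assert!(table.try_insert(a.clone()).is_ok());
    assert!(table.try_insert(b.clone()).is_ok());
    // The chunk is full, so the third insertion falls back to the slow path.
    if let Err(overflow) = table.try_insert(c.clone()) {
        table.insert_slow(overflow);
    }

    // Pretend a stack walk found only `a` still live.
    let mut precise = HashSet::new();
    precise.insert(ByIdentity(a.clone()));
    table.sweep(precise);
    assert_eq!(table.over_approximated.borrow().len(), 1);
}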
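
The crates/wasmtime/src/func.rs change funnels every call into Wasm through invoke_wasm_and_catch_traps, which records a stack canary on the host frame and relies on a guard value to clear it again when the call returns or unwinds. A minimal sketch of that guard pattern, assuming a hypothetical set_stack_canary helper and a plain Cell in place of the real VMExternRefActivationsTable field:

use std::cell::Cell;

// Clears the canary slot when dropped, even if the wrapped call exits early.
struct StackCanary<'a> {
    slot: &'a Cell<Option<usize>>,
}

impl<'a> Drop for StackCanary<'a> {
    fn drop(&mut self) {
        self.slot.set(None);
    }
}

// Record the address of a local on the host frame that is about to call into Wasm.
fn set_stack_canary<'a>(slot: &'a Cell<Option<usize>>, canary: &u8) -> StackCanary<'a> {
    slot.set(Some(canary as *const u8 as usize));
    StackCanary { slot }
}

// Hypothetical helper loosely modeled on `invoke_wasm_and_catch_traps`.
fn invoke_wasm(slot: &Cell<Option<usize>>, body: impl FnOnce()) {
    let canary = 0u8;
    let _auto_reset = set_stack_canary(slot, &canary);
    body(); // while this runs, stack walking knows where host frames begin
    // `_auto_reset` drops here and clears the canary.
}

fn main() {
    let slot = Cell::new(None);
    invoke_wasm(&slot, || assert!(slot.get().is_some()));
    assert!(slot.get().is_none());
}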
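
ModuleStackMaps records the range of PCs each module's code occupies, and lookup_stack_map must find the module containing an arbitrary PC. One way such a range lookup can be structured (an assumption for illustration, not something this patch shows) is a BTreeMap keyed by each range's end address, sketched below with a hypothetical Registry type:

use std::collections::BTreeMap;
use std::ops::Range;

struct Registry {
    // Keyed by `range.end`, so `range(pc..).next()` yields the only candidate
    // module; ranges are assumed disjoint, as the patch's comment requires.
    by_end: BTreeMap<usize, Range<usize>>,
}

impl Registry {
    fn register(&mut self, range: Range<usize>) {
        self.by_end.insert(range.end, range);
    }

    fn lookup(&self, pc: usize) -> Option<&Range<usize>> {
        let (_, range) = self.by_end.range(pc..).next()?;
        if range.contains(&pc) {
            Some(range)
        } else {
            None
        }
    }
}

fn main() {
    let mut reg = Registry { by_end: BTreeMap::new() };
    reg.register(0x1000..0x2000);
    reg.register(0x3000..0x4000);
    assert!(reg.lookup(0x1800).is_some());
    assert!(reg.lookup(0x2800).is_none());
}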