diff --git a/cranelift/wasm/src/heap.rs b/cranelift/wasm/src/heap.rs index e71fae8e3d12..b253c62dcee0 100644 --- a/cranelift/wasm/src/heap.rs +++ b/cranelift/wasm/src/heap.rs @@ -31,8 +31,8 @@ entity_impl!(Heap, "heap"); /// always present. /// /// 2. The *unmapped pages* is a possibly empty range of address space that may -/// be mapped in the future when the heap is grown. They are addressable -/// but not accessible. +/// be mapped in the future when the heap is grown. They are addressable but +/// not accessible. /// /// 3. The *offset-guard pages* is a range of address space that is guaranteed /// to always cause a trap when accessed. It is used to optimize bounds @@ -48,10 +48,10 @@ entity_impl!(Heap, "heap"); /// /// #### Static heaps /// -/// A *static heap* starts out with all the address space it will ever need, so it -/// never moves to a different address. At the base address is a number of mapped -/// pages corresponding to the heap's current size. Then follows a number of -/// unmapped pages where the heap can grow up to its maximum size. After the +/// A *static heap* starts out with all the address space it will ever need, so +/// it never moves to a different address. At the base address is a number of +/// mapped pages corresponding to the heap's current size. Then follows a number +/// of unmapped pages where the heap can grow up to its maximum size. After the /// unmapped pages follow the offset-guard pages which are also guaranteed to /// generate a trap when accessed. /// diff --git a/crates/environ/src/tunables.rs b/crates/environ/src/tunables.rs index 06d6b5ca5207..7ba3e6fdfc9b 100644 --- a/crates/environ/src/tunables.rs +++ b/crates/environ/src/tunables.rs @@ -3,7 +3,8 @@ use serde_derive::{Deserialize, Serialize}; /// Tunable parameters for WebAssembly compilation. #[derive(Clone, Hash, Serialize, Deserialize)] pub struct Tunables { - /// For static heaps, the size in wasm pages of the heap protected by bounds checking. 
+ /// For static heaps, the size in wasm pages of the heap protected by bounds + /// checking. pub static_memory_bound: u64, /// The size in bytes of the offset guard for static heaps. @@ -31,7 +32,8 @@ pub struct Tunables { /// Whether or not we use epoch-based interruption. pub epoch_interruption: bool, - /// Whether or not to treat the static memory bound as the maximum for unbounded heaps. + /// Whether or not to treat the static memory bound as the maximum for + /// unbounded heaps. pub static_memory_bound_is_maximum: bool, /// Whether or not linear memory allocations will have a guard region at the diff --git a/crates/jit-icache-coherence/src/libc.rs b/crates/jit-icache-coherence/src/libc.rs index 557cd06921a6..364658bd1813 100644 --- a/crates/jit-icache-coherence/src/libc.rs +++ b/crates/jit-icache-coherence/src/libc.rs @@ -104,7 +104,7 @@ fn riscv_flush_icache(start: u64, end: u64) -> Result<()> { match unsafe { libc::syscall( { - // The syscall isn't defined in `libc`, so we definfe the syscall number here. + // The syscall isn't defined in `libc`, so we define the syscall number here. // https://github.com/torvalds/linux/search?q=__NR_arch_specific_syscall #[allow(non_upper_case_globals)] const __NR_arch_specific_syscall :i64 = 244; diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs index 6a17e14682f9..35c1201495d6 100644 --- a/crates/runtime/src/instance/allocator.rs +++ b/crates/runtime/src/instance/allocator.rs @@ -42,7 +42,8 @@ pub struct InstanceAllocationRequest<'a> { /// A pointer to the "store" for this instance to be allocated. The store /// correlates with the `Store` in wasmtime itself, and lots of contextual - /// information about the execution of wasm can be learned through the store. + /// information about the execution of wasm can be learned through the + /// store. /// /// Note that this is a raw pointer and has a static lifetime, both of which /// are a bit of a lie. 
This is done purely so a store can learn about @@ -172,7 +173,7 @@ pub unsafe trait InstanceAllocatorImpl { // associated types are not object safe. // // 2. We would want a parameterized `Drop` implementation so that we could - // pass in the `InstaceAllocatorImpl` on drop, but this doesn't exist in + // pass in the `InstanceAllocatorImpl` on drop, but this doesn't exist in // Rust. Therefore, we would be forced to add reference counting and // stuff like that to keep a handle on the instance allocator from this // theoretical type. That's a bummer. @@ -250,11 +251,13 @@ pub unsafe trait InstanceAllocatorImpl { #[cfg(feature = "async")] fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack>; - /// Deallocates a fiber stack that was previously allocated with `allocate_fiber_stack`. + /// Deallocates a fiber stack that was previously allocated with + /// `allocate_fiber_stack`. /// /// # Safety /// - /// The provided stack is required to have been allocated with `allocate_fiber_stack`. + /// The provided stack is required to have been allocated with + /// `allocate_fiber_stack`. #[cfg(feature = "async")] unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack); diff --git a/crates/runtime/src/lib.rs b/crates/runtime/src/lib.rs index 340b780c51c1..ed2c4415ded6 100644 --- a/crates/runtime/src/lib.rs +++ b/crates/runtime/src/lib.rs @@ -182,8 +182,8 @@ pub trait ModuleRuntimeInfo: Send + Sync + 'static { /// not callable from outside the Wasm module itself. fn array_to_wasm_trampoline(&self, index: DefinedFuncIndex) -> Option<VMArrayCallFunction>; - /// Return the addres, in memory, of the trampoline that allows Wasm to call - /// a native function of the given signature. + /// Return the address, in memory, of the trampoline that allows Wasm to + /// call a native function of the given signature. 
fn wasm_to_native_trampoline( &self, signature: VMSharedSignatureIndex, ) -> Option<NonNull<VMWasmCallFunction>>; diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index 69b7435cd6c9..2f657e462cd6 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -1908,7 +1908,7 @@ impl PoolingAllocationConfig { /// allocator additionally track an "affinity" flag to a particular core /// wasm module. When a module is instantiated into a slot then the slot is /// considered affine to that module, even after the instance has been - /// dealloocated. + /// deallocated. /// /// When a new instance is created then a slot must be chosen, and the /// current algorithm for selecting a slot is: @@ -1931,7 +1931,7 @@ impl PoolingAllocationConfig { /// impact of "unused slots" for a long-running wasm server. /// /// If this setting is set to 0, for example, then affine slots are - /// aggressively resused on a least-recently-used basis. A "cold" slot is + /// aggressively reused on a least-recently-used basis. A "cold" slot is /// only used if there are no affine slots available to allocate from. This /// means that the set of slots used over the lifetime of a program is the /// same as the maximum concurrent number of wasm instances.