memory.rs
//! Memory management for linear memories.
//!
//! `LinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.

use crate::mmap::Mmap;
use crate::vmcontext::VMMemoryDefinition;
use more_asserts::{assert_ge, assert_le};
use std::cell::RefCell;
use std::convert::TryFrom;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};

/// A linear memory instance.
#[derive(Debug)]
pub struct LinearMemory {
    // The underlying allocation.
    mmap: RefCell<WasmMmap>,

    // The optional maximum size in wasm pages of this linear memory.
    maximum: Option<u32>,

    // Size in bytes of extra guard pages after the end to optimize loads and stores with
    // constant offsets.
    offset_guard_size: usize,

    // Records whether we're using a bounds-checking strategy which requires
    // handlers to catch trapping accesses.
    pub(crate) needs_signal_handlers: bool,
}

#[derive(Debug)]
struct WasmMmap {
    // Our OS allocation of mmap'd memory.
    alloc: Mmap,
    // The current logical size in wasm pages of this linear memory.
    size: u32,
}

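// Layout of the `alloc` mapping, as set up in `LinearMemory::new` below:
//
//   [ accessible: `size` wasm pages ][ reserved, not yet accessible ][ offset guard bytes ]
//
// For `MemoryStyle::Dynamic` only the guard (if any) sits past the accessible pages and the
// mapping is re-created on growth; for `MemoryStyle::Static { bound }` the full `bound` is
// reserved up front and growth only flips reserved pages to accessible.
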
impl LinearMemory {
    /// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
    pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
        // `maximum` cannot be set to more than `65536` pages.
        assert_le!(plan.memory.minimum, WASM_MAX_PAGES);
        assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);

        let offset_guard_bytes = plan.offset_guard_size as usize;

        // If we have an offset guard, or if we're doing the static memory
        // allocation strategy, we need signal handlers to catch out of bounds
        // accesses.
        let needs_signal_handlers = offset_guard_bytes > 0
            || match plan.style {
                MemoryStyle::Dynamic => false,
                MemoryStyle::Static { .. } => true,
            };

        let minimum_pages = match plan.style {
            MemoryStyle::Dynamic => plan.memory.minimum,
            MemoryStyle::Static { bound } => {
                assert_ge!(bound, plan.memory.minimum);
                bound
            }
        } as usize;
        let minimum_bytes = minimum_pages.checked_mul(WASM_PAGE_SIZE as usize).unwrap();
        let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
        let mapped_pages = plan.memory.minimum as usize;
        let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;

        let mmap = WasmMmap {
            alloc: Mmap::accessible_reserved(mapped_bytes, request_bytes)?,
            size: plan.memory.minimum,
        };

        Ok(Self {
            mmap: mmap.into(),
            maximum: plan.memory.maximum,
            offset_guard_size: offset_guard_bytes,
            needs_signal_handlers,
        })
    }

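    // Worked example of the sizing above (WASM_PAGE_SIZE is 64 KiB = 65536 bytes; the concrete
    // bound and guard values here are illustrative, not defaults):
    //
    //   * `MemoryStyle::Dynamic`, `minimum = 2`, no guard:
    //       mapped_bytes  = 2 * 65536 = 131072 accessible bytes,
    //       request_bytes = 131072 (nothing extra reserved).
    //
    //   * `MemoryStyle::Static { bound: 16 }`, `minimum = 2`, `offset_guard_size = 65536`:
    //       mapped_bytes  = 2 * 65536 = 131072 accessible bytes,
    //       request_bytes = 16 * 65536 + 65536 = 1114112 bytes reserved in total.
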
    /// Returns the number of allocated wasm pages.
    pub fn size(&self) -> u32 {
        self.mmap.borrow().size
    }

    /// Grow memory by the specified amount of wasm pages.
    ///
    /// Returns `None` if memory can't be grown by the specified amount
    /// of wasm pages.
    pub fn grow(&self, delta: u32) -> Option<u32> {
        // Optimization of memory.grow 0 calls.
        let mut mmap = self.mmap.borrow_mut();
        if delta == 0 {
            return Some(mmap.size);
        }

        let new_pages = match mmap.size.checked_add(delta) {
            Some(new_pages) => new_pages,
            // Linear memory size overflow.
            None => return None,
        };
        let prev_pages = mmap.size;

        if let Some(maximum) = self.maximum {
            if new_pages > maximum {
                // Linear memory size would exceed the declared maximum.
                return None;
            }
        }

        // Wasm linear memories are never allowed to grow beyond what is
        // indexable. If the memory has no maximum, enforce the greatest
        // limit here.
        if new_pages >= WASM_MAX_PAGES {
            // Linear memory size would exceed the index range.
            return None;
        }

        let delta_bytes = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
        let prev_bytes = usize::try_from(prev_pages).unwrap() * WASM_PAGE_SIZE as usize;
        let new_bytes = usize::try_from(new_pages).unwrap() * WASM_PAGE_SIZE as usize;

        if new_bytes > mmap.alloc.len() - self.offset_guard_size {
            // If the new size is within the declared maximum, but needs more memory than we
            // have on hand, it's a dynamic heap and it can move.
            let guard_bytes = self.offset_guard_size;
            let request_bytes = new_bytes.checked_add(guard_bytes)?;

            let mut new_mmap = Mmap::accessible_reserved(new_bytes, request_bytes).ok()?;

            let copy_len = mmap.alloc.len() - self.offset_guard_size;
            new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&mmap.alloc.as_slice()[..copy_len]);

            mmap.alloc = new_mmap;
        } else if delta_bytes > 0 {
            // Make the newly allocated pages accessible.
            mmap.alloc.make_accessible(prev_bytes, delta_bytes).ok()?;
        }

        mmap.size = new_pages;

        Some(prev_pages)
    }

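    // Worked example for `grow` (illustrative values): starting from `size = 1` page with no
    // declared maximum, `grow(2)` computes
    //   prev_pages = 1, new_pages = 3,
    //   prev_bytes = 65536, delta_bytes = 131072, new_bytes = 196608,
    // remaps or makes the two new pages accessible as appropriate, and returns `Some(1)`, the
    // size in pages before the call. A request that would overflow, exceed `maximum`, or reach
    // `WASM_MAX_PAGES` returns `None` and leaves the memory unchanged.
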
    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
    pub fn vmmemory(&self) -> VMMemoryDefinition {
        let mut mmap = self.mmap.borrow_mut();
        VMMemoryDefinition {
            base: mmap.alloc.as_mut_ptr(),
            current_length: mmap.size as usize * WASM_PAGE_SIZE as usize,
        }
    }
}
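
// A minimal usage sketch of the API above, assuming a `MemoryPlan` named `plan` has already
// been produced elsewhere by `wasmtime_environ` (plan construction is out of scope here):
//
//     let memory = LinearMemory::new(&plan)?;
//     assert_eq!(memory.size(), plan.memory.minimum);
//
//     // `grow` returns the previous size in wasm pages on success, `None` on failure.
//     if let Some(prev_pages) = memory.grow(1) {
//         assert_eq!(memory.size(), prev_pages + 1);
//     }
//
//     // Base pointer and current byte length as handed to compiled wasm code.
//     let def = memory.vmmemory();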