2
2
// live in reserved blocks.
3
3
// This makes it dangerous to use, for example, println or anything that will invoke string
4
4
// formatting machinery.
5
- #![ no_std]
5
+ #![ cfg_attr ( target_os = "axle_kernel" , no_std) ]
6
6
#![ feature( format_args_nl) ]
7
7
#![ feature( cstr_from_bytes_until_nul) ]
8
8
#![ feature( default_alloc_error_handler) ]
@@ -12,9 +12,13 @@ extern crate ffi_bindings;
12
12
use core:: ffi:: CStr ;
13
13
use core:: usize:: MAX ;
14
14
use heapless:: spsc:: Queue ;
15
- use heapless:: Vec ;
16
15
use spin:: Mutex ;
17
16
17
+ #[ cfg( target_os = "axle_kernel" ) ]
18
+ use heapless:: Vec ;
19
+ #[ cfg( not( target_os = "axle_kernel" ) ) ]
20
+ use std:: vec:: Vec ;
21
+
18
22
use ffi_bindings:: cstr_core:: CString ;
19
23
use ffi_bindings:: {
20
24
assert, boot_info_get, println, BootInfo , PhysicalMemoryRegionType , _panic,
@@ -42,13 +46,19 @@ const MAX_FRAMES_ALLOCATOR_CAN_BOOKKEEP: usize = MAX_MEMORY_ALLOCATOR_CAN_BOOKKE
42
46
const CONTIGUOUS_CHUNK_POOL_SIZE : usize = MEGABYTE * 128 ;
43
47
const MAX_CONTIGUOUS_CHUNK_FRAMES : usize = CONTIGUOUS_CHUNK_POOL_SIZE / PAGE_SIZE ;
44
48
45
/// Newtype over a physical frame's base address, so frame addresses
/// cannot be silently confused with plain integers.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct PhysicalFrame(usize);
47
51
48
- #[ derive( Debug , Copy , Clone ) ]
52
+ #[ derive( Debug , Copy , Clone , PartialEq , Eq ) ]
49
53
struct ContiguousChunk {
50
54
base : PhysicalFrame ,
51
- size : u64 ,
55
+ size : usize ,
56
+ }
57
+
58
+ impl ContiguousChunk {
59
+ fn new ( base : PhysicalFrame , size : usize ) -> Self {
60
+ Self { base, size }
61
+ }
52
62
}
53
63
54
64
struct ContiguousChunkPoolDescription {
@@ -64,26 +74,72 @@ impl ContiguousChunkPoolDescription {
64
74
65
75
struct ContiguousChunkPool {
66
76
pool_description : Option < ContiguousChunkPoolDescription > ,
77
+
78
+ #[ cfg( target_os = "axle_kernel" ) ]
67
79
allocated_chunks : Vec < ContiguousChunk , MAX_CONTIGUOUS_CHUNK_FRAMES > ,
68
- free_frames : Vec < PhysicalFrame , MAX_CONTIGUOUS_CHUNK_FRAMES > ,
80
+ #[ cfg( target_os = "axle_kernel" ) ]
81
+ free_chunks : Vec < ContiguousChunk , MAX_CONTIGUOUS_CHUNK_FRAMES > ,
82
+ #[ cfg( not( target_os = "axle_kernel" ) ) ]
83
+ allocated_chunks : Vec < ContiguousChunk > ,
84
+ #[ cfg( not( target_os = "axle_kernel" ) ) ]
85
+ free_chunks : Vec < ContiguousChunk > ,
69
86
}
87
+ extern crate alloc;
70
88
71
89
impl ContiguousChunkPool {
72
90
const fn new ( ) -> Self {
73
91
Self {
74
92
pool_description : None ,
75
93
allocated_chunks : Vec :: new ( ) ,
76
- free_frames : Vec :: new ( ) ,
94
+ free_chunks : Vec :: new ( ) ,
77
95
}
78
96
}
79
97
80
98
fn set_pool_description ( & mut self , base : usize , total_size : usize ) {
99
+ println ! ( "Setting pool description" ) ;
81
100
self . pool_description = Some ( ContiguousChunkPoolDescription :: new ( base, total_size) ) ;
101
+ // Start off with a single free chunk the size of the entire pool
102
+ #[ cfg( target_os = "axle_kernel" ) ]
103
+ self . free_chunks
104
+ . push ( ContiguousChunk :: new ( PhysicalFrame ( base) , total_size) )
105
+ . unwrap ( ) ;
106
+ #[ cfg( not( target_os = "axle_kernel" ) ) ]
107
+ self . free_chunks
108
+ . push ( ContiguousChunk :: new ( PhysicalFrame ( base) , total_size) ) ;
82
109
}
83
110
84
111
fn is_pool_configured ( & self ) -> bool {
85
112
self . pool_description . is_some ( )
86
113
}
114
+
115
+ fn alloc ( & mut self , size : usize ) -> usize {
116
+ // Look for a chunk big enough to satisfy the allocation
117
+ let mut chunk_to_drop = None ;
118
+ let mut allocated_chunk = None ;
119
+ for mut chunk in self . free_chunks . iter_mut ( ) {
120
+ // Is the chunk large enough to satisfy this allocation?
121
+ if chunk. size >= size {
122
+ let chunk_base = chunk. base . 0 ;
123
+ let new_size = chunk. size - size;
124
+ if new_size == 0 {
125
+ // Remove the chunk entirely
126
+ chunk_to_drop = Some ( chunk. clone ( ) ) ;
127
+ } else {
128
+ // Shrink the chunk to account for the fact that part of it is now allocated
129
+ chunk. base = PhysicalFrame ( chunk_base + size) ;
130
+ chunk. size -= size;
131
+ }
132
+ // And add an allocated chunk
133
+ allocated_chunk = Some ( ContiguousChunk :: new ( PhysicalFrame ( chunk_base) , size) ) ;
134
+ self . allocated_chunks . push ( allocated_chunk. unwrap ( ) ) ;
135
+ break ;
136
+ }
137
+ }
138
+ if let Some ( chunk_to_drop) = chunk_to_drop {
139
+ self . free_chunks . retain ( |i| * i == chunk_to_drop) ;
140
+ }
141
+ allocated_chunk. unwrap ( ) . base . 0
142
+ }
87
143
}
88
144
89
145
/// If we have a maximum of 16GB of RAM tracked, each array to track the frames will occupy 512kb.
@@ -174,5 +230,32 @@ pub unsafe fn pmm_free(frame_addr: usize) {
174
230
175
231
#[ no_mangle]
176
232
pub unsafe fn pmm_alloc_continuous_range ( size : usize ) -> usize {
177
- todo ! ( )
233
+ let ret = CONTIGUOUS_CHUNK_POOL . alloc ( size) ;
234
+ printf (
235
+ "pmm_alloc_contiguous_range(0x%p) = 0x%p\n \0 " . as_ptr ( ) as * const u8 ,
236
+ size,
237
+ ret,
238
+ ) ;
239
+ ret
240
+ }
241
+
242
#[cfg(test)]
mod test {
    use crate::{ContiguousChunk, ContiguousChunkPool, PhysicalFrame};

    #[test]
    fn basic_allocation() {
        // Given an empty pool.
        // (No `Box::new` here: the pool is a small, sized local and lives
        // fine on the test's stack — boxing it was a needless allocation.)
        let mut pool = ContiguousChunkPool::new();
        pool.set_pool_description(0x10000, 0x20000);
        // When I allocate a block
        let allocated_chunk = pool.alloc(0x4000);
        // Then it's allocated at the beginning of the pool
        assert_eq!(allocated_chunk, 0x10000);
        // And the free chunks are split as expected: one shrunken chunk
        // covering the remainder of the pool
        assert_eq!(
            pool.free_chunks,
            vec![ContiguousChunk::new(PhysicalFrame(0x14000), 0x1c000)]
        );
    }
}
0 commit comments