
Commit eac7711

Implement some basic allocators
1 parent 74cc353

7 files changed (+367, -3 lines)


README.md

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ Following this guide: https://os.phil-opp.com/
 - [x] Introduction to Paging
 - [x] Paging Implementation
 - [x] Heap Allocation
-- [ ] Allocator Designs
+- [x] Allocator Designs
 
 ### Multitasking

src/allocator.rs

Lines changed: 34 additions & 2 deletions
@@ -1,13 +1,18 @@
 use alloc::alloc::{GlobalAlloc, Layout};
 use core::ptr::null_mut;
-use linked_list_allocator::LockedHeap;
 use x86_64::{
     structures::paging::{
         mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
     },
     VirtAddr,
 };
 
+use self::fixed_size_block::FixedSizeBlockAllocator;
+
+pub mod bump;
+pub mod fixed_size_block;
+pub mod linked_list;
+
 pub const HEAP_START: usize = 0x4444_4444_0000;
 pub const HEAP_SIZE: usize = 100 * 1024;

@@ -50,4 +55,31 @@ unsafe impl GlobalAlloc for Dummy {
 }
 
 #[global_allocator]
-static ALLOCATOR: LockedHeap = LockedHeap::empty();
+// static ALLOCATOR: LockedHeap = LockedHeap::empty();
+// static ALLOCATOR: Locked<BumpAllocator> = Locked::new(BumpAllocator::new());
+// static ALLOCATOR: Locked<LinkedListAllocator> = Locked::new(LinkedListAllocator::new());
+static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());
+
+/// A wrapper around spin::Mutex to permit trait implementations.
+pub struct Locked<A> {
+    inner: spin::Mutex<A>,
+}
+
+impl<A> Locked<A> {
+    pub const fn new(inner: A) -> Self {
+        Locked {
+            inner: spin::Mutex::new(inner),
+        }
+    }
+
+    pub fn lock(&self) -> spin::MutexGuard<A> {
+        self.inner.lock()
+    }
+}
+
+/// Align the given address `addr` to the alignment `align`.
+///
+/// Requires that `align` is a power of two.
+fn align_up(addr: usize, align: usize) -> usize {
+    (addr + align - 1) & !(align - 1)
+}
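
For reference, a minimal sketch (not part of this commit) of how the `align_up` bitmask behaves: because `align` is a power of two, `align - 1` is a mask of the low bits, so adding it and then clearing those bits rounds the address up to the next multiple. The `#[test_case]` function and the concrete addresses below are illustrative only.

```rust
// Illustrative sketch, not in the diff: align_up rounds an address up to the
// next multiple of a power-of-two alignment.
#[test_case]
fn align_up_examples() {
    assert_eq!(align_up(0x1000, 8), 0x1000);    // already aligned -> unchanged
    assert_eq!(align_up(0x1001, 8), 0x1008);    // rounded up to the next multiple of 8
    assert_eq!(align_up(0x1001, 4096), 0x2000); // rounded up to the next page boundary
}
```

The commented-out `ALLOCATOR` statics make it easy to switch between the allocator designs by changing a single line; all of them are usable through the same `Locked` wrapper because each design implements `GlobalAlloc` for `Locked<...>`.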

src/allocator/bump.rs

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
use core::alloc::{GlobalAlloc, Layout};

use super::{align_up, Locked};
use core::ptr;

pub struct BumpAllocator {
    heap_start: usize,
    heap_end: usize,
    next: usize,
    allocations: usize,
}

impl BumpAllocator {
    pub const fn new() -> Self {
        BumpAllocator {
            heap_start: 0,
            heap_end: 0,
            next: 0,
            allocations: 0,
        }
    }

    /// Initialize the bump allocator with the given heap bounds.
    ///
    /// Unsafe because the caller must ensure that the given memory range is
    /// unused. Also, the method must only be called once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.heap_start = heap_start;
        self.heap_end = heap_start + heap_size;
        self.next = heap_start;
    }
}

unsafe impl GlobalAlloc for Locked<BumpAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut bump = self.lock(); // get a mutable reference

        let alloc_start = align_up(bump.next, layout.align());
        let alloc_end = match alloc_start.checked_add(layout.size()) {
            Some(end) => end,
            None => return ptr::null_mut(),
        };

        if alloc_end > bump.heap_end {
            ptr::null_mut() // out of memory
        } else {
            bump.next = alloc_end;
            bump.allocations += 1;
            alloc_start as *mut u8
        }
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        let mut bump = self.lock(); // get a mutable reference

        bump.allocations -= 1;
        if bump.allocations == 0 {
            bump.next = bump.heap_start;
        }
    }
}
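
To illustrate the design (a sketch under assumptions, not code from this commit): the bump allocator never reuses memory on individual `dealloc` calls; it only resets `next` to `heap_start` once the `allocations` counter drops back to zero. The function below is hypothetical and assumes it is handed an 8-byte-aligned, otherwise unused region of at least a few hundred bytes.

```rust
// Illustrative sketch: counter-based reset behaviour of the bump allocator.
// Assumes `heap_start` is 8-byte aligned and the region is unused.
use core::alloc::{GlobalAlloc, Layout};
use crate::allocator::{bump::BumpAllocator, Locked};

fn demo_bump_reset(heap_start: usize, heap_size: usize) {
    let bump = Locked::new(BumpAllocator::new());
    unsafe { bump.lock().init(heap_start, heap_size) };

    let layout = Layout::from_size_align(64, 8).unwrap();
    let a = unsafe { bump.alloc(layout) };
    let b = unsafe { bump.alloc(layout) };
    assert!(a < b); // `next` only moves forward

    unsafe { bump.dealloc(a, layout) }; // does not reclaim the memory yet
    let c = unsafe { bump.alloc(layout) };
    assert!(c > b); // still bumping forward

    unsafe { bump.dealloc(b, layout) };
    unsafe { bump.dealloc(c, layout) };
    // allocation counter reached zero -> the whole heap is reusable again
    assert_eq!(unsafe { bump.alloc(layout) } as usize, heap_start);
}
```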

src/allocator/fixed_size_block.rs

Lines changed: 105 additions & 0 deletions
@@ -0,0 +1,105 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};

use super::Locked;

struct ListNode {
    next: Option<&'static mut ListNode>,
}

/// The block sizes to use.
///
/// The sizes must each be a power of 2 because they are also used as
/// the block alignment (alignments must always be powers of 2).
/// Allocations greater than the biggest size will fall back to a linked
/// list allocator.
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];

/// Choose an appropriate block size for the given layout.
///
/// Returns an index into the [BLOCK_SIZES] array.
fn list_index(layout: &Layout) -> Option<usize> {
    let required_block_size = layout.size().max(layout.align());
    BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}

pub struct FixedSizeBlockAllocator {
    list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
    fallback_allocator: linked_list_allocator::Heap,
}

impl FixedSizeBlockAllocator {
    /// Creates an empty FixedSizeBlockAllocator.
    pub const fn new() -> Self {
        const EMPTY: Option<&'static mut ListNode> = None;
        FixedSizeBlockAllocator {
            list_heads: [EMPTY; BLOCK_SIZES.len()],
            fallback_allocator: linked_list_allocator::Heap::empty(),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// Unsafe because the caller must guarantee that the given heap bounds
    /// are valid and that the heap is unused. This method must be called
    /// only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.fallback_allocator.init(heap_start, heap_size);
    }

    /// Allocates using the fallback allocator.
    fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
        match self.fallback_allocator.allocate_first_fit(layout) {
            Ok(ptr) => ptr.as_ptr(),
            Err(_) => ptr::null_mut(),
        }
    }
}

unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                match allocator.list_heads[index].take() {
                    Some(node) => {
                        allocator.list_heads[index] = node.next.take();
                        node as *mut ListNode as *mut u8
                    }
                    None => {
                        // no block exists in the list -> allocate new block
                        let block_size = BLOCK_SIZES[index];
                        // only works if all block sizes are a power of 2
                        let block_align = block_size;
                        let layout = Layout::from_size_align(block_size, block_align).unwrap();
                        allocator.fallback_alloc(layout)
                    }
                }
            }
            // Too big to fit in our biggest block
            None => allocator.fallback_alloc(layout),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                let new_node = ListNode {
                    next: allocator.list_heads[index].take(),
                };
                // verify that the block has the size and alignment required for storing a node
                assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
                assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
                let new_node_ptr = ptr as *mut ListNode;
                new_node_ptr.write(new_node);
                allocator.list_heads[index] = Some(&mut *new_node_ptr);
            }
            // Too big for the biggest block size. Must have been allocated with the fallback allocator.
            None => {
                let ptr = ptr::NonNull::new(ptr).unwrap();
                allocator.fallback_allocator.deallocate(ptr, layout);
            }
        }
    }
}
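
For intuition (an illustrative sketch, not part of the diff): `list_index` picks the smallest block size that satisfies both the requested size and alignment, which is why small but highly aligned requests can waste space inside a block, and why anything above 2048 bytes goes to the fallback allocator.

```rust
// Illustrative sketch: which size class a few layouts map to, given
// BLOCK_SIZES = [8, 16, 32, 64, 128, 256, 512, 1024, 2048].
use core::alloc::Layout;

fn demo_list_index() {
    // 24 bytes with 8-byte alignment -> smallest block >= 24 is 32 (index 2)
    assert_eq!(list_index(&Layout::from_size_align(24, 8).unwrap()), Some(2));
    // tiny allocation with 64-byte alignment -> alignment dominates -> 64 (index 3)
    assert_eq!(list_index(&Layout::from_size_align(4, 64).unwrap()), Some(3));
    // larger than the biggest block -> None, handled by the fallback allocator
    assert_eq!(list_index(&Layout::from_size_align(4096, 8).unwrap()), None);
}
```

Note that `list_index` is private to the module, so a snippet like this would have to live inside `fixed_size_block.rs` (or a child test module).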

src/allocator/linked_list.rs

Lines changed: 146 additions & 0 deletions
@@ -0,0 +1,146 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};

use super::{align_up, Locked};

struct ListNode {
    size: usize,
    next: Option<&'static mut ListNode>,
}

impl ListNode {
    const fn new(size: usize) -> Self {
        ListNode { size, next: None }
    }

    fn start_addr(&self) -> usize {
        // The ListNode itself is stored at the start address
        self as *const Self as usize
    }

    fn end_addr(&self) -> usize {
        self.start_addr() + self.size
    }
}

/// A worse implementation of a LinkedListAllocator than the `linked_list_allocator` crate.
pub struct LinkedListAllocator {
    head: ListNode,
}

impl LinkedListAllocator {
    /// Creates an empty LinkedListAllocator.
    pub const fn new() -> Self {
        Self {
            head: ListNode::new(0),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// Unsafe because the caller must guarantee that the given heap bounds
    /// are valid and that the heap is unused. This method must be called
    /// only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.add_free_region(heap_start, heap_size);
    }

    /// Adds the given memory region to the front of the list.
    unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
        // ensure that the freed region is capable of holding a ListNode
        assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr);
        assert!(size >= mem::size_of::<ListNode>());

        // create a new list node and append it at the start of the list
        let mut node = ListNode::new(size);
        node.next = self.head.next.take();
        let node_ptr = addr as *mut ListNode;
        node_ptr.write(node);
        self.head.next = Some(&mut *node_ptr)
    }

    /// Looks for a free region with the given size and alignment and removes
    /// it from the list.
    ///
    /// Returns a tuple of the list node and the start address of the
    /// allocation.
    fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
        // reference to current list node, updated for each iteration
        let mut current = &mut self.head;
        // look for a large enough memory region in the linked list
        while let Some(ref mut region) = current.next {
            if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
                // region suitable for allocation -> remove node from list
                let next = region.next.take();
                let ret = Some((current.next.take().unwrap(), alloc_start));
                current.next = next;
                return ret;
            } else {
                // region not suitable -> continue with next region
                current = current.next.as_mut().unwrap();
            }
        }

        // no suitable region found
        None
    }

    /// Try to use the given region for an allocation with the given size and
    /// alignment.
    ///
    /// Returns the allocation start address on success.
    fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
        let alloc_start = align_up(region.start_addr(), align);
        let alloc_end = alloc_start.checked_add(size).ok_or(())?;

        if alloc_end > region.end_addr() {
            // region too small
            return Err(());
        }

        let excess_size = region.end_addr() - alloc_end;
        if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
            // rest of the region too small to hold a ListNode (required because
            // the allocation splits the region into a used and a free part)
            return Err(());
        }

        Ok(alloc_start)
    }

    /// Adjust the given layout so that the resulting allocated memory
    /// region is also capable of storing a [ListNode].
    fn size_align(layout: Layout) -> (usize, usize) {
        let layout = layout
            .align_to(mem::align_of::<ListNode>())
            .expect("adjusting alignment failed")
            .pad_to_align();
        let size = layout.size().max(mem::size_of::<ListNode>());
        (size, layout.align())
    }
}

unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // perform layout adjustments
        let (size, align) = LinkedListAllocator::size_align(layout);
        let mut allocator = self.lock();

        if let Some((region, alloc_start)) = allocator.find_region(size, align) {
            let alloc_end = alloc_start.checked_add(size).expect("overflow");
            let excess_size = region.end_addr() - alloc_end;
            if excess_size > 0 {
                allocator.add_free_region(alloc_end, excess_size);
            }
            alloc_start as *mut u8
        } else {
            ptr::null_mut()
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // perform layout adjustments
        let (size, _) = LinkedListAllocator::size_align(layout);
        self.lock().add_free_region(ptr as usize, size);
    }
}
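
To make the layout adjustment concrete (a sketch assuming a 64-bit target, where `ListNode` is 16 bytes with 8-byte alignment; not part of the commit): `size_align` raises every request to at least the size and alignment of a `ListNode`, so that the allocated block can later be turned back into a free-list node by `dealloc`.

```rust
// Illustrative sketch: effect of size_align on x86_64, where
// size_of::<ListNode>() == 16 and align_of::<ListNode>() == 8.
use core::alloc::Layout;

fn demo_size_align() {
    // a 1-byte request is rounded up to the size of a ListNode
    let adjusted = LinkedListAllocator::size_align(Layout::from_size_align(1, 1).unwrap());
    assert_eq!(adjusted, (16, 8));

    // a 100-byte request is only padded up to the ListNode alignment
    let adjusted = LinkedListAllocator::size_align(Layout::from_size_align(100, 1).unwrap());
    assert_eq!(adjusted, (104, 8));
}
```

Like `list_index` above, `size_align` is private, so this snippet only compiles from within `linked_list.rs` itself.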

src/lib.rs

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@
 #![cfg_attr(test, no_main)]
 #![feature(abi_x86_interrupt)]
 #![feature(alloc_error_handler)]
+#![feature(const_mut_refs)]
 #![feature(custom_test_frameworks)]
 #![test_runner(crate::test_runner)]
 #![reexport_test_harness_main = "test_main"]
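
Two pieces of context (hedged, since neither appears in this diff): `const_mut_refs` is presumably needed because the allocators' `const fn new` constructors involve mutable-reference types such as `Option<&'static mut ListNode>`, which are not yet stable in const contexts. And whichever allocator is selected via `#[global_allocator]` still has to be handed the heap bounds at boot; following the tutorial, that would happen at the end of `allocator::init_heap`, roughly:

```rust
// Illustrative only -- the body of init_heap is not shown in this commit.
// After the heap pages are mapped, hand the region to the global allocator.
unsafe {
    ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
}
```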
