Skip to content

Commit 4876cbe

Browse files
committed
feat: add bump and linked list allocator
1 parent f5f65a1 commit 4876cbe

6 files changed

Lines changed: 260 additions & 10 deletions

File tree

kernel/src/allocator.rs

Lines changed: 45 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,11 @@ use x86_64::{
88
},
99
};
1010

11+
use crate::allocator::{bump::BumpAllocator, linked_list::LinkedListAllocator};
12+
13+
pub mod bump;
14+
pub mod linked_list;
15+
1116
pub const HEAP_START: usize = 0x_4444_4444_0000;
1217
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
1318

@@ -44,7 +49,7 @@ pub fn init_heap(
4449
}
4550

4651
#[global_allocator]
47-
static ALLOCATOR: LockedHeap = LockedHeap::empty();
52+
static ALLOCATOR: LockedHeap = Locked::new(LinkedListAllocator::new());
4853

4954
pub struct Dummy;
5055

@@ -57,3 +62,42 @@ unsafe impl GlobalAlloc for Dummy {
5762
panic!("dealloc should be never called")
5863
}
5964
}
65+
66+
pub struct Locked<A> {
67+
inner: spin::Mutex<A>,
68+
}
69+
70+
impl<A> Locked<A> {
71+
pub const fn new(inner: A) -> Self {
72+
Locked {
73+
inner: spin::Mutex::new(inner),
74+
}
75+
}
76+
77+
pub fn lock(&self) -> spin::MutexGuard<A> {
78+
self.inner.lock()
79+
}
80+
}
81+
82+
/// Aligns `addr` upward to the next multiple of `align`
/// (straightforward version using the remainder).
fn align_up_slow(addr: usize, align: usize) -> usize {
    match addr % align {
        0 => addr,
        rem => addr + (align - rem),
    }
}
91+
92+
/// Aligns `addr` upward to the next multiple of `align`.
///
/// Requires that `align` is a power of two.
fn align_up(addr: usize, align: usize) -> usize {
    // Because `align` is a power of two it has exactly one bit set, so
    // `align - 1` has all lower bits set and `!(align - 1)` is a mask
    // that clears them. AND-ing an address with that mask aligns it
    // *downward*; adding `align - 1` beforehand turns that into an
    // *upward* alignment.
    (addr + align - 1) & !(align - 1)
}

kernel/src/allocator/bump.rs

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
use super::{Locked, align_up};
use core::alloc::{GlobalAlloc, Layout};
use core::ptr;
3+
4+
/// A simple bump allocator: allocation only moves `next` forward, and
/// memory is reclaimed all at once when every allocation has been freed.
pub struct BumpAllocator {
    // First address of the heap region managed by this allocator.
    heap_start: usize,
    // One past the last usable heap address (`heap_start + heap_size`).
    heap_end: usize,
    // Next free address; always within `heap_start..=heap_end`.
    next: usize,
    // Count of live allocations; when it drops to zero, `next` is reset
    // to `heap_start` (see `dealloc`).
    allocations: usize,
}
10+
11+
impl BumpAllocator {
    /// Creates a new, empty bump allocator.
    ///
    /// `const fn` so it can initialize a `static`; the real heap bounds
    /// are supplied later via `init`.
    pub const fn new() -> Self {
        BumpAllocator {
            heap_start: 0,
            heap_end: 0,
            next: 0,
            allocations: 0,
        }
    }

    /// Initializes the allocator with the given heap bounds.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `[heap_start, heap_start + heap_size)`
    /// is valid, unused memory, and that `init` is called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.heap_start = heap_start;
        self.heap_end = heap_start + heap_size;
        self.next = heap_start;
    }
}
27+
28+
unsafe impl GlobalAlloc for Locked<BumpAllocator> {
29+
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
30+
let mut bump = self.lock();
31+
32+
let alloc_start = align_up(bump.next, layout.align());
33+
let alloc_end = match alloc_start.checked_add(layout.size()) {
34+
Some(end) => end,
35+
None => return ptr::null_mut(),
36+
};
37+
38+
if alloc_end > bump.heap_end {
39+
ptr::null_mut()
40+
} else {
41+
bump.next = alloc_end;
42+
bump.allocations += 1;
43+
alloc_start as *mut u8
44+
}
45+
}
46+
47+
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
48+
let bump = self.lock();
49+
50+
bump.allocations -= 1;
51+
52+
if bump.allocations == 0 {
53+
bump.next = bump.heap_start;
54+
}
55+
}
56+
}
Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,138 @@
1+
use super::{Locked, align_up};
use core::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};
3+
4+
/// A node of the free list. Each node lives *inside* the free region it
/// describes, at that region's start address.
struct ListNode {
    // Size of the free region in bytes (the node's own storage included).
    size: usize,
    // Next free region, or `None` at the end of the list.
    next: Option<&'static mut ListNode>,
}

impl ListNode {
    /// Creates an unlinked node describing a region of `size` bytes.
    const fn new(size: usize) -> Self {
        Self { size, next: None }
    }

    /// Address of this node, i.e. the start of the free region.
    fn start_addr(&self) -> usize {
        let this: *const Self = self;
        this as usize
    }

    /// First address past the end of the free region.
    fn end_addr(&self) -> usize {
        self.size + self.start_addr()
    }
}
22+
23+
/// A free-list allocator: freed regions are kept in a singly linked list,
/// with list nodes stored inside the free regions themselves.
pub struct LinkedListAllocator {
    // Dummy head node (size 0); `head.next` points at the first real
    // free region.
    head: ListNode,
}
26+
27+
impl LinkedListAllocator {
28+
pub const fn new() -> Self {
29+
Self {
30+
head: ListNode::new(0),
31+
}
32+
}
33+
34+
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
35+
unsafe {
36+
self.add_free_region(heap_start, heap_size);
37+
}
38+
}
39+
40+
unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
41+
// verifica que a região liberada é capaz de armazenar um ListNode
42+
assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr);
43+
assert!(size >= mem::size_of::<ListNode>());
44+
45+
let mut node = ListNode::new(size);
46+
node.next = self.head.next.take();
47+
let node_ptr = addr as *mut ListNode;
48+
49+
unsafe {
50+
node_ptr.write(node);
51+
self.head.next = Some(&mut *node_ptr);
52+
}
53+
}
54+
55+
fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
56+
let mut current = &mut self.head;
57+
58+
while let Some(ref mut region) = current.next {
59+
if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
60+
let next = region.next.take();
61+
let ret = Some((current.next.take().unwrap(), alloc_start));
62+
current.next = next;
63+
return ret;
64+
} else {
65+
current = current.next.as_mut().unwrap();
66+
}
67+
}
68+
69+
None
70+
}
71+
72+
fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<(usize, ())> {
73+
let alloc_start = align_up(region.start_addr(), align);
74+
let alloc_end = alloc_start.checked_add(size).ok_or(())?;
75+
76+
if alloc_end > region.end_addr() {
77+
// região pequena
78+
return Err(());
79+
}
80+
81+
let excess_size = region.end_addr() - alloc_end;
82+
83+
// a alocação divide a região em uma parte usada e livre
84+
// portanto a parte excedente deve conseguir armazenar um ListNode
85+
if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
86+
return Err(());
87+
}
88+
89+
Ok(alloc_start)
90+
}
91+
92+
/// ajusta o layout de forma que a região alocada de memória
93+
/// possa armazenar um ListNode.
94+
/// alguma hora, as regiões serão desalocadas portanto devem serem capazes de armazenarem
95+
/// ListNode sem causar UB.
96+
fn size_align(layout: Layout) -> (usize, usize) {
97+
let layout = layout
98+
.align_to(mem::align_of::<ListNode>()) // aumenta o alinhamento para o alinhamento de um ListNode
99+
.expect("adjusting alignment failed")
100+
// arredonda o tamanho para um múltiplo do alinhamento para garantir que o próximo bloco de memória vai ter
101+
// o alinhamento correto para armazenar um ListNode
102+
.pad_to_align();
103+
104+
// garante um tamanho mínimo de mem::size_of::<ListNode>
105+
// dessa maneira, o dealloc pode escrever de forma segura um ListNode para o bloco de memória liberado
106+
let size = layout.size().max(mem::size_of::<ListNode>());
107+
108+
(size, layout.align())
109+
}
110+
}
111+
112+
unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
113+
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
114+
let (size, align) = LinkedListAllocator::size_align(layout);
115+
let mut allocator = self.lock();
116+
117+
if let Some((region, alloc_start)) = allocator.find_region(size, align) {
118+
let alloc_end = alloc_start.checked_add(size).expect("overflow");
119+
let excess_size = region.end_addr() - alloc_end;
120+
121+
if excess_size > 0 {
122+
unsafe {
123+
allocator.add_free_region(alloc_end, excess_size);
124+
}
125+
}
126+
127+
alloc_start as *mut u8
128+
} else {
129+
ptr::null_mut()
130+
}
131+
}
132+
133+
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
134+
let (size, _) = LinkedListAllocator::size_align(layout);
135+
136+
unsafe { self.lock().add_free_region(ptr as usize, size) }
137+
}
138+
}

kernel/src/gdt.rs

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -78,13 +78,13 @@ pub fn init() {
7878
GDT.0.load();
7979

8080
unsafe {
81-
CS::set_reg(GDT.1.kernel_code_selector);
82-
DS::set_reg(GDT.1.kernel_data_selector);
83-
ES::set_reg(GDT.1.kernel_data_selector);
84-
FS::set_reg(GDT.1.kernel_data_selector);
85-
SS::set_reg(GDT.1.kernel_data_selector);
86-
GS::set_reg(GDT.1.kernel_data_selector);
87-
88-
load_tss(GDT.1.tss_selector);
81+
// CS::set_reg(GDT.1.kernel_code_selector);
82+
// DS::set_reg(GDT.1.kernel_data_selector);
83+
// ES::set_reg(GDT.1.kernel_data_selector);
84+
// FS::set_reg(GDT.1.kernel_data_selector);
85+
// SS::set_reg(GDT.1.kernel_data_selector);
86+
// GS::set_reg(GDT.1.kernel_data_selector);
87+
88+
// load_tss(GDT.1.tss_selector);
8989
}
9090
}

kernel/src/userspace.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@ pub unsafe fn jump_to_userspace(physical_memory_offset: VirtAddr) {
7777
"mov {tmp:r}, rsp",
7878
"push rdx", // SS (DS)
7979
"push {tmp:r}", // Current ESP
80-
"pushfq", // EFLAGS
80+
"pushf", // EFLAGS
8181
"push {code_selector:r}", // CS
8282
"push {user_code:r}", // EIP
8383
"iretq",

kernel/tests/heap_allocation.rs

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,3 +72,15 @@ fn many_boxes() {
7272
assert_eq!(*x, i);
7373
}
7474
}
75+
76+
#[test_case]
77+
fn many_boxes_long_lived() {
78+
let long_lived = Box::new(1);
79+
80+
for i in 0..HEAP_SIZE {
81+
let x = Box::new(i);
82+
assert_eq!(*x, i);
83+
}
84+
85+
assert_eq!(*long_lived, 1);
86+
}

0 commit comments

Comments
 (0)