compiles. New custom allocator in progress

This commit is contained in:
mtgmonkey
2025-05-13 13:38:57 -04:00
parent a46f795b5a
commit b2475a7b27
16 changed files with 763 additions and 23 deletions

56
src/' Normal file

@@ -0,0 +1,56 @@
#![no_std]
#![feature(abi_x86_interrupt)]
extern crate alloc;
extern crate multiboot2;
extern crate rlibc;
mod allocator;
mod gdt;
mod interrupt;
mod vga_buffer;
use core::panic::PanicInfo;
use multiboot2::{BootInformation, BootInformationHeader};
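// Kernel entry point, called from the assembly boot code with the address of
// the multiboot information structure.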
#[unsafe(no_mangle)]
pub extern "C" fn rust_main(multiboot_information_address: usize) {
println!("Hello World!");
// println!(
// "This program booted itself from assembly. This println function manipulates individual bytes in a vga buffer. My power is beyond your understanding\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9"
// );
let boot_info = unsafe {
BootInformation::load(multiboot_information_address as *const BootInformationHeader)
.unwrap()
};
let _memory_map_tag = boot_info.memory_map_tag().expect("Memory map tag required");
init();
println!("Got past");
halt()
}
pub fn init() {
// TODO: allocator::init_heap needs a page-table Mapper<Size4KiB> and a
// FrameAllocator<Size4KiB>, neither of which is set up yet.
println!("got to gdt::init()");
gdt::init();
println!("got to interrupt::init_idt();");
interrupt::init_idt();
println!("got to PICS.lock()");
unsafe { interrupt::PICS.lock().initialize() };
println!("got to enable inst");
x86_64::instructions::interrupts::enable();
}
#[panic_handler]
pub fn panic(info: &PanicInfo) -> ! {
println!("Panic happened: {}", info);
halt()
}
pub fn halt() -> ! {
println!("we've halted");
loop {
x86_64::instructions::hlt();
}
}
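The init() above references allocator::init_heap but cannot call it yet, because init_heap needs a page-table Mapper and a FrameAllocator. A minimal sketch of the intended wiring, assuming hypothetical mapper and frame_allocator values that this commit does not construct anywhere:

// Hypothetical wiring; `mapper`, `frame_allocator`, and `init_with_heap` are
// assumptions for illustration, not part of this commit.
use x86_64::structures::paging::{FrameAllocator, Mapper, Size4KiB};

pub fn init_with_heap(
    mapper: &mut impl Mapper<Size4KiB>,
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) {
    allocator::init_heap(mapper, frame_allocator).expect("heap initialization failed");
    // With the heap mapped, collection types from the `alloc` crate become usable.
    let numbers: alloc::vec::Vec<u32> = (0..4u32).collect();
    println!("heap allocation works: {:?}", numbers);
}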


@@ -0,0 +1,114 @@
use super::{Locked, align_up};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};
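// First-fit allocator: free regions are kept in a singly linked list whose
// nodes are stored inside the free memory itself.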
pub struct LinkedListAllocator {
head: ListNode,
}
impl LinkedListAllocator {
pub const fn new() -> Self {
Self {
head: ListNode::new(0),
}
}
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
unsafe { self.add_free_region(heap_start, heap_size) };
}
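// Pushes a free region onto the front of the list; the caller must pass an
// address that is ListNode-aligned and a size large enough to hold a ListNode.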
unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr);
assert!(size >= mem::size_of::<ListNode>());
let mut node = ListNode::new(size);
node.next = self.head.next.take();
let node_ptr = addr as *mut ListNode;
unsafe {
node_ptr.write(node);
self.head.next = Some(&mut *node_ptr)
}
}
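// Checks whether `region` can hold an allocation of `size`/`align`; returns the
// aligned start address, rejecting fits that would leave an unusable remainder.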
fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
let alloc_start = align_up(region.start_addr(), align);
let alloc_end = alloc_start.checked_add(size).ok_or(())?;
if alloc_end > region.end_addr() {
return Err(());
}
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
return Err(());
}
Ok(alloc_start)
}
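// Walks the free list, removes the first region that fits, and returns it
// together with the allocation start address.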
fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
let mut current = &mut self.head;
while let Some(ref mut region) = current.next {
if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
let next = region.next.take();
let ret = Some((current.next.take().unwrap(), alloc_start));
current.next = next;
return ret;
} else {
current = current.next.as_mut().unwrap();
}
}
None
}
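// Adjusts the layout so every allocation is large and aligned enough to store
// a ListNode when the block is freed again.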
fn size_align(layout: Layout) -> (usize, usize) {
let layout = layout
.align_to(mem::align_of::<ListNode>())
.expect("adjusting alignment failed")
.pad_to_align();
let size = layout.size().max(mem::align_of::<ListNode>());
(size, layout.align())
}
}
unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let (size, align) = LinkedListAllocator::size_align(layout);
let mut allocator = self.lock();
if let Some((region, alloc_start)) = allocator.find_region(size, align) {
let alloc_end = alloc_start.checked_add(size).expect("overflow");
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 {
unsafe {
allocator.add_free_region(alloc_end, excess_size);
}
}
alloc_start as *mut u8
} else {
ptr::null_mut()
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let (size, _) = LinkedListAllocator::size_align(layout);
unsafe { self.lock().add_free_region(ptr as usize, size) }
}
}
struct ListNode {
size: usize,
next: Option<&'static mut ListNode>,
}
impl ListNode {
const fn new(size: usize) -> Self {
ListNode { size, next: None }
}
fn start_addr(&self) -> usize {
self as *const Self as usize
}
fn end_addr(&self) -> usize {
self.start_addr() + self.size
}
}

62
src/allocator/mod.rs Normal file

@@ -0,0 +1,62 @@
use crate::memory::area_frame_allocator;
use linked_list::LinkedListAllocator;
use x86_64::VirtAddr;
use x86_64::structures::paging::mapper::MapToError;
use x86_64::structures::paging::{FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB};
pub mod linked_list;
pub const HEAP_START: usize = 0x_4444_4444_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100KiB
#[global_allocator]
static ALLOCATOR: Locked<LinkedListAllocator> = Locked::new(LinkedListAllocator::new());
pub struct Locked<A> {
inner: spin::Mutex<A>,
}
impl<A> Locked<A> {
pub const fn new(inner: A) -> Self {
Locked {
inner: spin::Mutex::new(inner),
}
}
pub fn lock(&self) -> spin::MutexGuard<A> {
self.inner.lock()
}
}
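// Maps HEAP_SIZE bytes of 4 KiB pages starting at HEAP_START and hands the
// mapped region to the global linked list allocator.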
pub fn init_heap(
mapper: &mut impl Mapper<Size4KiB>,
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<(), MapToError<Size4KiB>> {
let page_range = {
let heap_start = VirtAddr::new(HEAP_START as u64);
let heap_end = heap_start + HEAP_SIZE as u64 - 1u64;
let heap_start_page = Page::containing_address(heap_start);
let heap_end_page = Page::containing_address(heap_end);
Page::range_inclusive(heap_start_page, heap_end_page)
};
for page in page_range {
let frame = frame_allocator
.allocate_frame()
.ok_or(MapToError::FrameAllocationFailed)?;
let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
unsafe { mapper.map_to(page, frame, flags, frame_allocator)?.flush() };
}
unsafe {
ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
}
Ok(())
}
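// Rounds `addr` up to the next multiple of `align`.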
fn align_up(addr: usize, align: usize) -> usize {
let remainder = addr % align;
if remainder == 0 {
addr
} else {
addr - remainder + align
}
}
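Rust Layout alignments are always powers of two, so align_up can equivalently be written with a bitmask; a sketch of that form (not used in this commit):

// Equivalent to align_up, valid for power-of-two `align` values only.
fn align_up_pow2(addr: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}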

52
src/gdt/mod.rs Normal file

@@ -0,0 +1,52 @@
use lazy_static::lazy_static;
use x86_64::VirtAddr;
use x86_64::structures::gdt::{Descriptor, GlobalDescriptorTable, SegmentSelector};
use x86_64::structures::tss::TaskStateSegment;
// Selectors produced while building the GDT:
// code_selector: the kernel code segment to load into CS
// tss_selector: the TSS segment whose IST holds the double fault stack
struct Selectors {
code_selector: SegmentSelector,
tss_selector: SegmentSelector,
}
// index of the double fault emergency stack in the interrupt stack table
pub const DOUBLE_FAULT_1ST_INDEX: u16 = 0;
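// Loads the GDT, reloads CS with the new code selector, and loads the TSS so
// the double fault handler runs on its own stack.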
pub fn init() {
use x86_64::instructions::segmentation::{CS, Segment};
use x86_64::instructions::tables::load_tss;
GDT.0.load();
unsafe {
CS::set_reg(GDT.1.code_selector);
load_tss(GDT.1.tss_selector);
}
}
lazy_static! {
static ref GDT: (GlobalDescriptorTable, Selectors) = {
let mut gdt = GlobalDescriptorTable::new();
let code_selector = gdt.append(Descriptor::kernel_code_segment());
let tss_selector = gdt.append(Descriptor::tss_segment(&TSS));
(
gdt,
Selectors {
code_selector,
tss_selector,
},
)
};
static ref TSS: TaskStateSegment = {
let mut tss = TaskStateSegment::new();
tss.interrupt_stack_table[DOUBLE_FAULT_1ST_INDEX as usize] = {
const STACK_SIZE: usize = 4096 * 5;
static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE]; // TODO make real stack
let stack_start = VirtAddr::from_ptr(&raw const STACK);
stack_start + STACK_SIZE as u64 // end of stack
};
tss
};
}

110
src/interrupt/mod.rs Normal file

@@ -0,0 +1,110 @@
use crate::{print, println};
use lazy_static::lazy_static;
use pic8259::ChainedPics;
use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode};
pub fn init_idt() {
IDT.load();
}
pub const PIC_1_OFFSET: u8 = 32;
pub const PIC_2_OFFSET: u8 = PIC_1_OFFSET + 8;
pub static PICS: spin::Mutex<ChainedPics> =
spin::Mutex::new(unsafe { ChainedPics::new(PIC_1_OFFSET, PIC_2_OFFSET) });
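// Interrupt vector numbers for the hardware IRQs after the PICs are remapped
// to the range 32..=47.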
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum InterruptIndex {
Timer = PIC_1_OFFSET,
Keyboard,
}
impl InterruptIndex {
fn as_u8(self) -> u8 {
self as u8
}
fn as_usize(self) -> usize {
usize::from(self.as_u8())
}
}
lazy_static! {
static ref IDT: InterruptDescriptorTable = {
let mut idt = InterruptDescriptorTable::new();
idt.breakpoint.set_handler_fn(breakpoint_handler);
unsafe {
idt.double_fault
.set_handler_fn(double_fault_handler)
.set_stack_index(crate::gdt::DOUBLE_FAULT_1ST_INDEX);
}
idt[InterruptIndex::Timer.as_u8()].set_handler_fn(timer_interrupt_handler);
idt[InterruptIndex::Keyboard.as_u8()].set_handler_fn(keyboard_interrupt_handler);
idt.page_fault.set_handler_fn(page_fault_handler);
idt
};
}
extern "x86-interrupt" fn breakpoint_handler(stack_frame: InterruptStackFrame) {
println!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame);
}
extern "x86-interrupt" fn double_fault_handler(
stack_frame: InterruptStackFrame,
_error_code: u64,
) -> ! {
panic!("EXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame);
}
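// Reads a scancode from PS/2 data port 0x60, decodes it, prints the resulting
// key, then signals end-of-interrupt to the PIC.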
extern "x86-interrupt" fn keyboard_interrupt_handler(_keyboard_stack_frame: InterruptStackFrame) {
use pc_keyboard::{DecodedKey, HandleControl, Keyboard, ScancodeSet1, layouts};
use spin::Mutex;
use x86_64::instructions::port::Port;
lazy_static! {
static ref KEYBOARD: Mutex<Keyboard<layouts::Us104Key, ScancodeSet1>> =
Mutex::new(Keyboard::new(
ScancodeSet1::new(),
layouts::Us104Key,
HandleControl::Ignore
));
}
let mut keyboard = KEYBOARD.lock();
let mut port = Port::new(0x60);
let scancode: u8 = unsafe { port.read() };
if let Ok(Some(key_event)) = keyboard.add_byte(scancode) {
if let Some(key) = keyboard.process_keyevent(key_event) {
match key {
DecodedKey::Unicode(character) => print!("{}", character),
DecodedKey::RawKey(key) => print!("{:?}", key),
}
}
}
unsafe {
PICS.lock()
.notify_end_of_interrupt(InterruptIndex::Keyboard.as_u8());
}
}
extern "x86-interrupt" fn page_fault_handler(
stack_frame: InterruptStackFrame,
error_code: PageFaultErrorCode,
) {
use x86_64::registers::control::Cr2;
println!("EXCEPTION: PAGE FAULT");
println!("Accessed Address: {:?}", Cr2::read());
println!("Error Code: {:?}?", error_code);
println!("{:#?}", stack_frame);
crate::halt();
}
extern "x86-interrupt" fn timer_interrupt_handler(_stack_frame: InterruptStackFrame) {
print!(".");
unsafe {
PICS.lock()
.notify_end_of_interrupt(InterruptIndex::Timer.as_u8());
}
}


@@ -1,26 +1,93 @@
#![no_std]
#![feature(abi_x86_interrupt)]
extern crate alloc;
extern crate multiboot2;
extern crate rlibc;
mod allocator;
mod gdt;
mod interrupt;
mod memory;
mod vga_buffer;
use core::panic::PanicInfo;
pub fn halt() -> ! {
println!("we've halted");
loop {
x86_64::instructions::hlt();
}
}
use multiboot2::{BootInformation, BootInformationHeader};
#[unsafe(no_mangle)]
pub extern "C" fn rust_main(_multiboot_information_address: usize) {
pub extern "C" fn rust_main(multiboot_information_address: usize) {
println!("Hello World!");
// get multiboot info
let boot_info = unsafe {
BootInformation::load(multiboot_information_address as *const BootInformationHeader)
.unwrap()
};
let memory_map_tag = boot_info.memory_map_tag().expect("Memory map tag required");
println!("memory areas:");
for area in memory_map_tag.memory_areas() {
println!(
" start: 0x{:x}, length: 0x{:x}",
area.start_address(),
area.size()
);
}
let elf_sections_tag = boot_info
.elf_sections_tag()
.expect("Elf sections tag required");
println!("kernel sections:");
for section in elf_sections_tag.sections() {
println!(
" addr: 0x{:x}, size: 0x{:x}, flags: 0x{:x}",
section.start_address(),
section.size(),
section.flags()
);
}
let kernel_start = elf_sections_tag
.sections()
.map(|s| s.start_address())
.min()
.unwrap();
let kernel_end = elf_sections_tag
.sections()
.map(|s| s.end_address())
.max()
.unwrap();
let multiboot_start = multiboot_information_address;
let multiboot_end = multiboot_start + boot_info.total_size() as usize;
println!(
"kernel_start: 0x{:x} kernel_end: 0x{:x} kernel_size: {}B\nmultiboot_start: 0x{:x} multiboot_end: 0x{:x} multiboot_size: {}B",
kernel_start,
kernel_end,
kernel_end - kernel_start,
multiboot_start,
multiboot_end,
multiboot_end - multiboot_start
);
//init();
halt()
}
pub fn init() {
gdt::init();
interrupt::init_idt();
unsafe { interrupt::PICS.lock().initialize() };
x86_64::instructions::interrupts::enable();
}
#[panic_handler]
pub fn panic(info: &PanicInfo) -> ! {
println!("Panic happened: {}", info);
halt()
}
pub fn halt() -> ! {
println!("we've halted");
loop {
x86_64::instructions::hlt();
}
}


@@ -0,0 +1,95 @@
use core::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};
use multiboot2::{BootInformation, BootInformationHeader};
use super::{Frame, PAGE_SIZE};
use crate::allocator::{HEAP_SIZE, HEAP_START, Locked};
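// Work-in-progress frame allocator; it records the kernel and multiboot
// ranges, but find_frame does not consult them yet.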
pub struct AreaFrameAllocator {
next_free_frame: Frame,
kernel_start: Frame,
kernel_end: Frame,
multiboot_start: Frame,
multiboot_end: Frame,
}
impl AreaFrameAllocator {
pub const fn new() -> Self {
Self {
next_free_frame: Frame::new(),
kernel_start: Frame::new(),
kernel_end: Frame::new(),
multiboot_start: Frame::new(),
multiboot_end: Frame::new(),
}
}
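// Reads the kernel and multiboot bounds from the boot information and returns
// an allocator initialized with them.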
pub unsafe fn init(&mut self, multiboot_information_address: usize) -> Self {
let boot_info = unsafe {
BootInformation::load(multiboot_information_address as *const BootInformationHeader)
.unwrap()
};
//let memory_map_tag = boot_info.memory_map_tag().expect("Memory map tag required");
let elf_sections_tag = boot_info
.elf_sections_tag()
.expect("Elf sections tag required");
let kernel_start = elf_sections_tag
.sections()
.map(|s| s.start_address())
.min()
.unwrap();
let kernel_end = elf_sections_tag
.sections()
.map(|s| s.end_address())
.max()
.unwrap();
let multiboot_start = multiboot_information_address;
let multiboot_end = multiboot_start + boot_info.total_size();
AreaFrameAllocator {
next_free_frame: Frame { number: 0 },
kernel_start: Frame {
number: kernel_start as usize,
},
kernel_end: Frame {
number: kernel_end as usize,
},
multiboot_start: Frame {
number: multiboot_start,
},
multiboot_end: Frame {
number: multiboot_end,
},
}
}
fn find_frame(&mut self, _size: usize, _align: usize) -> Option<(Frame, usize)> {
// In-progress placeholder: ignores the requested size and alignment and only
// hands out addresses within the first page above HEAP_START.
let next_addr = self.next_free_frame.number * PAGE_SIZE + HEAP_START;
if next_addr + core::mem::align_of::<Frame>() <= PAGE_SIZE + HEAP_START {
Some((Frame::containing_address(next_addr), next_addr))
} else {
None
}
}
fn size_align(layout: Layout) -> (usize, usize) {
let layout = layout
.align_to(mem::align_of::<Frame>())
.expect("adjusting alignment failed")
.pad_to_align();
let size = layout.size().max(mem::align_of::<Frame>());
(size, layout.align())
}
}
unsafe impl GlobalAlloc for Locked<AreaFrameAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let (size, align) = AreaFrameAllocator::size_align(layout);
let mut allocator = self.lock();
match allocator.find_frame(size, align) {
Some((_, addr)) => addr as *mut u8,
None => ptr::null_mut(),
}
}
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
// Deallocation is not implemented yet; memory handed out here is leaked.
}
}

19
src/memory/mod.rs Normal file

@@ -0,0 +1,19 @@
pub mod area_frame_allocator;
pub const PAGE_SIZE: usize = 4096;
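// A physical memory frame, identified by its index (address / PAGE_SIZE).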
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
number: usize,
}
impl Frame {
pub const fn new() -> Self {
Self { number: 0 }
}
fn containing_address(address: usize) -> Frame {
Frame {
number: address / PAGE_SIZE,
}
}
}