diff --git a/kernel-rs/.gitmodules b/kernel-rs/.gitmodules
index 7612dca4..820f6043 100644
--- a/kernel-rs/.gitmodules
+++ b/kernel-rs/.gitmodules
@@ -1,3 +1,6 @@
[submodule "multiboot2-elf64"]
path = multiboot2-elf64
url = git@github.com:jzck/multiboot2-elf64.git
+[submodule "x86"]
+ path = x86
+ url = https://github.com/jzck/x86.git
diff --git a/kernel-rs/Cargo.toml b/kernel-rs/Cargo.toml
index 68a70731..558231c0 100644
--- a/kernel-rs/Cargo.toml
+++ b/kernel-rs/Cargo.toml
@@ -10,3 +10,4 @@ crate-type = ["staticlib"]
rlibc = "1.0"
bitflags = "1.0.1"
multiboot2 = { path = "multiboot2-elf64" }
+x86 = { path = "x86" }
diff --git a/kernel-rs/src/console.rs b/kernel-rs/src/console.rs
index bf8bc88e..73a307bc 100644
--- a/kernel-rs/src/console.rs
+++ b/kernel-rs/src/console.rs
@@ -3,7 +3,6 @@ extern crate core;
use acpi;
use cpuio;
-use x86;
use core::char;
use vga::*;
@@ -178,9 +177,11 @@ pub fn acpi_info() -> Result <(), &'static str> {
}
pub fn regs() -> Result <(), &'static str> {
- println!("cr0={:#b}", x86::cr0());
- println!("cr3={:#x}", x86::cr3());
- println!("cr4={:#b}", x86::cr4());
+ use x86::registers::control::*;
+ println!("cr0={:#b}", Cr0::read());
+ println!("cr3={:?}", Cr3::read());
+ flush!();
+ // TODO implement cr4 flags in `x86` module
+ // println!("cr4={:#b}", Cr4::read());
Ok(())
}
-
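
`regs()` now reads the control registers through the `x86` submodule instead of inline asm. For reference, the CR0 bits it prints can be decoded by hand from the raw value; the sketch below is standalone, host-runnable Rust and does not use the crate's API — the bit positions come from the Intel SDM.

```rust
// Standalone sketch: decode the CR0 bits relevant to this kernel from a raw
// 32-bit value. Bit positions are architectural, not crate items.
fn decode_cr0(cr0: u32) -> [(&'static str, bool); 3] {
    [
        ("PE (protected mode)", cr0 & (1 << 0) != 0),
        ("WP (write protect)", cr0 & (1 << 16) != 0),
        ("PG (paging)", cr0 & (1 << 31) != 0),
    ]
}

fn main() {
    let cr0: u32 = (1 << 0) | (1 << 16) | (1 << 31); // hypothetical raw value
    for (name, set) in decode_cr0(cr0).iter() {
        println!("{:<22} {}", name, set);
    }
}
```
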
diff --git a/kernel-rs/src/lib.rs b/kernel-rs/src/lib.rs
index 65b0d353..83e48b4e 100644
--- a/kernel-rs/src/lib.rs
+++ b/kernel-rs/src/lib.rs
@@ -11,8 +11,9 @@
extern crate rlibc;
extern crate multiboot2;
-#[macro_use] extern crate bitflags;
+// #[macro_use] extern crate bitflags;
#[macro_use] extern crate alloc;
+extern crate x86;
/// 80x25 screen and simplistic terminal driver
#[macro_use] pub mod vga;
@@ -20,14 +21,14 @@ extern crate multiboot2;
pub mod keyboard;
/// simplistic kernel commands
pub mod console;
-/// wrappers around the x86-family I/O instructions.
+/// Rust wrappers around CPU I/O instructions.
pub mod cpuio;
/// ACPI self-contained module
pub mod acpi;
-/// physical frame allocator + paging module
+/// physical frame allocator + paging module + heap allocator
pub mod memory;
-/// a few x86 register and instruction wrappers
-pub mod x86;
+// x86 interrupts
+// pub mod interrupts;
fn init_kernel(multiboot_info_addr: usize) -> Result <(), &'static str> {
let boot_info = unsafe { multiboot2::load(multiboot_info_addr)};
@@ -40,6 +41,7 @@ fn init_kernel(multiboot_info_addr: usize) -> Result <(), &'static str> {
acpi::init()?;
}
enable_paging();
+
enable_write_protect_bit();
memory::init(&boot_info);
vga::init();
@@ -47,11 +49,13 @@ fn init_kernel(multiboot_info_addr: usize) -> Result <(), &'static str> {
}
fn enable_paging() {
- unsafe { x86::cr0_write(x86::cr0() | (1 << 31)) };
+ use x86::registers::control::{Cr0, Cr0Flags};
+ unsafe { Cr0::write(Cr0::read() | Cr0Flags::PAGING) };
}
fn enable_write_protect_bit() {
- unsafe { x86::cr0_write(x86::cr0() | (1 << 16)) };
+ use x86::registers::control::{Cr0, Cr0Flags};
+ unsafe { Cr0::write(Cr0::read() | Cr0Flags::WRITE_PROTECT) };
}
#[no_mangle]
@@ -61,18 +65,6 @@ pub extern fn kmain(multiboot_info_addr: usize) -> ! {
cpuio::halt();
}
- use alloc::boxed::Box;
- let mut heap_test = Box::new(42);
- *heap_test -= 15;
- let heap_test2 = Box::new("Hello");
- println!("{:?} {:?}", heap_test, heap_test2);
-
- let mut vec_test = vec![1,2,3,4,5,6,7];
- vec_test[3] = 42;
- for i in &vec_test {
- print!("{} ", i);
- }
-
loop { keyboard::kbd_callback(); }
}
@@ -89,7 +81,6 @@ pub extern fn panic_fmt(fmt: core::fmt::Arguments, file: &'static str, line: u32
println!("LINE: {}", line);
flush!();
loop {}
-
}
use memory::BumpAllocator;
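
`enable_paging()` and `enable_write_protect_bit()` now OR in named `Cr0Flags` instead of raw shifts. A quick standalone check that the named flags land on the same bits the old code set; the constants below are assumptions mirroring what `Cr0Flags::PAGING` and `Cr0Flags::WRITE_PROTECT` should expand to, not the crate's definitions.

```rust
// Assumed bit values for the two CR0 flags used above (PG = bit 31,
// WP = bit 16); the real definitions live in the x86 submodule.
const PAGING: u32 = 1 << 31;
const WRITE_PROTECT: u32 = 1 << 16;

fn main() {
    let mut cr0: u32 = 0x0000_0011; // hypothetical pre-paging CR0 value
    cr0 |= PAGING;        // old code: cr0_write(cr0() | (1 << 31))
    cr0 |= WRITE_PROTECT; // old code: cr0_write(cr0() | (1 << 16))
    assert_ne!(cr0 & PAGING, 0);
    assert_ne!(cr0 & WRITE_PROTECT, 0);
    println!("cr0 = {:#034b}", cr0);
}
```
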
diff --git a/kernel-rs/src/memory/area_allocator.rs b/kernel-rs/src/memory/area_allocator.rs
index 09304cb6..6a24bb94 100644
--- a/kernel-rs/src/memory/area_allocator.rs
+++ b/kernel-rs/src/memory/area_allocator.rs
@@ -1,14 +1,15 @@
use memory::*;
use multiboot2::{MemoryAreaIter, MemoryArea};
+use x86::*;
pub struct AreaFrameAllocator {
- next_free_frame: Frame,
+ next_free_frame: PhysFrame,
current_area: Option<&'static MemoryArea>,
areas: MemoryAreaIter,
- kernel_start: Frame,
- kernel_end: Frame,
- multiboot_start: Frame,
- multiboot_end: Frame,
+ kernel_start: PhysFrame,
+ kernel_end: PhysFrame,
+ multiboot_start: PhysFrame,
+ multiboot_end: PhysFrame,
}
impl AreaFrameAllocator {
@@ -16,13 +17,17 @@ impl AreaFrameAllocator {
multiboot_start: usize, multiboot_end: usize,
memory_areas: MemoryAreaIter) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
- next_free_frame: Frame::containing_address(0),
+ next_free_frame: PhysFrame { number: 0 },
current_area: None,
areas: memory_areas,
- kernel_start: Frame::containing_address(kernel_start),
- kernel_end: Frame::containing_address(kernel_end),
- multiboot_start: Frame::containing_address(multiboot_start),
- multiboot_end: Frame::containing_address(multiboot_end),
+ kernel_start: PhysFrame::containing_address(
+ PhysAddr::new(kernel_start as u32)),
+ kernel_end: PhysFrame::containing_address(
+ PhysAddr::new(kernel_end as u32)),
+ multiboot_start: PhysFrame::containing_address(
+ PhysAddr::new(multiboot_start as u32)),
+ multiboot_end: PhysFrame::containing_address(
+ PhysAddr::new(multiboot_end as u32)),
};
allocator.choose_next_area();
allocator
@@ -31,11 +36,12 @@ impl AreaFrameAllocator {
fn choose_next_area(&mut self) {
// get next area with free frames
self.current_area = self.areas.clone().filter(|area| {
- Frame::containing_address(area.end_address()) >= self.next_free_frame
+ area.end_address() >= self.next_free_frame.start_address().as_u32() as usize
}).min_by_key(|area| area.start_address());
if let Some(area) = self.current_area {
- let start_frame = Frame::containing_address(area.start_address());
+ let start_frame = PhysFrame::containing_address(
+ PhysAddr::new(area.start_address() as u32));
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
@@ -44,21 +50,22 @@ impl AreaFrameAllocator {
}
impl FrameAllocator for AreaFrameAllocator {
- fn allocate_frame(&mut self) -> Option<Frame> {
+ fn allocate_frame(&mut self) -> Option<PhysFrame> {
if let Some(area) = self.current_area {
- let frame = Frame { number: self.next_free_frame.number };
- let current_area_last_frame = Frame::containing_address(area.end_address());
+ let frame = PhysFrame { number: self.next_free_frame.number };
+ let current_area_last_frame = PhysFrame::containing_address(
+ PhysAddr::new(area.end_address() as u32));
if frame > current_area_last_frame {
// all frames are taken in this area
self.choose_next_area();
} else if frame >= self.kernel_start && frame <= self.kernel_end {
// frame used by kernel
- self.next_free_frame = Frame {
+ self.next_free_frame = PhysFrame {
number: self.kernel_end.number + 1,
}
} else if frame >= self.multiboot_start && frame <= self.multiboot_end {
// frame used by multiboot
- self.next_free_frame = Frame {
+ self.next_free_frame = PhysFrame {
number: self.multiboot_end.number + 1,
}
} else {
@@ -72,7 +79,7 @@ impl FrameAllocator for AreaFrameAllocator {
}
}
- fn deallocate_frame(&mut self, frame: Frame) {
+ fn deallocate_frame(&mut self, frame: PhysFrame) {
unimplemented!();
}
}
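
The allocator logic is unchanged; only the frame type moved to the `x86` crate. The behaviour expected from `PhysFrame::containing_address` for 4 KiB frames is plain integer division, sketched here with a toy frame type rather than the crate's `PhysFrame`.

```rust
// Toy model of frame lookup by physical address: frame number = addr / 4096.
const PAGE_SIZE: u32 = 4096;

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct ToyFrame { number: u32 }

fn containing_address(addr: u32) -> ToyFrame {
    ToyFrame { number: addr / PAGE_SIZE }
}

fn main() {
    assert_eq!(containing_address(0x0000).number, 0);
    assert_eq!(containing_address(0x1fff).number, 1);      // second 4 KiB frame
    assert_eq!(containing_address(0xb8000).number, 0xb8);  // VGA buffer frame
    println!("{:?}", containing_address(0xb8000));
}
```
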
diff --git a/kernel-rs/src/memory/mod.rs b/kernel-rs/src/memory/mod.rs
index 76189437..c2a6c244 100644
--- a/kernel-rs/src/memory/mod.rs
+++ b/kernel-rs/src/memory/mod.rs
@@ -7,57 +7,13 @@ mod paging;
pub use self::area_allocator::*;
pub use self::heap_allocator::*;
pub use self::paging::remap_the_kernel;
-use self::paging::PhysicalAddress;
use multiboot2;
-
-#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct Frame {
- number: usize,
-}
-
-impl Frame {
- fn containing_address(address: usize) -> Frame {
- Frame{ number: address / PAGE_SIZE }
- }
-
- fn start_address(&self) -> PhysicalAddress {
- self.number * PAGE_SIZE
- }
-
- fn clone(&self) ->Frame {
- Frame { number: self.number }
- }
-
- fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
- FrameIter {
- start,
- end,
- }
- }
-}
+use x86::*;
+use x86::structures::paging::*;
pub trait FrameAllocator {
- fn allocate_frame(&mut self) -> Option<Frame>;
- fn deallocate_frame(&mut self, frame: Frame);
-}
-
-struct FrameIter {
- start: Frame,
- end: Frame,
-}
-
-impl Iterator for FrameIter {
- type Item = Frame;
-
- fn next(&mut self) -> Option<Frame> {
- if self.start <= self.end {
- let frame = self.start.clone();
- self.start.number += 1;
- Some(frame)
- } else {
- None
- }
- }
+ fn allocate_frame(&mut self) -> Option<PhysFrame>;
+ fn deallocate_frame(&mut self, frame: PhysFrame);
}
/// memory initialisation should only be called once
@@ -82,13 +38,14 @@ pub fn init(boot_info: &multiboot2::BootInformation) {
let mut active_table = paging::remap_the_kernel(&mut frame_allocator,
boot_info);
- use self::paging::Page;
use {HEAP_START, HEAP_SIZE};
- let heap_start_page = Page::containing_address(HEAP_START);
- let heap_end_page = Page::containing_address(HEAP_START + HEAP_SIZE - 1);
+ let heap_start_page = Page::containing_address(
+ VirtAddr::new(HEAP_START as u32));
+ let heap_end_page = Page::containing_address(
+ VirtAddr::new(HEAP_START as u32 + HEAP_SIZE as u32 - 1));
- for page in Page::range_inclusive(heap_start_page, heap_end_page) {
- active_table.map(page, paging::EntryFlags::WRITABLE, &mut frame_allocator);
+ for page in heap_start_page..heap_end_page + 1 {
+ active_table.map(page, PageTableFlags::WRITABLE, &mut frame_allocator);
}
}
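
The heap mapping loop works page by page; the arithmetic it depends on is easy to check on the host. The `HEAP_START`/`HEAP_SIZE` values below are made up for illustration (the real constants are defined in the crate root), but they show why the last page — containing `HEAP_START + HEAP_SIZE - 1` — must be included in the range.

```rust
// Standalone check of the heap page-range arithmetic (illustrative constants).
const PAGE_SIZE: u32 = 4096;
const HEAP_START: u32 = 0x40_0000;  // assumed for illustration
const HEAP_SIZE: u32 = 100 * 1024;  // assumed for illustration

fn main() {
    let first_page = HEAP_START / PAGE_SIZE;
    let last_page = (HEAP_START + HEAP_SIZE - 1) / PAGE_SIZE;
    // every page in first_page..=last_page must be mapped WRITABLE, or the
    // heap allocator will page-fault on its final page
    assert_eq!(last_page - first_page + 1, 25);
    println!("heap spans pages {}..={}", first_page, last_page);
}
```
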
diff --git a/kernel-rs/src/memory/paging/entry.rs b/kernel-rs/src/memory/paging/entry.rs
deleted file mode 100644
index 26edb9c1..00000000
--- a/kernel-rs/src/memory/paging/entry.rs
+++ /dev/null
@@ -1,66 +0,0 @@
-use memory::Frame;
-
-pub struct Entry(u32);
-use multiboot2::ElfSection;
-
-impl Entry {
- pub fn is_unused(&self) -> bool {
- self.0 == 0
- }
-
- pub fn set_unused(&mut self) {
- self.0 = 0;
- }
-
- pub fn flags(&self) -> EntryFlags {
- EntryFlags::from_bits_truncate(self.0)
- }
-
- pub fn pointed_frame(&self) -> Option<Frame> {
- if self.flags().contains(EntryFlags::PRESENT) {
- Some(Frame::containing_address(
- self.0 as usize & 0xffff_f000))
- } else {
- None
- }
- }
-
- pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
- assert!(frame.start_address() & !0xffff_f000 == 0);
- self.0 = (frame.start_address() as u32) | flags.bits();
- }
-}
-
-bitflags! {
- pub struct EntryFlags: u32 {
- const PRESENT = 1 << 0;
- const WRITABLE = 1 << 1;
- const USER_ACCESSIBLE = 1 << 2;
- const WRITE_THROUGH = 1 << 3;
- const NO_CACHE = 1 << 4;
- const ACCESSED = 1 << 5;
- const DIRTY = 1 << 6;
- const HUGE_PAGE = 1 << 7;
- const GLOBAL = 1 << 8;
- // LONG MODE
- // const NO_EXECUTE = 1 << 63;
- }
-}
-
-impl EntryFlags {
- pub fn from_elf_section_flags(section: &ElfSection) -> EntryFlags {
- use multiboot2::ElfSectionFlags;
-
- let mut flags = EntryFlags::empty();
- if section.flags().contains(ElfSectionFlags::ALLOCATED) {
- flags = flags | EntryFlags::PRESENT;
- }
- if section.flags().contains(ElfSectionFlags::WRITABLE) {
- flags = flags | EntryFlags::WRITABLE;
- }
- // if !section.flags().contains(ElfSectionFlags::EXECUTABLE) {
- // flags = flags | EntryFlags::NO_EXECUTE;
- // }
- flags
- }
-}
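
The deleted `EntryFlags` documented the 32-bit (non-PAE) entry layout that `PageTableFlags` is now expected to cover. A plain-constant sketch of that layout, kept here for reference; these constants are illustrative, not the crate's items.

```rust
// Bit layout of a 32-bit non-PAE page table entry: bits 31..12 hold the frame
// address, bits 11..0 hold the flags. Illustrative constants only.
const PRESENT: u32 = 1 << 0;
const WRITABLE: u32 = 1 << 1;
const HUGE_PAGE: u32 = 1 << 7; // 4 MiB page when set in a P2 entry

fn main() {
    // an entry pointing at the VGA frame with PRESENT | WRITABLE:
    let entry: u32 = 0xb8000 | PRESENT | WRITABLE;
    assert_eq!(entry & 0xffff_f000, 0xb8000);       // frame address field
    assert_eq!(entry & 0xfff, PRESENT | WRITABLE);  // flag field
    assert_eq!(entry & HUGE_PAGE, 0);
    println!("entry = {:#010x}", entry);
}
```
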
diff --git a/kernel-rs/src/memory/paging/mapper.rs b/kernel-rs/src/memory/paging/mapper.rs
index 93d5be86..408149ee 100644
--- a/kernel-rs/src/memory/paging/mapper.rs
+++ b/kernel-rs/src/memory/paging/mapper.rs
@@ -1,82 +1,87 @@
-use super::{VirtualAddress, PhysicalAddress, Page, ENTRY_COUNT};
-use super::entry::*;
-use super::table::{self, Table, Level2};
-use memory::{PAGE_SIZE, Frame, FrameAllocator};
+use memory::{PAGE_SIZE, FrameAllocator};
use core::ptr::Unique;
-use x86;
+use x86::structures::paging::*;
+use x86::instructions::tlb;
+use x86::usize_conversions::usize_from;
+use x86::*;
+use super::table::RecTable;
+
+// virtual address of recursively mapped P2
+// for protected mode, non-PAE paging
+// https://wiki.osdev.org/Page_Tables
+pub const P2: *mut PageTable = 0xffff_f000 as *mut _;
pub struct Mapper {
- p2: Unique<Table<Level2>>,
+ p2: Unique,
}
impl Mapper {
pub unsafe fn new() -> Mapper {
Mapper {
- p2: Unique::new_unchecked(table::P2),
+ p2: Unique::new_unchecked(self::P2),
}
}
// the remaining mapping methods, all public
- pub fn p2(&self) -> &Table<Level2> {
+ pub fn p2(&self) -> &PageTable {
unsafe { self.p2.as_ref() }
}
- pub fn p2_mut(&mut self) -> &mut Table<Level2> {
+ pub fn p2_mut(&mut self) -> &mut PageTable {
unsafe { self.p2.as_mut() }
}
- pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress>
+ /// virtual addr to physical addr translation
+ pub fn translate(&self, virtual_address: VirtAddr) -> Option<PhysAddr>
{
- let offset = virtual_address % PAGE_SIZE;
+ let offset = virtual_address.as_u32() % PAGE_SIZE as u32;
self.translate_page(Page::containing_address(virtual_address))
- .map(|frame| frame.number * PAGE_SIZE + offset)
+ .map(|frame| frame.start_address() + offset)
+
}
- pub fn translate_page(&self, page: Page) -> Option<Frame> {
-
- let p1 = self.p2().next_table(page.p2_index());
+ /// virtual page to physical frame translation
+ pub fn translate_page(&self, page: Page) -> Option<PhysFrame> {
+ let p1 = self.p2().next_table(usize_from(u32::from(page.p2_index())));
let huge_page = || {
let p2_entry = &self.p2()[page.p2_index()];
if let Some(start_frame) = p2_entry.pointed_frame() {
- if p2_entry.flags().contains(EntryFlags::HUGE_PAGE) {
- // 4KiB alignment check
- assert!(start_frame.number % ENTRY_COUNT == 0);
- return Some(Frame {
- number: start_frame.number + page.p1_index()
- });
- }
+ if p2_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
+ // TODO 4MiB alignment check
+ return Some(start_frame + u32::from(page.p1_index()));
+ }
}
None
};
p1.and_then(|p1| p1[page.p1_index()].pointed_frame())
- .or_else(huge_page)
+ .or_else(huge_page)
}
-
- pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
+ /// map a virtual page to a physical frame in the page tables
+ pub fn map_to<A>(&mut self, page: Page, frame: PhysFrame, flags: PageTableFlags,
allocator: &mut A)
where A: FrameAllocator
{
let p2 = self.p2_mut();
- let p1 = p2.next_table_create(page.p2_index(), allocator);
-
+ let p1 = p2.next_table_create(usize_from(u32::from(page.p2_index())), allocator);
assert!(p1[page.p1_index()].is_unused());
- p1[page.p1_index()].set(frame, flags | EntryFlags::PRESENT);
+ p1[page.p1_index()].set(frame, flags | PageTableFlags::PRESENT);
}
- pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
+ pub fn map<A>(&mut self, page: Page, flags: PageTableFlags, allocator: &mut A)
where A: FrameAllocator
{
let frame = allocator.allocate_frame().expect("out of memory");
self.map_to(page, frame, flags, allocator)
}
- pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
+ pub fn identity_map<A>(&mut self, frame: PhysFrame, flags: PageTableFlags, allocator: &mut A)
where A: FrameAllocator
{
- let page = Page::containing_address(frame.start_address());
+ let virt_addr = VirtAddr::new(frame.start_address().as_u32());
+ let page = Page::containing_address(virt_addr);
self.map_to(page, frame, flags, allocator);
}
@@ -86,11 +91,11 @@ impl Mapper {
assert!(self.translate(page.start_address()).is_some());
let p1 = self.p2_mut()
- .next_table_mut(page.p2_index())
+ .next_table_mut(usize_from(u32::from(page.p2_index())))
.expect("mapping code does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
- x86::tlb::flush(page.start_address());
+ tlb::flush(page.start_address());
// TODO
// allocator.deallocate_frame(frame);
}
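
`translate`/`translate_page` rely on the standard 10/10/12 split of a 32-bit virtual address (P2 index, P1 index, page offset). The split is spelled out below as a standalone sketch; with the VGA buffer identity-mapped, the translation comes out as the identity.

```rust
// 32-bit, non-PAE address split: 10 bits P2 index, 10 bits P1 index,
// 12 bits offset. Standalone sketch of the arithmetic used by translate().
fn split(vaddr: u32) -> (u32, u32, u32) {
    ((vaddr >> 22) & 0x3ff, (vaddr >> 12) & 0x3ff, vaddr & 0xfff)
}

fn main() {
    let (p2, p1, offset) = split(0xb8000);
    assert_eq!((p2, p1, offset), (0, 0xb8, 0));
    // identity mapping: physical = frame start + offset
    let phys = p1 * 4096 + offset;
    assert_eq!(phys, 0xb8000);
    println!("p2={} p1={:#x} offset={:#x} -> phys={:#x}", p2, p1, offset, phys);
}
```
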
diff --git a/kernel-rs/src/memory/paging/mod.rs b/kernel-rs/src/memory/paging/mod.rs
index 636f235d..5b3fbd67 100644
--- a/kernel-rs/src/memory/paging/mod.rs
+++ b/kernel-rs/src/memory/paging/mod.rs
@@ -1,79 +1,17 @@
#![allow(dead_code)]
-mod entry;
mod table;
mod temporary_page;
mod mapper;
-use memory::PAGE_SIZE;
use memory::*;
use self::mapper::Mapper;
use self::temporary_page::TemporaryPage;
use core::ops::{Deref, DerefMut};
use multiboot2::BootInformation;
-use x86;
-
-pub use self::entry::*;
-pub use self::table::*;
-
-// x86 non PAE has 1024 entries per table
-const ENTRY_COUNT: usize = 1024;
-
-pub type PhysicalAddress = usize;
-pub type VirtualAddress = usize;
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
-pub struct Page {
- number: usize,
-}
-
-impl Page {
- pub fn containing_address(address: VirtualAddress) -> Page {
- // assert!(address < 0x0000_8000_0000_0000 ||
- // address >= 0xffff_8000_0000_0000,
- // "invalid addres: 0x{:x}", address);
- Page { number: address / PAGE_SIZE }
- }
-
- fn start_address(&self) -> usize {
- self.number * PAGE_SIZE
- }
-
- fn p2_index(&self) -> usize {
- (self.number >> 10) & 0x3ff
- }
-
- fn p1_index(&self) -> usize {
- (self.number >> 0) & 0x3ff
- }
-
- pub fn range_inclusive(start: Page, end: Page) -> PageIter {
- PageIter {
- start,
- end,
- }
- }
-}
-
-#[derive(Clone)]
-pub struct PageIter {
- start: Page,
- end: Page,
-}
-
-impl Iterator for PageIter {
- type Item = Page;
-
- fn next(&mut self) -> Option<Page> {
- if self.start <= self.end {
- let page = self.start;
- self.start.number += 1;
- Some(page)
- } else {
- None
- }
- }
-}
+use x86::*;
+use x86::registers::control::Cr3;
+use x86::instructions::tlb;
pub struct ActivePageTable {
mapper: Mapper,
@@ -106,109 +44,129 @@ impl ActivePageTable {
f: F)
where F: FnOnce(&mut Mapper)
{
- let backup = Frame::containing_address(x86::cr3());
+ let (cr3_back, _cr3flags_back) = Cr3::read();
// map temp page to current p2
- let p2_table = temporary_page.map_table_frame(backup.clone(), self);
+ let p2_table = temporary_page.map_table_frame(cr3_back.clone(), self);
// overwrite recursive map
- self.p2_mut()[1023].set(table.p2_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
- x86::tlb::flush_all();
+ self.p2_mut()[1023].set(table.p2_frame.clone(), PageTableFlags::PRESENT | PageTableFlags::WRITABLE);
+ tlb::flush_all();
// execute f in the new context
f(self);
// restore recursive mapping to original p2 table
- p2_table[1023].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE);
+ p2_table[1023].set(cr3_back, PageTableFlags::PRESENT | PageTableFlags::WRITABLE);
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
- let p2_frame = Frame::containing_address(x86::cr3() as usize);
+ let (p2_frame, cr3_flags) = Cr3::read();
+ let old_table = InactivePageTable { p2_frame };
- let old_table = InactivePageTable {
- p2_frame,
- };
-
- unsafe {
- let frame = Frame::containing_address(new_table.p2_frame.start_address());
- x86::cr3_write(frame.start_address());
- }
+ unsafe { Cr3::write(new_table.p2_frame, cr3_flags); }
old_table
}
}
pub struct InactivePageTable {
- p2_frame: Frame,
+ p2_frame: PhysFrame,
}
impl InactivePageTable {
- pub fn new(frame: Frame,
+ pub fn new(frame: PhysFrame,
active_table: &mut ActivePageTable,
- temporary_page: &mut TemporaryPage,
- ) -> InactivePageTable {
- {
- let table = temporary_page.map_table_frame(frame.clone(),
- active_table);
- table.zero();
+ temporary_page: &mut TemporaryPage)
+ -> InactivePageTable {
+ {
+ let table = temporary_page.map_table_frame(frame.clone(), active_table);
- // set up recursive mapping for the table
- table[1023].set(frame.clone(), EntryFlags::PRESENT | EntryFlags:: WRITABLE)
+ table.zero();
+ // set up recursive mapping for the table
+ table[1023].set(frame.clone(), PageTableFlags::PRESENT | PageTableFlags::WRITABLE)
+ }
+ temporary_page.unmap(active_table);
+ InactivePageTable { p2_frame: frame }
}
- temporary_page.unmap(active_table);
- InactivePageTable { p2_frame: frame }
- }
}
pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
-> ActivePageTable
where A: FrameAllocator
{
- let mut temporary_page = TemporaryPage::new(Page { number: 0xcafe },
- allocator);
+ let mut temporary_page = TemporaryPage::new(Page{number: 0xcafe}, allocator);
let mut active_table = unsafe { ActivePageTable::new() };
let mut new_table = {
let frame = allocator.allocate_frame().expect("no more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
+
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
// identity map the VGA text buffer
- let vga_buffer_frame = Frame::containing_address(0xb8000);
- mapper.identity_map(vga_buffer_frame, EntryFlags::WRITABLE, allocator);
+ let vga_buffer_frame = PhysFrame::containing_address(PhysAddr::new(0xb8000));
+ mapper.identity_map(vga_buffer_frame, PageTableFlags::WRITABLE, allocator);
let elf_sections_tag = boot_info.elf_sections_tag()
.expect("Memory map tag required");
for section in elf_sections_tag.sections() {
- use self::entry::EntryFlags;
-
if !section.is_allocated() {
continue;
}
assert!(section.start_address() % PAGE_SIZE as u64 == 0,
"sections need to be page aligned");
- let flags = EntryFlags::from_elf_section_flags(&section);
- let start_frame = Frame::containing_address(section.start_address() as usize);
- let end_frame = Frame::containing_address(section.end_address() as usize - 1);
- for frame in Frame::range_inclusive(start_frame, end_frame) {
+ let flags = elf_to_pagetable_flags(&section.flags());
+ let start_frame = PhysFrame::containing_address(
+ PhysAddr::new(section.start_address() as u32));
+ let end_frame = PhysFrame::containing_address(
+ PhysAddr::new(section.end_address() as u32 - 1));
+ for frame in start_frame..end_frame + 1 {
mapper.identity_map(frame, flags, allocator);
}
}
- let multiboot_start = Frame::containing_address(boot_info.start_address());
- let multiboot_end = Frame::containing_address(boot_info.end_address() - 1);
- for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
- mapper.identity_map(frame, EntryFlags::PRESENT, allocator);
+ let multiboot_start = PhysFrame::containing_address(
+ PhysAddr::new(boot_info.start_address() as u32));
+ let multiboot_end = PhysFrame::containing_address(
+ PhysAddr::new(boot_info.end_address() as u32 - 1));
+ for frame in multiboot_start..multiboot_end + 1 {
+ mapper.identity_map(frame, PageTableFlags::PRESENT, allocator);
}
});
let old_table = active_table.switch(new_table);
- let old_p2_page = Page::containing_address(old_table.p2_frame.start_address());
+
+ let old_p2_page = Page::containing_address(
+ VirtAddr::new(old_table.p2_frame.start_address().as_u32()));
active_table.unmap(old_p2_page, allocator);
+
active_table
}
+
+fn elf_to_pagetable_flags(elf_flags: &multiboot2::ElfSectionFlags)
+ -> PageTableFlags
+{
+ use multiboot2::ElfSectionFlags;
+
+ let mut flags = PageTableFlags::empty();
+
+ if elf_flags.contains(ElfSectionFlags::ALLOCATED) {
+ // section is loaded to memory
+ flags = flags | PageTableFlags::PRESENT;
+ }
+ if elf_flags.contains(ElfSectionFlags::WRITABLE) {
+ flags = flags | PageTableFlags::WRITABLE;
+ }
+ // LONG MODE STUFF
+ // if !elf_flags.contains(ELF_SECTION_EXECUTABLE) {
+ // flags = flags | PageTableFlags::NO_EXECUTE;
+ // }
+
+ flags
+}
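
The identity-map loops use `end_address() - 1` and an exclusive range with `+ 1` so that the last in-use frame is still covered without mapping one frame too many when a section ends exactly on a page boundary. A standalone check of that boundary case, with toy frame numbers:

```rust
// Why the loops take containing_address(end - 1) and iterate `.. end + 1`.
const PAGE_SIZE: u32 = 4096;

fn frame(addr: u32) -> u32 { addr / PAGE_SIZE }

fn main() {
    let (start, end) = (0x10_0000u32, 0x10_3000u32); // section occupies [start, end)
    let start_frame = frame(start);
    let end_frame = frame(end - 1); // last byte actually used
    let mapped: Vec<u32> = (start_frame..end_frame + 1).collect();
    assert_eq!(mapped, vec![0x100, 0x101, 0x102]); // exactly three 4 KiB frames
    println!("identity-mapping frames {:x?}", mapped);
}
```
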
diff --git a/kernel-rs/src/memory/paging/table.rs b/kernel-rs/src/memory/paging/table.rs
index da35e011..16060b05 100644
--- a/kernel-rs/src/memory/paging/table.rs
+++ b/kernel-rs/src/memory/paging/table.rs
@@ -1,95 +1,52 @@
use memory::*;
-use memory::paging::*;
+use x86::structures::paging::*;
+use x86::ux::*;
-use core::ops::{Index, IndexMut};
-use core::marker::PhantomData;
-
-// virtual address of P2 because its recursively mapped
-// see protected mode Non-PAE
-// https://wiki.osdev.org/Page_Tables
-pub const P2: *mut Table<Level2> = 0xffff_f000 as *mut _;
-
-pub struct Table<L: TableLevel> {
- entries: [Entry; ENTRY_COUNT],
- level: PhantomData<L>,
+pub trait RecTable
+{
+ fn next_table_address(&self, index: usize) -> Option<u32>;
+ fn next_table(&self, index: usize) -> Option<&PageTable>;
+ fn next_table_mut(&mut self, index: usize) -> Option<&mut PageTable>;
+ fn next_table_create<A: FrameAllocator>(&mut self,
+ index: usize,
+ allocator: &mut A)
+ -> &mut PageTable;
}
-impl<L> Table<L> where L: TableLevel
+impl RecTable for PageTable
{
- pub fn zero(&mut self) {
- for entry in self.entries.iter_mut() {
- entry.set_unused();
- }
- }
-}
-
-impl<L> Table<L> where L: HierarchicalLevel
-{
- fn next_table_address(&self, index: usize) -> Option<usize> {
+ fn next_table_address(&self, index: usize) -> Option<u32> {
let entry_flags = self[index].flags();
- if entry_flags.contains(EntryFlags::PRESENT) && !entry_flags.contains(EntryFlags::HUGE_PAGE) {
+ if entry_flags.contains(PageTableFlags::PRESENT) && !entry_flags.contains(PageTableFlags::HUGE_PAGE) {
let table_address = self as *const _ as usize;
- Some((table_address << 10) | (index << 12))
+ Some((table_address << 10 | index << 12) as u32)
} else {
None
}
}
- pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
+ fn next_table(&self, index: usize) -> Option<&PageTable> {
self.next_table_address(index)
.map(|address| unsafe { &*(address as *const _) })
}
- pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
+ fn next_table_mut(&mut self, index: usize) -> Option<&mut PageTable> {
self.next_table_address(index)
.map(|address| unsafe { &mut *(address as *mut _) })
}
- pub fn next_table_create<A>(&mut self,
- index: usize,
- allocator: &mut A) -> &mut Table<L::NextLevel>
+ fn next_table_create<A>(&mut self,
+ index: usize,
+ allocator: &mut A) -> &mut PageTable
where A: FrameAllocator
{
if self.next_table(index).is_none() {
- assert!(!self[index].flags().contains(EntryFlags::HUGE_PAGE),
+ assert!(!self[index].flags().contains(PageTableFlags::HUGE_PAGE),
"mapping code does not support huge pages");
let frame = allocator.allocate_frame().expect("no frames available");
- self[index].set(frame, EntryFlags::PRESENT | EntryFlags::WRITABLE);
+ self[index].set(frame, PageTableFlags::PRESENT | PageTableFlags::WRITABLE);
self.next_table_mut(index).expect("next_table_mut gave None").zero()
}
self.next_table_mut(index).expect("no next table 2")
}
}
-
-impl<L> Index<usize> for Table<L> where L: TableLevel
-{
- type Output = Entry;
-
- fn index(&self, index: usize) -> &Entry {
- &self.entries[index]
- }
-}
-
-impl<L> IndexMut<usize> for Table<L> where L: TableLevel
-{
- fn index_mut(&mut self, index: usize) -> &mut Entry {
- &mut self.entries[index]
- }
-}
-
-pub trait TableLevel {}
-
-pub enum Level4 {}
-pub enum Level3 {}
-pub enum Level2 {}
-pub enum Level1 {}
-
-impl TableLevel for Level4 {}
-impl TableLevel for Level3 {}
-impl TableLevel for Level2 {}
-impl TableLevel for Level1 {}
-
-pub trait HierarchicalLevel: TableLevel { type NextLevel: TableLevel; }
-impl HierarchicalLevel for Level4 { type NextLevel = Level3; }
-impl HierarchicalLevel for Level3 { type NextLevel = Level2; }
-impl HierarchicalLevel for Level2 { type NextLevel = Level1; }
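
`next_table_address` is the recursive-mapping trick: with P2 mapped into its own entry 1023 (so it appears at `0xffff_f000`), shifting the table address left by 10 and OR-ing in `index << 12` lands on the P1 table for that entry. The arithmetic, with explicit `u32` so the dropped high bits match the kernel's 32-bit pointers:

```rust
// Recursive-mapping arithmetic behind next_table_address().
const P2: u32 = 0xffff_f000;

fn next_table_address(table_address: u32, index: u32) -> u32 {
    (table_address << 10) | (index << 12)
}

fn main() {
    // P1 tables become visible at 0xffc0_0000 + index * 0x1000
    assert_eq!(next_table_address(P2, 0), 0xffc0_0000);
    assert_eq!(next_table_address(P2, 0x2f6), 0xffef_6000);
    // entry 1023 points back at P2 itself -- that is the recursion
    assert_eq!(next_table_address(P2, 1023), P2);
    println!("P1[0x2f6] lives at {:#010x}", next_table_address(P2, 0x2f6));
}
```
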
diff --git a/kernel-rs/src/memory/paging/temporary_page.rs b/kernel-rs/src/memory/paging/temporary_page.rs
index e098fef3..33756ba0 100644
--- a/kernel-rs/src/memory/paging/temporary_page.rs
+++ b/kernel-rs/src/memory/paging/temporary_page.rs
@@ -1,9 +1,10 @@
-use super::{Page, ActivePageTable, VirtualAddress};
-use super::table::{Table, Level1};
-use memory::{Frame, FrameAllocator};
+use super::ActivePageTable;
+use memory::{FrameAllocator};
+use x86::*;
+use x86::structures::paging::*;
pub struct TemporaryPage {
- page: Page,
+ pub page: Page,
allocator: TinyAllocator,
}
@@ -19,14 +20,15 @@ impl TemporaryPage {
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
- pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable)
- -> VirtualAddress
+ pub fn map(&mut self, frame: PhysFrame, active_table: &mut ActivePageTable)
+ -> VirtAddr
{
- use super::entry::EntryFlags;
-
assert!(active_table.translate_page(self.page).is_none(),
"temporary page is already mapped");
- active_table.map_to(self.page, frame, EntryFlags::WRITABLE, &mut self.allocator);
+ active_table.map_to(self.page, frame, PageTableFlags::WRITABLE, &mut self.allocator);
+ // this kind of check should be done in a test routine
+ assert!(active_table.translate_page(self.page).is_some(),
+ "temporary page was not mapped");
self.page.start_address()
}
@@ -38,14 +40,14 @@ impl TemporaryPage {
/// Maps the temporary page to the given page table frame in the active
/// table. Returns a reference to the now mapped table.
pub fn map_table_frame(&mut self,
- frame: Frame,
+ frame: PhysFrame,
active_table: &mut ActivePageTable)
- -> &mut Table<Level1> {
- unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
+ -> &mut PageTable {
+ unsafe { &mut *(self.map(frame, active_table).as_u32() as *mut PageTable) }
}
}
-struct TinyAllocator([Option<Frame>; 1]);
+struct TinyAllocator([Option<PhysFrame>; 1]);
impl TinyAllocator {
fn new<A>(allocator: &mut A) -> TinyAllocator
@@ -58,7 +60,7 @@ impl TinyAllocator {
}
impl FrameAllocator for TinyAllocator {
- fn allocate_frame(&mut self) -> Option<Frame> {
+ fn allocate_frame(&mut self) -> Option<PhysFrame> {
for frame_option in &mut self.0 {
if frame_option.is_some() {
return frame_option.take();
@@ -67,7 +69,7 @@ impl FrameAllocator for TinyAllocator {
None
}
- fn deallocate_frame(&mut self, frame: Frame) {
+ fn deallocate_frame(&mut self, frame: PhysFrame) {
for frame_option in &mut self.0 {
if frame_option.is_none() {
*frame_option = Some(frame);
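
`TinyAllocator` now stores a single `Option<PhysFrame>`: with two-level paging, mapping the temporary page can require at most one new P1 table, so one pre-allocated frame is enough. The one-slot idea in isolation, with toy frame numbers instead of the kernel types:

```rust
// One-slot allocator sketch: hand out the single stored frame, take it back
// on deallocate. Mirrors the shape of TinyAllocator without the kernel types.
struct TinySlot(Option<u32>);

impl TinySlot {
    fn allocate(&mut self) -> Option<u32> { self.0.take() }
    fn deallocate(&mut self, frame: u32) {
        assert!(self.0.is_none(), "TinySlot can hold only one frame");
        self.0 = Some(frame);
    }
}

fn main() {
    let mut slot = TinySlot(Some(42));
    let frame = slot.allocate().expect("one frame available");
    assert!(slot.allocate().is_none()); // a second allocation must fail
    slot.deallocate(frame);             // unmapping returns the frame
    assert_eq!(slot.allocate(), Some(42));
}
```
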
diff --git a/kernel-rs/src/x86/mod.rs b/kernel-rs/src/x86/mod.rs
deleted file mode 100644
index 7d9cf902..00000000
--- a/kernel-rs/src/x86/mod.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-//! x86 (32 bit) only
-
-pub mod tlb;
-
-pub unsafe fn cr0_write(val: usize) {
- asm!("mov $0, %cr0" :: "r"(val) : "memory");
-}
-
-pub fn cr0() -> usize {
- let ret: usize;
- unsafe { asm!("mov %cr0, $0" : "=r" (ret)) };
- ret
-}
-
-pub fn cr3() -> usize {
- let ret: usize;
- unsafe { asm!("mov %cr3, $0" : "=r" (ret)) };
- ret
-}
-
-pub fn cr4() -> usize {
- let ret: usize;
- unsafe { asm!("mov %cr4, $0" : "=r" (ret)) };
- ret
-}
-
-pub unsafe fn cr3_write(val: usize) {
- asm!("mov $0, %cr3" :: "r" (val) : "memory");
-}
diff --git a/kernel-rs/src/x86/tlb.rs b/kernel-rs/src/x86/tlb.rs
deleted file mode 100644
index 1890ebaa..00000000
--- a/kernel-rs/src/x86/tlb.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-use super::*;
-
-pub fn flush(addr: usize) {
- unsafe { asm!("invlpg ($0)" :: "r"(addr) : "memory")}
-}
-
-pub fn flush_all() {
- let cr3 = cr3();
- unsafe { cr3_write(cr3); }
-}
diff --git a/kernel-rs/x86 b/kernel-rs/x86
new file mode 160000
index 00000000..eae47083
--- /dev/null
+++ b/kernel-rs/x86
@@ -0,0 +1 @@
+Subproject commit eae470839b1ff232dbc4af5389e9a0b4fffe4b30