Step 6: for real this time

Jack Halford 2019-12-18 18:56:27 +01:00
parent a8c68611ce
commit 78f7197cb9
4 changed files with 107 additions and 107 deletions

View file

@@ -7,7 +7,6 @@
 .type switch_tasks, @function
 .global switch_tasks
 switch_tasks:
     push %ebp
     mov %esp, %ebp
@@ -18,38 +17,17 @@ switch_tasks:
     pop %ebp // the top of the forged stack contains ebp
     ret // the top of the forged stack contains eip to go to
-// Save previous task's state
-// Notes:
-//   For cdecl: EAX, ECX, and EDX are already saved by the caller and don't need to be saved again
-//   EIP is already saved on the stack by the caller's "CALL" instruction
-//   The task isn't able to change CR3 so it doesn't need to be saved
-//   Segment registers are constants (while running kernel code) so they don't need to be saved
-// push %ebx
-// push %esi
-// push %edi
-// push %ebp
-// mov %esp, %ebp
-// mov %edi,[current_task] //edi = address of the previous task's "thread control block"
-// mov [edi+TCB.ESP], %esp //Save ESP for previous task's kernel stack in the thread's TCB
-// Load next task's state
-// mov %esi,[esp+(4+1)*4] //esi = address of the next task's "thread control block" (parameter passed on stack)
-// mov [current_task], %esi //Current task's TCB is the next task's TCB
-// mov %esp,[esi+TCB.ESP] //Load ESP for next task's kernel stack from the thread's TCB
-// mov eax,[esi+TCB.CR3] //eax = address of page directory for next task
-// mov %ebx,[esi+TCB.ESP0] //ebx = address for the top of the next task's kernel stack
-// mov [TSS.ESP0],ebx //Adjust the ESP0 field in the TSS (used by the CPU for CPL=3 -> CPL=0 privilege level changes)
-// mov ecx,cr3 //ecx = previous task's virtual address space
-// cmp eax,ecx //Does the virtual address space need to be changed?
-// je .doneVAS // no, virtual address space is the same, so don't reload it and cause TLB flushes
-// mov cr3,eax // yes, load the next task's virtual address space
-// .doneVAS:
-// pop %ebp
-// pop %edi
-// pop %esi
-// pop %ebx
+// .type jmp_to_entrypoint, @function
+// .global jmp_to_entrypoint
+// jmp_to_entrypoint:
+//     mov %esp, %ebp
+//     mov +4(%esp), %eax
+//     jmp *%eax
+// .type birthasm, @function
+// .global birthasm
+// birthasm:
+//     call unlock_scheduler
+//     mov +4(%esp), %eax
+//     jmp *%eax
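Note: the bare `pop %ebp; ret` epilogue only works because every suspended task's stack is forged to look like a paused call into the scheduler. A minimal sketch of that layout, as a hypothetical helper (the real Task.create is not shown in this diff):

    // Hypothetical sketch, not code from this commit: forge an initial kernel
    // stack so that switch_tasks' `pop %ebp; ret` lands in the new task's
    // entrypoint on its very first run.
    fn forge_stack(stack: []usize, entrypoint: usize) usize {
        stack[stack.len - 1] = entrypoint;       // consumed by `ret` as the first eip
        stack[stack.len - 2] = 0;                // consumed by `pop %ebp`
        return @ptrToInt(&stack[stack.len - 2]); // initial esp, saved in the TCB
    }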

View file

@@ -65,7 +65,9 @@ pub fn loop() void {
             keypress(input_ring_buffer[input_read_index]);
             input_read_index +%= 1;
         }
         task.lock_scheduler();
         task.schedule();
+        task.unlock_scheduler();
     }
 }
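This hunk shows the crux of the step: schedule() no longer drops the lock itself, so every call site now brackets it explicitly. A usage sketch of the discipline (the loop body and do_work are illustrative, not from the repo):

    // Illustrative pattern: callers own the lock across schedule() and
    // balance it themselves once they are rescheduled.
    fn example_task() void {
        while (true) {
            do_work();               // hypothetical workload
            task.lock_scheduler();   // cli + bump the disable counter
            task.schedule();         // may switch away; returns when rescheduled
            task.unlock_scheduler(); // balances the lock taken above
        }
    }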

View file

@@ -26,14 +26,9 @@ export fn kmain(magic: u32, info: *const multiboot.MultibootInfo) noreturn {
     pci.scan();
     task.new(@ptrToInt(topbar)) catch unreachable;
-    // task.new(@ptrToInt(console.loop)) catch unreachable;
     console.loop();
     unreachable;
-    // while (true) {
-    //     task.lock_scheduler();
-    //     task.schedule();
-    // }
 }

 pub fn panic(a: []const u8, b: ?*builtin.StackTrace) noreturn {

View file

@@ -1,6 +1,6 @@
 pub usingnamespace @import("index.zig");

-var boot_task = Task{ .tid = 0, .esp = 0x47, .state = .Running };
+var boot_task = Task{ .tid = 0, .esp = 0x47, .state = .Running, .born = true };

 const TaskNode = std.TailQueue(*Task).Node;
 const SleepNode = DeltaQueue(*TaskNode).Node;
@@ -13,9 +13,6 @@ pub var sleeping_tasks = DeltaQueue(*TaskNode).init();
 const STACK_SIZE = x86.PAGE_SIZE; // Size of thread stacks.
 var tid_counter: u16 = 1;

-///ASM
-extern fn switch_tasks(new_esp: u32, old_esp_addr: u32) void;

 var timer_last_count: u64 = 0;
 pub fn update_time_used() void {
     const current_count = time.offset_us;
@@ -35,6 +32,7 @@ pub const Task = struct {
     esp: usize,
     tid: u16,
     time_used: u64 = 0,
+    born: bool = false,
     state: TaskState,
     //context: isr.Context,
     //cr3: usize,
@@ -61,12 +59,29 @@ pub const Task = struct {
         return t;
     }

-    // responsible for calling the task entrypoint
     pub fn destroy(self: *Task) void {
         vmem.free(self.esp);
         vmem.free(@ptrToInt(self));
     }
 };

+///ASM
+// extern fn jmp_to_entrypoint(entrypoint: usize) void;
+// // this is only run once, on the first execution of a task
+// pub fn birth() void {
+//     println("birth!");
+//     unlock_scheduler();
+//     const entrypoint = current_task.data.entrypoint;
+//     jmp_to_entrypoint(entrypoint);
+//     // comptime asm ("jmp %[entrypoint]"
+//     //     :
+//     //     : [entrypoint] "{ecx}" (entrypoint)
+//     // );
+// }
+
+///ASM
+extern fn switch_tasks(new_esp: usize, old_esp_addr: usize) void;

 pub fn new(entrypoint: usize) !void {
     const node = try vmem.create(TaskNode);
     node.data = try Task.create(entrypoint);
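The commented-out birth/jmp_to_entrypoint pair sketches one way to let a newborn task drop the scheduler lock before entering its entrypoint; the commit instead ships the born flag checked in switch_to further down. A speculative Zig rendering of that abandoned trampoline (the forged eip would point at the stub; `current_task.data.entrypoint` is the field referenced in the comments above):

    // Hypothetical first-run stub, modeled on the commented-out birthasm:
    export fn birth_stub() void {
        unlock_scheduler(); // the newborn task cannot balance the caller's lock itself
        const entry = @intToPtr(fn () void, current_task.data.entrypoint);
        entry(); // tail-call into the task body; never returns
    }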
@@ -76,12 +91,14 @@ pub fn new(entrypoint: usize) !void {
 // TODO: make a sleep without malloc
 pub fn usleep(usec: u64) !void {
     const node = try vmem.create(SleepNode);
     lock_scheduler();
     current_task.data.state = .Sleeping;
     node.data = current_task;
     node.counter = usec;
     sleeping_tasks.insert(node);
     schedule();
+    unlock_scheduler();
 }

 pub fn block(state: TaskState) void {
@@ -92,6 +109,7 @@ pub fn block(state: TaskState) void {
     current_task.data.state = state;
     blocked_tasks.append(current_task);
     schedule();
+    unlock_scheduler();
 }

 pub fn unblock(node: *TaskNode) void {
@@ -108,9 +126,29 @@ pub fn unblock(node: *TaskNode) void {
     }
 }

+var IRQ_disable_counter: usize = 0;
+
+pub fn lock_scheduler() void {
+    if (constants.SMP == false) {
+        x86.cli();
+        IRQ_disable_counter += 1;
+    }
+}
+
+pub fn unlock_scheduler() void {
+    if (IRQ_disable_counter == 0) println("error trying to unlock");
+    if (constants.SMP == false) {
+        IRQ_disable_counter -= 1;
+        if (IRQ_disable_counter == 0) {
+            x86.sti();
+            x86.hlt();
+        }
+    }
+}
+
 // expects:
 // - chosen is .ReadyToRun
 // - chosen is not in any scheduler lists
+// - the scheduler is locked
+// - the task being switched to will unlock_scheduler()
 pub fn switch_to(chosen: *TaskNode) void {
     assert(chosen.data.state == .ReadyToRun);
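For readers skimming the diff: lock_scheduler/unlock_scheduler (moved up from the bottom of the file) form a counting interrupt lock, so nested critical sections only re-enable interrupts at the outermost release. A self-contained model of the idea, with cli/sti replaced by a flag so it runs anywhere (the extra x86.hlt() in the real unlock above is specific to this commit and left out here):

    // Model of the counting lock (assumption: uniprocessor, where masking
    // interrupts is enough to exclude the scheduler).
    var interrupts_enabled: bool = true;
    var disable_count: usize = 0;

    fn lock() void {
        interrupts_enabled = false; // stands in for x86.cli()
        disable_count += 1;
    }

    fn unlock() void {
        disable_count -= 1;
        if (disable_count == 0) interrupts_enabled = true; // stands in for x86.sti()
    }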
@@ -127,12 +165,61 @@ pub fn switch_to(chosen: *TaskNode) void {
     chosen.data.state = .Running;
     current_task = chosen;

-    unlock_scheduler();
+    if (current_task.data.born == false) {
+        current_task.data.born = true;
+        unlock_scheduler();
+    }

     // don't inline the asm function, it needs to ret
     @noInlineCall(switch_tasks, chosen.data.esp, @ptrToInt(old_task_esp_addr));
 }

+pub var CPU_idle_time: u64 = 0;
+pub var CPU_idle_start_time: u64 = 0;
+
+pub fn schedule() void {
+    assert(IRQ_disable_counter > 0);
+    update_time_used();
+    // format();
+    if (ready_tasks.popFirst()) |t| {
+        // somebody is ready to run
+        // std doesn't do this, for developer flexibility maybe?
+        t.prev = null;
+        t.next = null;
+        switch_to(t);
+    } else if (current_task.data.state == .Running) {
+        // single task mode, carry on
+        return;
+    } else {
+        // idle mode
+        notify_idle();
+        // borrow the current task
+        const borrow = current_task;
+        CPU_idle_start_time = time.offset_us; // for power management
+        while (true) { // idle loop
+            if (ready_tasks.popFirst()) |t| { // found a new task
+                CPU_idle_time += time.offset_us - CPU_idle_start_time; // count time as idle
+                timer_last_count = time.offset_us; // don't count time as used
+                println("went into idle mode for {}usecs", time.offset_us - CPU_idle_start_time);
+                if (t == borrow) {
+                    t.data.state = .Running;
+                    return; // no need to ctx_switch, we are already running this
+                }
+                return switch_to(t);
+            } else { // no tasks ready, let the timer fire
+                x86.sti(); // enable interrupts to allow the timer to fire
+                x86.hlt(); // halt and wait for the timer to fire
+                x86.cli(); // disable interrupts again to see if there is something to do
+            }
+        }
+    }
+}

 fn notify_idle() void {
     const bg = vga.background;
     const fg = vga.foreground;
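Why the born dance: a task that has run before resumes just after its own call into switch_tasks, unwinds out of schedule(), and executes its caller's unlock_scheduler(); a brand-new task starts at its entrypoint with no such pending unlock, so switch_to drops the lock on its behalf exactly once. A toy model of that asymmetry (plain Zig, names illustrative):

    // Toy model: both paths must leave the disable counter balanced.
    var counter: usize = 0;

    fn resume_path() void {
        counter += 1; // caller's lock_scheduler()
        // ...switch away and eventually back...
        counter -= 1; // caller's own unlock_scheduler() after schedule() returns
    }

    fn first_run_path() void {
        counter += 1; // lock taken by whoever called schedule()
        counter -= 1; // switch_to unlocks for the newborn (the born branch)
        // ...newborn task starts at its entrypoint with counter == 0...
    }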
@@ -150,66 +237,6 @@ fn notify_idle() void {
     vga.foreground = fg;
 }

-pub var CPU_idle_time: u64 = 0;
-pub var CPU_idle_start_time: u64 = 0;
-
-pub fn schedule() void {
-    assert(IRQ_disable_counter > 0);
-    update_time_used();
-    if (ready_tasks.popFirst()) |t| {
-        // somebody is ready to run
-        // std doesn't do this, for developer flexibility maybe?
-        t.prev = null;
-        t.next = null;
-        switch_to(t);
-    } else if (current_task.data.state == .Running) {
-        // single task mode, carry on
-        return unlock_scheduler();
-    } else {
-        // idle mode
-        notify_idle();
-        // borrow the current task
-        const borrow = current_task;
-        CPU_idle_start_time = time.offset_us; // for power management
-        while (true) { // idle loop
-            if (ready_tasks.popFirst()) |t| { // found a new task
-                CPU_idle_time += time.offset_us - CPU_idle_start_time; // count time as idle
-                timer_last_count = time.offset_us; // don't count time as used
-                println("went into idle mode for {}usecs", time.offset_us - CPU_idle_start_time);
-                if (t == borrow) {
-                    t.data.state = .Running;
-                    return unlock_scheduler(); // no need to ctx_switch, we are already running this
-                }
-                return switch_to(t);
-            } else { // no tasks ready, let the timer fire
-                x86.sti(); // enable interrupts to allow the timer to fire
-                x86.hlt(); // halt and wait for the timer to fire
-                x86.cli(); // disable interrupts again to see if there is something to do
-            }
-        }
-    }
-}
-
-var IRQ_disable_counter: usize = 0;
-
-pub fn lock_scheduler() void {
-    if (constants.SMP == false) {
-        x86.cli();
-        IRQ_disable_counter += 1;
-    }
-}
-
-pub fn unlock_scheduler() void {
-    if (IRQ_disable_counter == 0) println("error trying to unlock");
-    if (constants.SMP == false) {
-        IRQ_disable_counter -= 1;
-        if (IRQ_disable_counter == 0) x86.sti();
-    }
-}

 pub fn format_short() void {
     print("{}R {}B {}S", ready_tasks.len, blocked_tasks.len, sleeping_tasks.len);
 }
@@ -221,10 +248,8 @@ pub fn format() void {
     var it = ready_tasks.first;
     while (it) |node| : (it = node.next) println("{}", node.data);
     it = blocked_tasks.first;
     while (it) |node| : (it = node.next) println("{}", node.data);
     var sit = sleeping_tasks.first;
     while (sit) |node| : (sit = node.next) println("{} {}", node.data.data, node.counter);
 }