Step 7+8+9: usleep(), DeltaQueue
parent a527695202, commit 9ec23055bc
6 changed files with 219 additions and 43 deletions
@@ -197,4 +197,11 @@ pub fn pit_handler() void {
     // chan0 divisor = 2685
     // PIT_RATE in us
     kernel.time.increment(2251);
+    kernel.task.sleeping_tasks.decrement(2251);
+    while (kernel.task.sleeping_tasks.popZero()) |sleepnode| {
+        const tasknode = sleepnode.data;
+        tasknode.data.state = .ReadyToRun;
+        kernel.vmem.free(@ptrToInt(sleepnode));
+        kernel.task.ready_tasks.prepend(tasknode);
+    }
 }
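(For reference, the constants above are mutually consistent: assuming the standard 1,193,182 Hz PIT input clock, a channel-0 divisor of 2685 fires the interrupt 1193182 / 2685, about 444 times per second, i.e. once every roughly 2250.5 µs, which the handler rounds to 2251 µs of elapsed time per tick.)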
src/delta_queue.zig (new file, 148 additions)
@@ -0,0 +1,148 @@
const std = @import("std");
const Allocator = std.mem.Allocator;

/// DeltaQueue is a singly-linked list where each
/// node has a counter. Each counter is relative
/// to the previous one, so the sum of the counters
/// up to a node gives its absolute expiry time.
///
/// Inspired by https://wiki.osdev.org/Blocking_Process
/// Based on std.SinglyLinkedList
pub fn DeltaQueue(comptime T: type) type {
    return struct {
        const Self = @This();

        /// Node inside the linked list wrapping the actual data.
        pub const Node = struct {
            next: ?*Node,
            data: T,
            counter: u64,

            pub fn init(data: T, counter: u64) Node {
                return Node{
                    .next = null,
                    .data = data,
                    .counter = counter,
                };
            }

            /// Insert a new node after the current one.
            ///
            /// Arguments:
            ///     new_node: Pointer to the new node to insert.
            pub fn insertAfter(node: *Node, new_node: *Node) void {
                if (node.next) |after| {
                    std.debug.assert(new_node.counter <= after.counter); // sanity check
                    after.counter -= new_node.counter;
                }
                new_node.next = node.next;
                node.next = new_node;
            }
        };

        first: ?*Node,

        /// Initialize a delta queue.
        ///
        /// Returns:
        ///     An empty linked list.
        pub fn init() Self {
            return Self{
                .first = null,
            };
        }
        /// Insert a node into the list, converting its
        /// absolute counter into a counter relative to the
        /// nodes it passes on the way in.
        ///
        /// Arguments:
        ///     node: Pointer to the new node to insert.
        pub fn insert(list: *Self, node: *Node) void {
            var target: ?*Node = null;
            var next: ?*Node = list.first;
            while (true) {
                if (next == null or node.counter <= next.?.counter) {
                    if (target) |tg| return tg.insertAfter(node);
                    return list.prepend(node);
                }
                // make the counter relative to the node being
                // passed, before the next comparison
                node.counter -= next.?.counter;
                target = next;
                next = target.?.next;
            }
        }
        /// Subtract `count` from the queue as a whole,
        /// zeroing expired heads and carrying the remainder
        /// into the next node.
        ///
        /// Worst case is O(n); it could be better with a
        /// different data structure. Example: in a large list
        /// with all counters at 0, we traverse the whole list.
        pub fn decrement(list: *Self, count: u64) void {
            var it = list.first;
            var i = count;
            while (it) |node| : (it = node.next) {
                if (node.counter >= i) {
                    node.counter -= i;
                    return;
                }
                i -= node.counter;
                node.counter = 0;
            }
        }
        /// Insert a new node at the head.
        ///
        /// Arguments:
        ///     new_node: Pointer to the new node to insert.
        fn prepend(list: *Self, new_node: *Node) void {
            if (list.first) |after| {
                std.debug.assert(new_node.counter <= after.counter); // sanity check
                after.counter -= new_node.counter;
            }
            new_node.next = list.first;
            list.first = new_node;
        }
        /// Remove and return the first node in the list
        /// if its counter is 0.
        ///
        /// Returns:
        ///     A pointer to the first node, or null if the list
        ///     is empty or the head's counter is still nonzero.
        pub fn popZero(list: *Self) ?*Node {
            const first = list.first orelse return null;
            if (first.counter != 0) return null;
            list.first = first.next;
            return first;
        }
        /// Allocate a new node.
        ///
        /// Arguments:
        ///     allocator: Dynamic memory allocator.
        ///
        /// Returns:
        ///     A pointer to the new node.
        pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
            return allocator.create(Node);
        }

        /// Deallocate a node.
        ///
        /// Arguments:
        ///     node: Pointer to the node to deallocate.
        ///     allocator: Dynamic memory allocator.
        pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void {
            allocator.destroy(node);
        }

        /// Allocate and initialize a node and its data.
        ///
        /// Arguments:
        ///     data: The data to put inside the node.
        ///     counter: The node's (relative) counter value.
        ///     allocator: Dynamic memory allocator.
        ///
        /// Returns:
        ///     A pointer to the new node.
        pub fn createNode(list: *Self, data: T, counter: u64, allocator: *Allocator) !*Node {
            var node = try list.allocateNode(allocator);
            node.* = Node.init(data, counter);
            return node;
        }
    };
}
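To make the relative counters concrete, here is a minimal test sketch (an illustration written for this writeup, not code from the commit) that drives the queue with stack-allocated nodes:

const std = @import("std");
const DeltaQueue = @import("delta_queue.zig").DeltaQueue;

test "delta queue stores counters relative to the previous node" {
    var q = DeltaQueue(u8).init();
    var a = DeltaQueue(u8).Node.init('a', 5); // expires at t=5
    var b = DeltaQueue(u8).Node.init('b', 8); // expires at t=8
    var c = DeltaQueue(u8).Node.init('c', 6); // expires at t=6
    q.insert(&a);
    q.insert(&b);
    q.insert(&c);

    // absolute times 5, 6, 8 are stored as deltas 5, 1, 2
    std.debug.assert(q.first.? == &a and a.counter == 5);
    std.debug.assert(a.next.? == &c and c.counter == 1);
    std.debug.assert(c.next.? == &b and b.counter == 2);

    std.debug.assert(q.popZero() == null); // nothing has expired yet
    q.decrement(6); // advance time by 6 units
    std.debug.assert(q.popZero().? == &a); // t=5 has expired
    std.debug.assert(q.popZero().? == &c); // t=6 has expired
    std.debug.assert(q.popZero() == null); // b still has 2 units left
}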
@@ -1,7 +1,8 @@
 /// std
 pub const builtin = @import("builtin");
 pub const std = @import("std");
 pub const assert = std.debug.assert;

+pub usingnamespace @import("delta_queue.zig");
 pub usingnamespace @import("vga.zig");

 ///arch

@@ -18,4 +19,4 @@ pub const time = @import("time.zig");
 ///extra
 pub const console = @import("console.zig");
 pub const pci = @import("pci/pci.zig");
-pub const ps2 = @import("ps2.zig"); // i don't know whether this is x86 specific or not
+pub const ps2 = @import("ps2.zig");
@@ -31,6 +31,11 @@ export fn kmain(magic: u32, info: *const multiboot.MultibootInfo) noreturn {
     while (true) {
         task.lock_scheduler();
         task.schedule();
-        if (time.offset_us / 1000000 == 6) task.unblock(task.blocked_tasks.first.?);
     }
 }

+pub fn panic(a: []const u8, b: ?*builtin.StackTrace) noreturn {
+    println("{}", a);
+    println("{}", b);
+    while (true) asm volatile ("hlt");
+}
src/task.zig (92 changes)
@@ -2,10 +2,14 @@ pub usingnamespace @import("index.zig");

 var timer_last_count: u64 = 0;
 var boot_task = Task{ .tid = 0, .esp = 0x47, .state = .Running };
-const ListOfTasks = std.TailQueue(*Task);
-pub var current_task = &ListOfTasks.Node.init(&boot_task);
-pub var ready_tasks = ListOfTasks.init();
-pub var blocked_tasks = ListOfTasks.init();
+const TaskNode = std.TailQueue(*Task).Node;
+const SleepNode = DeltaQueue(*TaskNode).Node;
+
+pub var current_task: *TaskNode = &TaskNode.init(&boot_task);
+pub var ready_tasks = std.TailQueue(*Task).init();
+pub var blocked_tasks = std.TailQueue(*Task).init();
+pub var sleeping_tasks = DeltaQueue(*TaskNode).init();

 const STACK_SIZE = x86.PAGE_SIZE; // Size of thread stacks.
 var tid_counter: u16 = 1;
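Note the two levels of wrapping introduced here: a SleepNode's data field holds a *TaskNode, whose own data field holds the *Task. That is why pit_handler() above unwraps sleepnode.data to recover the task node, and why introspect() below prints node.data.data.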
|
@ -23,7 +27,8 @@ pub fn update_time_used() void {
|
|||
pub const TaskState = enum {
|
||||
Running,
|
||||
ReadyToRun,
|
||||
Paused,
|
||||
Blocked,
|
||||
Sleeping,
|
||||
};
|
||||
|
||||
pub const Task = struct {
|
||||
|
|
@@ -63,24 +68,35 @@ pub const Task = struct {
 };

 pub fn new(entrypoint: usize) !void {
-    const node = try vmem.create(ListOfTasks.Node);
+    const node = try vmem.create(TaskNode);
     node.data = try Task.create(entrypoint);
     ready_tasks.prepend(node);
 }

 /// Block the current task
+pub fn usleep(usec: u64) !void {
+    const node = try vmem.create(SleepNode);
+    lock_scheduler();
+    current_task.data.state = .Sleeping;
+    node.data = current_task;
+    node.counter = usec;
+    sleeping_tasks.insert(node);
+    schedule();
+}
+
 pub fn block(state: TaskState) void {
     assert(state != .Running);
     assert(state != .ReadyToRun);

     lock_scheduler();
     current_task.data.state = state;
     blocked_tasks.append(current_task);
     schedule();
 }

-pub fn unblock(node: *ListOfTasks.Node) void {
+pub fn unblock(node: *TaskNode) void {
     lock_scheduler();
     node.data.state = .ReadyToRun;
     blocked_tasks.remove(node);
     if (ready_tasks.first == null) {
         // Only one task was running before, so pre-empt
         switch_to(node);
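A worked example of the sleep path, with hypothetical timings and the commit's 2251 µs tick: if task A calls usleep(5000) and task B then calls usleep(9000), sleeping_tasks holds A with counter 5000 followed by B with counter 4000 (9000 relative to A). Each PIT interrupt calls decrement(2251); after three ticks (6753 µs) A's counter has reached zero and B's has dropped to 2247, so popZero() hands A's node back to the handler, which frees the SleepNode and returns the TaskNode to ready_tasks.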
@@ -91,16 +107,22 @@ pub fn unblock(node: *ListOfTasks.Node) void {
     }
 }

-pub fn switch_to(chosen: *ListOfTasks.Node) void {
+// expects:
+// - chosen is .ReadyToRun
+// - chosen is not in any scheduler lists
+pub fn switch_to(chosen: *TaskNode) void {
+    assert(chosen.data.state == .ReadyToRun);

-    // in case of self preemption
-    if (current_task.data.state == .Running) {
-        current_task.data.state = .ReadyToRun;
-        ready_tasks.append(current_task);
-    }
-
     // save old stack
     const old_task_esp_addr = &current_task.data.esp;

-    ready_tasks.remove(chosen);
+    // switch states
+    switch (current_task.data.state) {
+        .Running => ready_tasks.append(current_task),
+        else => blocked_tasks.append(current_task),
+    }
     chosen.data.state = .Running;
     current_task = chosen;
@@ -111,16 +133,23 @@ pub fn switch_to(chosen: *ListOfTasks.Node) void {
 }

 pub fn schedule() void {
     assert(IRQ_disable_counter > 0);

     update_time_used();
-    if (ready_tasks.first) |t| {
-        switch_to(t);
-    } else {
-        unlock_scheduler();
-    }
+
+    // check if somebody wants to run
+    const chosen = ready_tasks.popFirst();
+    if (chosen == null) return unlock_scheduler();
+
+    // std doesn't do this, for developer flexibility maybe?
+    chosen.?.prev = null;
+    chosen.?.next = null;
+
+    // switch
+    switch_to(chosen.?);
 }

 var IRQ_disable_counter: usize = 0;

 pub fn lock_scheduler() void {
     if (constants.SMP == false) {
         x86.cli();
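A design note on the schedule() change: the chosen node is now taken off ready_tasks via popFirst() before switch_to() runs, which is why switch_to() loses its old ready_tasks.remove(chosen) call and instead documents the expectation that chosen is not on any scheduler list. Clearing prev and next defensively avoids stale pointers if the node is later re-inserted into a TailQueue.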
@@ -138,26 +167,13 @@ pub fn introspect() void {
     update_time_used();

     println("{}", current_task.data);

     var it = ready_tasks.first;
     while (it) |node| : (it = node.next) println("{}", node.data);

     it = blocked_tasks.first;
     while (it) |node| : (it = node.next) println("{}", node.data);

+    var sit = sleeping_tasks.first;
+    while (sit) |node| : (sit = node.next) println("{} {}", node.data.data, node.counter);
 }

-// fn initContext(entry_point: usize, stack: usize) isr.Context {
-//     // Insert a trap return address to destroy the thread on return.
-//     var stack_top = @intToPtr(*usize, stack + STACK_SIZE - @sizeOf(usize));
-//     stack_top.* = layout.THREAD_DESTROY;
-
-//     return isr.Context{
-//         .cs = gdt.USER_CODE | gdt.USER_RPL,
-//         .ss = gdt.USER_DATA | gdt.USER_RPL,
-//         .eip = entry_point,
-//         .esp = @ptrToInt(stack_top),
-//         .eflags = 0x202,
-
-//         .registers = isr.Registers.init(),
-//         .interrupt_n = 0,
-//         .error_code = 0,
-//     };
-// }
@@ -65,8 +65,7 @@ pub fn clear() void {
 pub fn topbar() void {
     const bg = vga.background;
     while (true) {
-        if (time.offset_us / 1000000 == 4) task.block(.Paused);
-
+        if (time.offset_us / 1000000 == 4) task.usleep(2 * 1000 * 1000) catch unreachable;
         const cursor = vga.cursor;
         vga.cursor = 0;
         vga.background = Color.Red;
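This last change ties the demo together: previously the top bar blocked itself with task.block(.Paused) four seconds after boot and depended on the hard-coded unblock() at the six-second mark in kmain; with usleep(2 * 1000 * 1000) the same two-second pause now goes through the sleep queue, and the kmain hack is removed.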