added multithreading unit tests and a delay in ap_startup code to wait for all cores to finish getting initialized
FlareCoding committed Sep 10, 2024
1 parent 5855949 commit 5281e90
Showing 7 changed files with 173 additions and 13 deletions.
Binary file modified efi/OVMF_VARS.fd
Binary file not shown.
8 changes: 8 additions & 0 deletions kernel/src/arch/x86/ap_startup.cpp
@@ -91,6 +91,8 @@ void _prepareApStartupMemoryMappings() {
}

void initializeApCores() {
const uint32_t coreStartupMaxTimeout = 3; // seconds

auto& acpiController = AcpiController::get();
Madt* apicTable = acpiController.getApicTable();

@@ -122,6 +124,9 @@ void initializeApCores() {
// Let the AP cores continue on their own asynchronously
_releaseApStartupSpinlockFlag();
});

// Wait for all cores to fully start and finish initializing
sleep(coreStartupMaxTimeout);
}

void bootAndInitApCore(uint8_t apicid) {
@@ -170,6 +175,9 @@ void apStartupEntryC(int apicid) {
size_t usermodeStackSize = 8 * PAGE_SIZE;
size_t userStackTop = (uint64_t)(usermodeStack + usermodeStackSize);

// Set the userStackTop field of the CPU's swapper task
g_kernelSwapperTasks[apicid].userStackTop = userStackTop;

__call_lowered_entry(apStartupEntryLowered, (void*)userStackTop);
while (1);
}
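Not part of this commit: the hunk above covers the startup wait with a fixed sleep(coreStartupMaxTimeout). A common alternative is an acknowledgement counter that each AP core bumps at the end of its startup path, so the BSP can stop waiting as soon as every core has checked in instead of always sleeping for the full timeout. A rough sketch under that assumption (g_apCoresReady and markApCoreReady() are hypothetical names; msleep() is the kernel's millisecond sleep from <time/ktime.h>, as used by the new unit tests below):

// Illustration only -- not part of this commit. All names below except
// msleep() are hypothetical.
#include <stdint.h>
#include <time/ktime.h>

// Incremented once by each AP core when its initialization path completes.
static volatile uint32_t g_apCoresReady = 0;

// Hypothetical hook: an AP core would call this at the end of apStartupEntryC().
void markApCoreReady() {
    __atomic_fetch_add(&g_apCoresReady, 1, __ATOMIC_RELEASE);
}

// Hypothetical BSP-side wait: poll the counter with a millisecond budget
// instead of sleeping for the full worst-case timeout. Returns true once
// every expected core has reported in, false if the budget ran out first.
bool waitForApCores(uint32_t expectedCores, uint32_t timeoutMs) {
    for (uint32_t elapsed = 0; elapsed < timeoutMs; elapsed++) {
        if (__atomic_load_n(&g_apCoresReady, __ATOMIC_ACQUIRE) >= expectedCores) {
            return true;
        }
        msleep(1); // 1 ms poll interval
    }
    return __atomic_load_n(&g_apCoresReady, __ATOMIC_ACQUIRE) >= expectedCores;
}

The fixed sleep keeps the change small; a counter-based wait mainly pays off when the core count or per-core initialization time varies.
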
2 changes: 2 additions & 0 deletions kernel/src/entry/kernel_entry.cpp
@@ -72,6 +72,8 @@ __PRIVILEGED_CODE void _kentry(KernelEntryParams* params) {
g_kernelSwapperTasks[BSP_CPU_ID].pid = 1;
zeromem(&g_kernelSwapperTasks[BSP_CPU_ID].context, sizeof(CpuContext));
g_kernelSwapperTasks[BSP_CPU_ID].context.rflags |= 0x200;
g_kernelSwapperTasks[BSP_CPU_ID].userStackTop =
(uint64_t)(__usermodeKernelEntryStack + USERMODE_KERNEL_ENTRY_STACK_SIZE);

// Elevated flag must be 0 since we are going to lower ourselves in the next few calls.
// TO-DO: investigate further why setting elevated flag to 1 here causes a crash.
113 changes: 110 additions & 3 deletions kernel/src/entry/tests/multithreading.test.cpp
@@ -1,6 +1,8 @@
#include "kernel_unit_tests.h"
#include <sched/sched.h>
#include <memory/kmemory.h>
#include <time/ktime.h>
#include <acpi/acpi_controller.h>
#include <sync.h>

DECLARE_SPINLOCK(mtUnitTestLock);
@@ -18,14 +20,119 @@ DECLARE_UNIT_TEST("Multithreading Test - Kernel Task Creation", mtTaskCreationUn
const size_t iterations = 10000;
const size_t milestone = 1000;

for (size_t i = 1; i <= iterations; i++) {
Task** taskArray = (Task**)kmalloc(sizeof(Task*) * iterations);

for (size_t i = 0; i < iterations; i++) {
Task* task = createKernelTask(incrementMtUnitTestCounter);
ASSERT_TRUE(task, "Failed to allocate a kernel task");

if (i % milestone == 0) {
kuPrint(UNIT_TEST "Allocated %lli tasks\n", i);
taskArray[i] = task;

if ((i + 1) % milestone == 0) {
kuPrint(UNIT_TEST "Allocated %lli tasks\n", i + 1);
}
}

// Free the allocated tasks
for (size_t i = 0; i < iterations; i++) {
bool ret = destroyKernelTask(taskArray[i]);
ASSERT_TRUE(ret, "Failed to destroy and clean up a kernel task");
}

// Free the array for holding tasks for this test
kfree(taskArray);

return UNIT_TEST_SUCCESS;
}

DECLARE_UNIT_TEST("Multithreading Test - Single Core", mtSingleCoreUnitTest) {
const size_t taskCount = MAX_QUEUED_PROCESSES - 1;
const int targetCpu = BSP_CPU_ID;
const int taskExecutionTimeout = 400;
auto& sched = RRScheduler::get();

// Allocate a buffer to store the tasks
Task** taskArray = (Task**)kmalloc(sizeof(Task*) * taskCount);

// Reset the test counter
g_mtUnitTestCounter = 0;

kuPrint(UNIT_TEST "Creating %llu test tasks\n", taskCount);

// Create the tasks
for (size_t i = 0; i < taskCount; i++) {
Task* task = createKernelTask(incrementMtUnitTestCounter);
ASSERT_TRUE(task, "Failed to allocate a kernel task");

taskArray[i] = task;
}

// Schedule all the tasks
for (size_t i = 0; i < taskCount; i++) {
bool ret = sched.addTask(taskArray[i], targetCpu);
ASSERT_TRUE(ret, "Failed to schedule a task on a single CPU core");
}

// Wait for all tasks to finish
msleep(taskExecutionTimeout);

// Check that the counter reached the correct value
ASSERT_EQ(g_mtUnitTestCounter, taskCount, "Incorrect final value of the test counter after task execution");

// Destroy the allocated tasks
for (size_t i = 0; i < taskCount; i++) {
bool ret = destroyKernelTask(taskArray[i]);
ASSERT_TRUE(ret, "Failed to destroy and clean up a kernel task");
}

// Free the array for holding tasks for this test
kfree(taskArray);

return UNIT_TEST_SUCCESS;
}

// DECLARE_UNIT_TEST("Multithreading Test - Multi Core", mtMultiCoreUnitTest) {
// const size_t systemCpus = AcpiController::get().getApicTable()->getCpuCount();
// const size_t taskCount = (MAX_QUEUED_PROCESSES - 1) * (systemCpus - 1);
// const uint32_t taskExecutionTimeout = 400;
// auto& sched = RRScheduler::get();

// // Allocate a buffer to store the tasks
// Task** taskArray = (Task**)kmalloc(sizeof(Task*) * taskCount);

// // Reset the test counter
// g_mtUnitTestCounter = 0;

// kuPrint(UNIT_TEST "Creating %llu test tasks\n", taskCount);

// // Create the tasks
// for (size_t i = 0; i < taskCount; i++) {
// Task* task = createKernelTask(incrementMtUnitTestCounter);
// ASSERT_TRUE(task, "Failed to allocate a kernel task");

// taskArray[i] = task;
// }

// // Schedule all the tasks
// for (size_t i = 0; i < taskCount; i++) {
// bool ret = sched.addTask(taskArray[i]);
// ASSERT_TRUE(ret, "Failed to schedule a task on a single CPU core");
// }

// // Wait for all tasks to finish
// msleep(taskExecutionTimeout);

// // Check that the counter reached the correct value
// ASSERT_EQ(g_mtUnitTestCounter, taskCount, "Incorrect final value of the test counter after task execution");

// // Destroy the allocated tasks
// for (size_t i = 0; i < taskCount; i++) {
// bool ret = destroyKernelTask(taskArray[i]);
// ASSERT_TRUE(ret, "Failed to destroy and clean up a kernel task");
// }

// // Free the array for holding tasks for this test
// kfree(taskArray);

// return UNIT_TEST_SUCCESS;
// }
8 changes: 6 additions & 2 deletions kernel/src/process/process.h
@@ -31,9 +31,13 @@ typedef struct ProcessControlBlock {
uint64_t priority;
uint64_t cr3;
uint64_t kernelStack;
uint64_t userStackTop;
uint64_t usergs;
uint8_t elevated;
uint8_t cpu;
struct {
uint64_t elevated : 1;
uint64_t cpu : 1;
uint64_t flrsvd : 62;
} __attribute__((packed));
} PCB;

typedef int64_t pid_t;
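Not part of this commit: a small standalone illustration of the packed flag word that replaces the two uint8_t fields above. The field widths match the anonymous struct added to ProcessControlBlock; the standalone struct and function names are made up for the example.

// Illustration only -- not part of this commit.
#include <stdint.h>

struct PcbFlagsExample {
    uint64_t elevated : 1;  // was a separate uint8_t field
    uint64_t cpu      : 1;  // was a separate uint8_t field
    uint64_t flrsvd   : 62; // reserved bits for future flags
} __attribute__((packed));

// All three bit-fields share a single 64-bit word.
static_assert(sizeof(PcbFlagsExample) == sizeof(uint64_t),
              "flags pack into one 64-bit word");

// Writing one field leaves the other bits untouched.
inline void markElevated(PcbFlagsExample& flags) {
    flags.elevated = 1;
}

Because the struct inside the PCB stays anonymous, existing pcb.elevated and pcb.cpu accesses keep compiling unchanged; the flags simply occupy one 64-bit word instead of two separate bytes.
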
40 changes: 32 additions & 8 deletions kernel/src/sched/sched.cpp
@@ -5,6 +5,12 @@
#include <kelevate/kelevate.h>
#include <sync.h>

#define SCHED_KERNEL_STACK_PAGES 2
#define SCHED_USER_STACK_PAGES 2

#define SCHED_KERNEL_STACK_SIZE SCHED_KERNEL_STACK_PAGES * PAGE_SIZE
#define SCHED_USER_STACK_SIZE SCHED_USER_STACK_PAGES * PAGE_SIZE

RRScheduler s_globalRRScheduler;
Task g_kernelSwapperTasks[MAX_CPUS] = {};

@@ -279,24 +285,24 @@ Task* createKernelTask(void (*taskEntry)(), int priority) {
task->priority = priority;

// Allocate both user and kernel stacks
void* userStack = zallocPages(2);
void* userStack = zallocPages(SCHED_USER_STACK_PAGES);
if (!userStack) {
kfree(task);
return nullptr;
}

void* kernelStack = zallocPages(2);
void* kernelStack = zallocPages(SCHED_KERNEL_STACK_PAGES);
if (!kernelStack) {
kfree(task);
freePages(userStack, 2);
freePages(userStack, SCHED_USER_STACK_PAGES);
return nullptr;
}

// Initialize the CPU context
task->context.rsp = (uint64_t)userStack + PAGE_SIZE; // Point to the top of the stack
task->context.rbp = task->context.rsp; // Point to the top of the stack
task->context.rip = (uint64_t)taskEntry; // Set instruction pointer to the task function
task->context.rflags = 0x200; // Enable interrupts
task->context.rsp = (uint64_t)userStack + SCHED_USER_STACK_SIZE; // Point to the top of the stack
task->context.rbp = task->context.rsp; // Point to the top of the stack
task->context.rip = (uint64_t)taskEntry; // Set instruction pointer to the task function
task->context.rflags = 0x200; // Enable interrupts

// Set up segment registers for user space. These values correspond to the selectors in the GDT.
task->context.cs = __USER_CS | 0x3;
@@ -305,14 +311,32 @@
task->context.ss = task->context.ds;

// Save the kernel stack
task->kernelStack = (uint64_t)kernelStack + PAGE_SIZE;
task->kernelStack = (uint64_t)kernelStack + SCHED_KERNEL_STACK_SIZE;

// Save the user stack
task->userStackTop = task->context.rsp;

// Setup the task's page table
task->cr3 = reinterpret_cast<uint64_t>(paging::g_kernelRootPageTable);

return task;
}

bool destroyKernelTask(Task* task) {
if (!task) {
return false;
}

// Destroy the stacks
freePages((void*)(task->kernelStack - SCHED_KERNEL_STACK_SIZE), SCHED_KERNEL_STACK_PAGES);
freePages((void*)(task->userStackTop - SCHED_USER_STACK_SIZE), SCHED_USER_STACK_PAGES);

// Free the actual task structure
kfree(task);

return true;
}

void exitKernelThread() {
// Construct a fake PtRegs structure to switch to a new context
PtRegs regs;
15 changes: 15 additions & 0 deletions kernel/src/sched/sched.h
@@ -107,6 +107,21 @@ class RRScheduler {
//
Task* createKernelTask(void (*taskEntry)(), int priority = 0);

//
// Destroys a task object, releasing any resources allocated for the task.
// This function should properly clean up any state or memory associated
// with the task, ensuring it no longer runs and freeing up any used memory.
//
// Parameters:
// - task: A pointer to the Task object to be destroyed.
// The Task pointer must not be used after calling this function.
//
// Returns:
// - Returns true if the task was successfully destroyed. False if there
// was an error (such as the task not being found).
//
bool destroyKernelTask(Task* task);

//
// Allows the current running kernel thread to terminate and switch to the next
// available task without waiting for the next timer interrupt. If no next valid
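Not part of this commit: a minimal sketch of the create / schedule / destroy lifecycle that the new destroyKernelTask() declaration completes, modeled on the "Multithreading Test - Single Core" unit test above. demoTaskEntry() and runDemoTask() are made-up names.

// Illustration only -- not part of this commit. Mirrors the pattern used by
// the "Multithreading Test - Single Core" unit test above.
#include <sched/sched.h>

// Hypothetical task body; exitKernelThread() terminates the thread without
// waiting for the next timer interrupt, per the declaration above.
void demoTaskEntry() {
    // ... do some work ...
    exitKernelThread();
}

void runDemoTask() {
    Task* task = createKernelTask(demoTaskEntry);
    if (!task) {
        return; // allocation failed
    }

    auto& sched = RRScheduler::get();
    if (!sched.addTask(task, BSP_CPU_ID)) {
        destroyKernelTask(task); // never scheduled, safe to release immediately
        return;
    }

    // ... give the task time to run (the unit test uses msleep()) ...

    // Release the task's stacks and control block; the pointer must not be
    // used after this call.
    destroyKernelTask(task);
}
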
