memory: introduce kmap

Commit 8b3542d (1 parent: 22216db)
mosmeh committed Oct 4, 2024

Showing 6 changed files with 135 additions and 64 deletions.
18 changes: 18 additions & 0 deletions kernel/memory/memory.h

@@ -164,4 +164,22 @@ void page_table_unmap(uintptr_t virt_addr, uintptr_t size);
 // Changes the page table flags for the virtual address range.
 void page_table_set_flags(uintptr_t virt_addr, uintptr_t size, uint16_t flags);
 
+#define MAX_NUM_KMAPS_PER_TASK 2
+
+struct kmap_ctrl {
+    size_t num_mapped;
+    uintptr_t phys_addrs[MAX_NUM_KMAPS_PER_TASK];
+};
+
+// Maps a physical page to the kernel virtual address space.
+// MAX_NUM_KMAPS_PER_TASK pages can be mapped at the same time for each task.
+void* kmap(uintptr_t phys_addr);
+
+// Unmaps the kmapped virtual address.
+// kunmap must be called in the reverse order of kmap.
+void kunmap(void* virt_addr);
+
+// Should be called on context switch with the kmap_ctrl of the new task.
+void kmap_switch(struct kmap_ctrl*);
+
 #endif
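
The new API behaves as a small per-task stack of temporary mappings: kmap pushes a mapping and returns a kernel virtual address, kunmap pops it, which is why unmaps must happen in the reverse order of maps. A minimal usage sketch (the copy_page helper below is illustrative, not part of this commit):

// Illustrative only: copy one physical page to another via two
// simultaneous kmaps. This stays within MAX_NUM_KMAPS_PER_TASK (2)
// mappings, and the kunmap calls are in reverse (LIFO) order of the
// kmap calls.
static void copy_page(uintptr_t dest_phys, uintptr_t src_phys) {
    void* dest = kmap(dest_phys); // occupies slot 0
    void* src = kmap(src_phys);   // occupies slot 1
    memcpy(dest, src, PAGE_SIZE);
    kunmap(src);  // slot 1 must be released first
    kunmap(dest); // then slot 0
}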
25 changes: 12 additions & 13 deletions kernel/memory/page.c

@@ -1,4 +1,5 @@
 #include "memory.h"
+#include "private.h"
 #include <common/extra.h>
 #include <kernel/api/sys/types.h>
 #include <kernel/kmsg.h>
@@ -15,7 +16,7 @@
 static size_t bitmap_len;
 static uint32_t bitmap[BITMAP_MAX_LEN];
 static uint8_t ref_counts[MAX_NUM_PAGES];
-static struct mutex lock;
+static struct spinlock lock;
 
 static bool bitmap_get(size_t i) {
     ASSERT(BITMAP_INDEX(i) < bitmap_len);
@@ -138,9 +139,7 @@ static void bitmap_init(const multiboot_info_t* mb_info, uintptr_t lower_bound,
 }
 
 void page_init(const multiboot_info_t* mb_info) {
-    // In the current setup, kernel image (including 1MiB offset) has to fit in
-    // single page table (< 4MiB), and last two pages are reserved for quickmap
-    ASSERT((uintptr_t)kernel_end <= KERNEL_VIRT_ADDR + 1022 * PAGE_SIZE);
+    ASSERT((uintptr_t)kernel_end <= KERNEL_IMAGE_END);
 
     uintptr_t lower_bound;
     uintptr_t upper_bound;
@@ -152,11 +151,11 @@ void page_init(const multiboot_info_t* mb_info) {
 }
 
 uintptr_t page_alloc(void) {
-    mutex_lock(&lock);
+    spinlock_lock(&lock);
 
     ssize_t first_set = bitmap_find_first_set();
     if (IS_ERR(first_set)) {
-        mutex_unlock(&lock);
+        spinlock_unlock(&lock);
         kprint("page: out of physical pages\n");
         return first_set;
     }
@@ -168,7 +167,7 @@ uintptr_t page_alloc(void) {
     bitmap_clear(first_set);
     stats.free_kibibytes -= PAGE_SIZE / 1024;
 
-    mutex_unlock(&lock);
+    spinlock_unlock(&lock);
     return first_set * PAGE_SIZE;
 }
 
@@ -178,15 +177,15 @@ void page_ref(uintptr_t phys_addr) {
     if (BITMAP_INDEX(index) >= bitmap_len)
         return;
 
-    mutex_lock(&lock);
+    spinlock_lock(&lock);
 
     ASSERT(ref_counts[index] > 0);
     ASSERT(!bitmap_get(index));
 
     if (ref_counts[index] < UINT8_MAX)
         ++ref_counts[index];
 
-    mutex_unlock(&lock);
+    spinlock_unlock(&lock);
 }
 
 void page_unref(uintptr_t phys_addr) {
@@ -195,7 +194,7 @@
     if (BITMAP_INDEX(index) >= bitmap_len)
         return;
 
-    mutex_lock(&lock);
+    spinlock_lock(&lock);
 
     ASSERT(ref_counts[index] > 0);
     ASSERT(!bitmap_get(index));
@@ -211,11 +210,11 @@
         }
     }
 
-    mutex_unlock(&lock);
+    spinlock_unlock(&lock);
 }
 
 void memory_get_stats(struct memory_stats* out_stats) {
-    mutex_lock(&lock);
+    spinlock_lock(&lock);
     *out_stats = stats;
-    mutex_unlock(&lock);
+    spinlock_unlock(&lock);
 }
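
The physical page allocator's lock also changes from a mutex to a spinlock. The commit message does not say why, but a plausible reading is that these functions can now be reached from code running with interrupts disabled, where sleeping on a mutex is not an option. A hypothetical call chain, for illustration only:

// switch_context()            interrupts are disabled here
//   -> kmap_switch()          installs the new task's kmap entries
//     -> get_or_create_pte()  may need a fresh page table page
//       -> page_alloc()       must not block or reschedule
//
// A mutex may put the caller to sleep; a spinlock only busy-waits,
// which is the safe choice in such no-sleep contexts.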
132 changes: 87 additions & 45 deletions kernel/memory/page_table.c

@@ -171,43 +171,14 @@ static void flush_tlb_range(uintptr_t virt_addr, size_t size) {
     pop_cli(int_flag);
 }
 
-// quickmap temporarily maps a physical page to the fixed virtual addresses,
-// which are at the last two pages of the kernel page directory
-
-#define QUICKMAP_PAGE 1022
-#define QUICKMAP_PAGE_TABLE 1023
-
-// this is locked in page_directory_clone_current
-static struct mutex quickmap_lock;
-
-static uintptr_t quickmap(size_t which, uintptr_t phys_addr, uint32_t flags) {
-    volatile page_table* pt = get_page_table_from_index(KERNEL_PDE_IDX);
-    volatile page_table_entry* pte = pt->entries + which;
-    ASSERT(pte->raw == 0);
-    pte->raw = phys_addr | flags;
-    pte->present = true;
-    uintptr_t virt_addr = KERNEL_VIRT_ADDR + PAGE_SIZE * which;
-    flush_tlb_range(virt_addr, PAGE_SIZE);
-    return virt_addr;
-}
-
-static void unquickmap(size_t which) {
-    volatile page_table* pt = get_page_table_from_index(KERNEL_PDE_IDX);
-    volatile page_table_entry* pte = pt->entries + which;
-    ASSERT(pte->present);
-    pte->raw = 0;
-    flush_tlb_range(KERNEL_VIRT_ADDR + PAGE_SIZE * which, PAGE_SIZE);
-}
-
 static uintptr_t clone_page_table(const volatile page_table* src,
                                   uintptr_t src_virt_addr) {
     uintptr_t dest_pt_phys_addr = page_alloc();
     if (IS_ERR(dest_pt_phys_addr))
         return dest_pt_phys_addr;
 
-    uintptr_t dest_pt_virt_addr =
-        quickmap(QUICKMAP_PAGE_TABLE, dest_pt_phys_addr, PTE_WRITE);
-    volatile page_table* dest_pt = (volatile page_table*)dest_pt_virt_addr;
+    volatile page_table* dest_pt =
+        (volatile page_table*)kmap(dest_pt_phys_addr);
 
     for (size_t i = 0; i < 1024; ++i) {
         if (!src->entries[i].present) {
@@ -223,21 +194,20 @@
 
         uintptr_t dest_page_phys_addr = page_alloc();
         if (IS_ERR(dest_page_phys_addr)) {
-            unquickmap(QUICKMAP_PAGE_TABLE);
+            kunmap((void*)dest_pt);
            return dest_page_phys_addr;
         }
 
         dest_pt->entries[i].raw =
             dest_page_phys_addr | (src->entries[i].raw & PTE_FLAGS_MASK);
 
-        uintptr_t dest_page_virt_addr =
-            quickmap(QUICKMAP_PAGE, dest_page_phys_addr, PTE_WRITE);
-        memcpy((void*)dest_page_virt_addr,
-               (void*)(src_virt_addr + PAGE_SIZE * i), PAGE_SIZE);
-        unquickmap(QUICKMAP_PAGE);
+        void* dest_page_kaddr = kmap(dest_page_phys_addr);
+        memcpy(dest_page_kaddr, (void*)(src_virt_addr + PAGE_SIZE * i),
+               PAGE_SIZE);
+        kunmap(dest_page_kaddr);
     }
 
-    unquickmap(QUICKMAP_PAGE_TABLE);
+    kunmap((void*)dest_pt);
     return dest_pt_phys_addr;
 }
 
@@ -248,8 +218,6 @@ struct page_directory* page_directory_clone_current(void) {
 
     // copy userland region
 
-    mutex_lock(&quickmap_lock);
-
     struct page_directory* src = current_page_directory();
     for (size_t i = 0; i < KERNEL_PDE_IDX; ++i) {
         if (!src->entries[i].present) {
@@ -259,17 +227,13 @@
 
         volatile page_table* pt = get_page_table_from_index(i);
         uintptr_t cloned_pt_phys_addr = clone_page_table(pt, i * 0x400000);
-        if (IS_ERR(cloned_pt_phys_addr)) {
-            mutex_unlock(&quickmap_lock);
+        if (IS_ERR(cloned_pt_phys_addr))
             return ERR_PTR(cloned_pt_phys_addr);
-        }
 
         dst->entries[i].raw =
             cloned_pt_phys_addr | (src->entries[i].raw & PTE_FLAGS_MASK);
     }
 
-    mutex_unlock(&quickmap_lock);
-
     return dst;
 }
 
@@ -463,3 +427,81 @@ void page_table_set_flags(uintptr_t virt_addr, uintptr_t size, uint16_t flags) {
     }
     flush_tlb_range(virt_addr, size);
 }
+
+static uintptr_t kmap_addr(size_t index) {
+    return KMAP_START +
+           (cpu_get_id() * MAX_NUM_KMAPS_PER_TASK + index) * PAGE_SIZE;
+}
+
+void* kmap(uintptr_t phys_addr) {
+    ASSERT(phys_addr);
+    ASSERT(phys_addr % PAGE_SIZE == 0);
+
+    bool int_flag = push_cli();
+
+    struct kmap_ctrl* kmap = &current->kmap;
+    size_t index = kmap->num_mapped++;
+    ASSERT(index < MAX_NUM_KMAPS_PER_TASK);
+    ASSERT(!kmap->phys_addrs[index]);
+    kmap->phys_addrs[index] = phys_addr;
+
+    uintptr_t kaddr = kmap_addr(index);
+    volatile page_table_entry* pte = get_or_create_pte(kaddr);
+    ASSERT(pte);
+    pte->raw = phys_addr;
+    pte->write = true;
+    pte->global = true;
+    pte->present = true;
+    flush_tlb_single(kaddr);
+
+    pop_cli(int_flag);
+
+    return (void*)kaddr;
+}
+
+void kunmap(void* addr) {
+    ASSERT(addr);
+    size_t offset = (uintptr_t)addr - kmap_addr(0);
+    ASSERT(offset % PAGE_SIZE == 0);
+    size_t index = offset / PAGE_SIZE;
+    ASSERT(index < MAX_NUM_KMAPS_PER_TASK);
+
+    bool int_flag = push_cli();
+
+    struct kmap_ctrl* kmap = &current->kmap;
+    ASSERT(kmap->num_mapped == index + 1);
+    kmap->phys_addrs[index] = 0;
+    --kmap->num_mapped;
+
+    volatile page_table_entry* pte = get_pte((uintptr_t)addr);
+    ASSERT(pte && pte->present);
+    pte->raw = 0;
+    flush_tlb_single((uintptr_t)addr);
+
+    pop_cli(int_flag);
+}
+
+void kmap_switch(struct kmap_ctrl* kmap) {
+    ASSERT(!interrupts_enabled());
+    ASSERT(kmap);
+
+    size_t i = 0;
+    for (; i < kmap->num_mapped; ++i) {
+        uintptr_t virt_addr = kmap_addr(i);
+        volatile page_table_entry* pte = get_or_create_pte(virt_addr);
+        ASSERT(pte);
+        pte->raw = kmap->phys_addrs[i];
+        pte->write = true;
+        pte->global = true;
+        pte->present = true;
+        flush_tlb_single(virt_addr);
+    }
+    for (; i < MAX_NUM_KMAPS_PER_TASK; ++i) {
+        uintptr_t virt_addr = kmap_addr(i);
+        volatile page_table_entry* pte = get_pte(virt_addr);
+        if (!pte || !pte->present)
+            break;
+        pte->raw = 0;
+        flush_tlb_single(virt_addr);
+    }
+}
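
kmap_addr gives every CPU its own window of MAX_NUM_KMAPS_PER_TASK consecutive virtual pages, so tasks running on different CPUs never share a slot, and the push_cli/pop_cli pairs in kmap/kunmap keep the slot bookkeeping and the PTE update atomic with respect to preemption on that CPU. A worked example of the slot arithmetic (assuming PAGE_SIZE is 4096; the actual value of KMAP_START depends on KERNEL_VIRT_ADDR):

// With MAX_NUM_KMAPS_PER_TASK == 2:
//   CPU 0, index 0 -> KMAP_START + 0x0000
//   CPU 0, index 1 -> KMAP_START + 0x1000
//   CPU 1, index 0 -> KMAP_START + 0x2000
//   CPU 1, index 1 -> KMAP_START + 0x3000
// i.e. kmap_addr(index) = KMAP_START + (cpu_id * 2 + index) * 4096

On a context switch, kmap_switch first reinstalls the incoming task's mappings into this CPU's slots, then clears any leftover entries from the outgoing task, flushing the TLB entry for each slot it touches.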
20 changes: 14 additions & 6 deletions kernel/memory/private.h

@@ -2,9 +2,23 @@
 
 #include "memory.h"
 #include <common/extra.h>
+#include <kernel/cpu.h>
 #include <kernel/lock.h>
 #include <stdalign.h>
 
+// In the current setup, kernel image (including 1MiB offset) has to fit in
+// single page table (< 4MiB).
+#define KERNEL_IMAGE_END (KERNEL_VIRT_ADDR + 1024 * PAGE_SIZE)
+
+#define KMAP_START KERNEL_IMAGE_END
+#define KMAP_END \
+    (KMAP_START + MAX_NUM_KMAPS_PER_TASK * MAX_NUM_CPUS * PAGE_SIZE)
+
+#define KERNEL_HEAP_START KMAP_END
+
+// Last 4MiB is for recursive mapping
+#define KERNEL_HEAP_END 0xffc00000
+
 struct page_directory;
 typedef struct multiboot_info multiboot_info_t;
 
@@ -33,12 +47,6 @@ uintptr_t page_alloc(void);
 void page_ref(uintptr_t phys_addr);
 void page_unref(uintptr_t phys_addr);
 
-// kernel heap starts right after the quickmap page
-#define KERNEL_HEAP_START (KERNEL_VIRT_ADDR + 1024 * PAGE_SIZE)
-
-// last 4MiB is for recursive mapping
-#define KERNEL_HEAP_END 0xffc00000
-
 void page_table_init(void);
 
 struct page_directory* page_directory_create(void);
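
These macros pin down the kernel virtual layout: image, then per-CPU kmap windows, then heap, with the top 4MiB reserved for the recursive mapping. For concreteness, a worked example assuming KERNEL_VIRT_ADDR = 0xc0000000, PAGE_SIZE = 4096, and MAX_NUM_CPUS = 8 (none of these values appear in this diff):

// KERNEL_IMAGE_END  = 0xc0000000 + 1024 * 4096  = 0xc0400000
// KMAP_START        = KERNEL_IMAGE_END          = 0xc0400000
// KMAP_END          = KMAP_START + 2 * 8 * 4096 = 0xc0410000
// KERNEL_HEAP_START = KMAP_END                  = 0xc0410000
// KERNEL_HEAP_END   = 0xffc00000 (top 4MiB: recursive mapping)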
2 changes: 2 additions & 0 deletions kernel/sched.c

@@ -134,6 +134,8 @@ noreturn void switch_context(void) {
     cpu->current_task = task;
 
     vm_enter(task->vm);
+    kmap_switch(&task->kmap);
+
     gdt_set_cpu_kernel_stack(task->kernel_stack_top);
     memcpy(cpu_get_current()->gdt + GDT_ENTRY_TLS_MIN, current->tls,
            sizeof(current->tls));
2 changes: 2 additions & 0 deletions kernel/task.h

@@ -32,6 +32,8 @@ struct task {
     uintptr_t kernel_stack_base, kernel_stack_top;
     uintptr_t arg_start, arg_end, env_start, env_end;
 
+    struct kmap_ctrl kmap;
+
     struct gdt_segment tls[NUM_GDT_TLS_ENTRIES];
 
     struct fs* fs;
