Kernel: Refactor prekernel MMU to use a bump allocator

This commit is contained in:
James Mintram 2021-11-21 00:56:05 +00:00 committed by Brian Gianforcaro
parent 4a4a3193f8
commit 4e9777243e

View file

@@ -37,6 +37,7 @@ constexpr u32 PAGE_TABLE_SIZE = 0x1000;
// https://developer.arm.com/documentation/101811/0101/Controlling-address-translation
constexpr u32 PAGE_DESCRIPTOR = 0b11;
constexpr u32 TABLE_DESCRIPTOR = 0b11;
constexpr u32 DESCRIPTOR_MASK = ~0b11;
constexpr u32 ACCESS_FLAG = 1 << 10;
@@ -48,44 +49,85 @@ constexpr u32 INNER_SHAREABLE = (3 << 8);
constexpr u32 NORMAL_MEMORY = (0 << 2);
constexpr u32 DEVICE_MEMORY = (1 << 2);
// Strip the low descriptor type bits, leaving the physical address of the
// next-level translation table that this table descriptor points at.
constexpr u64* descriptor_to_pointer(u64 descriptor)
{
    u64 const table_address = descriptor & DESCRIPTOR_MASK;
    return (u64*)table_address;
}
using page_table_t = u8*;
static void zero_identity_map(page_table_t page_table_start, page_table_t page_table_end)
static void zero_pages(u64* start, u64* end)
{
// Memset all page table memory to zero
for (uint64_t* p = (uint64_t*)page_table_start;
p < (uint64_t*)page_table_end;
for (u64* p = (u64*)start;
p < (u64*)end;
p++) {
*p = 0;
}
}
static void build_identity_map(page_table_t page_table)
namespace {
// Hands out zeroed, page-table-sized chunks carved from a fixed physical
// range. Strictly a bump allocator: pages are never given back, which is
// all the prekernel needs while building its initial translation tables.
class PageBumpAllocator {
public:
    PageBumpAllocator(u64* start, u64* end)
        : m_start(start)
        , m_end(end)
        , m_current(start)
    {
        // Reject an empty or inverted range up front.
        if (m_start >= m_end) {
            Prekernel::panic("Invalid memory range passed to PageBumpAllocator");
        }
        // Both endpoints must sit on a page-table boundary; otherwise
        // take_page() would hand out misaligned tables.
        bool const start_aligned = (u64)m_start % PAGE_TABLE_SIZE == 0;
        bool const end_aligned = (u64)m_end % PAGE_TABLE_SIZE == 0;
        if (!start_aligned || !end_aligned) {
            Prekernel::panic("Memory range passed into PageBumpAllocator not aligned to PAGE_TABLE_SIZE");
        }
    }

    // Returns the next free page, zero-filled so stale memory can never be
    // misread as valid descriptors. Panics once the range is exhausted.
    u64* take_page()
    {
        if (m_current == m_end) {
            Prekernel::panic("Prekernel pagetable memory exhausted");
        }
        auto const words_per_page = PAGE_TABLE_SIZE / sizeof(u64);
        u64* page = m_current;
        m_current += words_per_page;
        zero_pages(page, page + words_per_page);
        return page;
    }

private:
    const u64* m_start; // Inclusive start of the managed range.
    const u64* m_end;   // Exclusive end of the managed range.
    u64* m_current;     // Bump pointer: next page to hand out.
};
}
static void build_identity_map(PageBumpAllocator& allocator)
{
// Set up first entry of level 1
uint64_t* level1_entry = (uint64_t*)page_table;
*level1_entry = (uint64_t)&page_table[PAGE_TABLE_SIZE];
*level1_entry |= TABLE_DESCRIPTOR;
u64* level1_table = allocator.take_page();
// Set up first entry of level 2
uint64_t* level2_entry = (uint64_t*)&page_table[PAGE_TABLE_SIZE];
*level2_entry = (uint64_t)&page_table[PAGE_TABLE_SIZE * 2];
*level2_entry |= TABLE_DESCRIPTOR;
level1_table[0] = (u64)allocator.take_page();
level1_table[0] |= TABLE_DESCRIPTOR;
// Set up L3 entries
u64* level2_table = descriptor_to_pointer(level1_table[0]);
level2_table[0] = (u64)allocator.take_page();
level2_table[0] |= TABLE_DESCRIPTOR;
u64* level3_table = descriptor_to_pointer(level2_table[0]);
// // Set up L3 entries
for (uint32_t l3_idx = 0; l3_idx < 512; l3_idx++) {
uint64_t* l3_entry = (uint64_t*)&page_table[PAGE_TABLE_SIZE * 2 + (l3_idx * sizeof(uint64_t))];
*l3_entry = (uint64_t)(page_table + (PAGE_TABLE_SIZE * 3) + (l3_idx * PAGE_TABLE_SIZE));
*l3_entry |= TABLE_DESCRIPTOR;
level3_table[l3_idx] = (u64)allocator.take_page();
level3_table[l3_idx] |= TABLE_DESCRIPTOR;
}
// Set up L4 entries
size_t page_index = 0;
for (size_t addr = START_OF_NORMAL_MEMORY; addr < END_OF_NORMAL_MEMORY; addr += GRANULE_SIZE, page_index++) {
uint64_t* l4_entry = (uint64_t*)&page_table[PAGE_TABLE_SIZE * 3 + (page_index * sizeof(uint64_t))];
u64* level4_table = descriptor_to_pointer(level3_table[page_index / 512]);
u64* l4_entry = &level4_table[page_index % 512];
*l4_entry = addr;
*l4_entry |= ACCESS_FLAG;
@@ -96,7 +138,8 @@ static void build_identity_map(page_table_t page_table)
// Set up entries for last 16MB of memory (MMIO)
for (size_t addr = START_OF_DEVICE_MEMORY; addr < END_OF_DEVICE_MEMORY; addr += GRANULE_SIZE, page_index++) {
uint64_t* l4_entry = (uint64_t*)&page_table[PAGE_TABLE_SIZE * 3 + (page_index * sizeof(uint64_t))];
u64* level4_table = descriptor_to_pointer(level3_table[page_index / 512]);
u64* l4_entry = &level4_table[page_index % 512];
*l4_entry = addr;
*l4_entry |= ACCESS_FLAG;
@@ -149,8 +192,8 @@ static void activate_mmu()
void init_prekernel_page_tables()
{
zero_identity_map(page_tables_phys_start, page_tables_phys_end);
build_identity_map(page_tables_phys_start);
PageBumpAllocator allocator((u64*)page_tables_phys_start, (u64*)page_tables_phys_end);
build_identity_map(allocator);
switch_to_page_table(page_tables_phys_start);
activate_mmu();
}