Kernel: Move Kernel mapping to 0xc0000000

The kernel is now no longer identity mapped to the bottom 8MiB of
memory, and is now mapped at the higher address of `0xc0000000`.

The lower ~1MiB of memory (from GRUB's mmap), however, is still
identity mapped to provide an easy way for the kernel to get
physical pages for things such as DMA etc. These could later be
mapped to the higher address too, as I'm not too sure how to
go about doing this elegantly without a lot of address subtractions.
This commit is contained in:
Jesse Buhagiar 2019-11-21 16:08:11 +11:00 committed by Andreas Kling
parent 61ba19f031
commit bd33c66273
Notes: sideshowbarker 2024-07-19 11:07:15 +09:00
13 changed files with 132 additions and 64 deletions

View file

@ -34,9 +34,9 @@ stack_top:
.section .page_tables
.align 4096
page_tables_start:
.skip 4096*3
.skip 4096*5
.section .text
.section .text.boot
.global start
.type start, @function
@ -51,13 +51,79 @@ start:
cli
cld
# We first save the multiboot_info_ptr so it doesn't get trampled
addl $0xc0000000, %ebx
movl %ebx, multiboot_info_ptr - 0xc0000000
# First, let's set up the first page table to map the first 4MiB of memory.
# This makes sure we don't crash after we set CR3 and enable paging
movl $0x200, %ecx
xor %ebx, %ebx
movl $((page_tables_start + (4096 * 1)) - 0xc0000000), %edx
call make_table
# Now we create the kernel mappings. The kernel maps 0MiB -> 8MiB into its address space at
# v0xc0000000.
movl $0x400, %ecx
movl $0x0, %ebx # ebx is the base pointer (kernel base is at physical address 0 in this case)
movl $((page_tables_start + (4096 * 2)) - 0xc0000000), %edx
call make_table
movl $0x400, %ecx
movl $0x400000, %ebx # ebx is the base pointer (kernel base is at physical address 0 in this case)
movl $((page_tables_start + (4096 * 3)) - 0xc0000000), %edx
call make_table
# Okay, so we have a page table that contains addresses of the first 4MiB of memory. Let's insert this into the
# boot page directory. The index we need to insert it into is at vaddr >> 22, which is the page directory index.
# This reveals that we need to insert the page directory into 0xc0000000 >> 22 = 768
# An interesting quirk is that we must also identity map the first 4MiB too, as the next instruction after enabling
# paging is at a physical address, which would cause a page fault. As we have no handler, this would cause a triple fault.
movl $((page_tables_start + (4096 * 1)) - 0xc0000000 + 0x003), page_tables_start - 0xc0000000 + 0
movl $((page_tables_start + (4096 * 2)) - 0xc0000000 + 0x003), page_tables_start - 0xc0000000 + 768 * 4
movl $((page_tables_start + (4096 * 3)) - 0xc0000000 + 0x003), page_tables_start - 0xc0000000 + 769 * 4
# Now let's load the CR3 register with our page directory
movl $(page_tables_start - 0xc0000000), %ecx
movl %ecx, %cr3
# Let's enable paging!
movl %cr0, %ecx
orl $0x80000001, %ecx
movl %ecx, %cr0
lea high_address_space_start, %ecx
jmp *%ecx
# Make a page table. This is called with the following arguments:
# ebx = base pointer of mapping
# edx = page table physical address
# ecx = number of pages to map
#
# Registers used in function
# eax = loop counter
make_table:
xorl %eax, %eax # eax = index of the next PTE slot to fill (loop counter)
.loop:
pushl %ecx # save remaining-page count; the body clobbers ecx
movl %ebx, %ecx
orl $0x3, %ecx # addr | READ_WRITE | PAGE_PRESENT
movl %ecx, 0(%edx, %eax, 4) # store the 4-byte PTE at table[eax]
addl $0x1000, %ebx # advance base pointer to the next 4KiB physical page
inc %eax
popl %ecx # restore the counter so `loop` can decrement it
loop .loop # dec ecx; jump to .loop while ecx != 0
ret
# At this point, the CPU now starts reading instructions from (virtual) address 0xc0100000
high_address_space_start:
mov $stack_top, %esp
and $-16, %esp
mov %ebx, multiboot_info_ptr
pushl $page_tables_start
pushl $(page_tables_start - 0xc0000000)
call init
add $4, %esp

View file

@ -8,6 +8,8 @@
#define PAGE_SIZE 4096
#define PAGE_MASK 0xfffff000
static const u32 kernel_virtual_base = 0xc0000000;
class MemoryManager;
class PageTableEntry;
@ -89,6 +91,7 @@ class PageDirectoryEntry {
public:
PageTableEntry* page_table_base() { return reinterpret_cast<PageTableEntry*>(m_raw & 0xfffff000u); }
PageTableEntry* page_table_virtual_base() { return reinterpret_cast<PageTableEntry*>((m_raw + kernel_virtual_base) & 0xfffff000u); }
void set_page_table_base(u32 value)
{
m_raw &= 0xfff;

View file

@ -116,11 +116,13 @@ void PATAChannel::initialize(bool force_pio)
kprintf("PATAChannel: PATA Controller found! id=%w:%w\n", id.vendor_id, id.device_id);
}
});
m_prdt_page = MM.allocate_supervisor_physical_page();
m_force_pio.resource() = false;
if (!m_pci_address.is_null()) {
// Let's try to set up DMA transfers.
PCI::enable_bus_mastering(m_pci_address);
m_prdt.end_of_table = 0x8000;
prdt().end_of_table = 0x8000;
m_bus_master_base = PCI::get_BAR4(m_pci_address) & 0xfffc;
m_dma_buffer_page = MM.allocate_supervisor_physical_page();
kprintf("PATAChannel: Bus master IDE: I/O @ %x\n", m_bus_master_base);
@ -259,16 +261,16 @@ bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, u8* outbuf, bool
disable_irq();
m_prdt.offset = m_dma_buffer_page->paddr();
m_prdt.size = 512 * count;
prdt().offset = m_dma_buffer_page->paddr();
prdt().size = 512 * count;
ASSERT(m_prdt.size <= PAGE_SIZE);
ASSERT(prdt().size <= PAGE_SIZE);
// Stop bus master
IO::out8(m_bus_master_base, 0);
// Write the PRDT location
IO::out32(m_bus_master_base + 4, (u32)&m_prdt);
IO::out32(m_bus_master_base + 4, (u32)&prdt());
// Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
IO::out8(m_bus_master_base + 2, IO::in8(m_bus_master_base + 2) | 0x6);
@ -338,18 +340,18 @@ bool PATAChannel::ata_write_sectors_with_dma(u32 lba, u16 count, const u8* inbuf
disable_irq();
m_prdt.offset = m_dma_buffer_page->paddr();
m_prdt.size = 512 * count;
prdt().offset = m_dma_buffer_page->paddr();
prdt().size = 512 * count;
memcpy(m_dma_buffer_page->paddr().as_ptr(), inbuf, 512 * count);
ASSERT(m_prdt.size <= PAGE_SIZE);
ASSERT(prdt().size <= PAGE_SIZE);
// Stop bus master
IO::out8(m_bus_master_base, 0);
// Write the PRDT location
IO::out32(m_bus_master_base + 4, (u32)&m_prdt);
IO::out32(m_bus_master_base + 4, (u32)&prdt());
// Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
IO::out8(m_bus_master_base + 2, IO::in8(m_bus_master_base + 2) | 0x6);

View file

@ -55,6 +55,8 @@ private:
bool ata_read_sectors(u32, u16, u8*, bool);
bool ata_write_sectors(u32, u16, const u8*, bool);
PhysicalRegionDescriptor& prdt() { return *reinterpret_cast<PhysicalRegionDescriptor*>(m_prdt_page->paddr().as_ptr()); }
// Data members
u8 m_channel_number { 0 }; // Channel number. 0 = master, 1 = slave
u16 m_io_base { 0x1F0 };
@ -63,7 +65,7 @@ private:
volatile bool m_interrupted { false };
PCI::Address m_pci_address;
PhysicalRegionDescriptor m_prdt;
RefPtr<PhysicalPage> m_prdt_page;
RefPtr<PhysicalPage> m_dma_buffer_page;
u16 m_bus_master_base { 0 };
Lockable<bool> m_dma_enabled;

View file

@ -6,11 +6,11 @@
#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/StdLib.h>
#include <Kernel/Heap/kmalloc.h>
#define SANITIZE_KMALLOC
@ -20,11 +20,11 @@ struct [[gnu::packed]] allocation_t
size_t nchunk;
};
#define BASE_PHYSICAL (4 * MB)
#define KMALLOC_RANGE_BASE (0xc0000000 + (4 * MB))
#define CHUNK_SIZE 8
#define POOL_SIZE (3 * MB)
#define ETERNAL_BASE_PHYSICAL (2 * MB)
#define ETERNAL_RANGE_BASE (0xc0000000 + (2 * MB))
#define ETERNAL_RANGE_SIZE (2 * MB)
static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
@ -42,21 +42,21 @@ static u8* s_end_of_eternal_range;
bool is_kmalloc_address(const void* ptr)
{
if (ptr >= (u8*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
if (ptr >= (u8*)ETERNAL_RANGE_BASE && ptr < s_next_eternal_ptr)
return true;
return (size_t)ptr >= BASE_PHYSICAL && (size_t)ptr <= (BASE_PHYSICAL + POOL_SIZE);
return (size_t)ptr >= KMALLOC_RANGE_BASE && (size_t)ptr <= (KMALLOC_RANGE_BASE + POOL_SIZE);
}
void kmalloc_init()
{
memset(&alloc_map, 0, sizeof(alloc_map));
memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);
memset((void*)KMALLOC_RANGE_BASE, 0, POOL_SIZE);
kmalloc_sum_eternal = 0;
sum_alloc = 0;
sum_free = POOL_SIZE;
s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
s_next_eternal_ptr = (u8*)ETERNAL_RANGE_BASE;
s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
}
@ -134,7 +134,7 @@ void* kmalloc_impl(size_t size)
++chunks_here;
if (chunks_here == chunks_needed) {
auto* a = (allocation_t*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
auto* a = (allocation_t*)(KMALLOC_RANGE_BASE + (first_chunk * CHUNK_SIZE));
u8* ptr = (u8*)a;
ptr += sizeof(allocation_t);
a->nchunk = chunks_needed;

View file

@ -107,7 +107,7 @@ CXXFLAGS += -nostdlib -nostdinc -nostdinc++
CXXFLAGS += -I../Toolchain/Local/i686-pc-serenity/include/c++/8.3.0/
CXXFLAGS += -I../Toolchain/Local/i686-pc-serenity/include/c++/8.3.0/i686-pc-serenity/
DEFINES += -DKERNEL
LDFLAGS += -Ttext 0x100000 -Wl,-T linker.ld -nostdlib
LDFLAGS += -Wl,-T linker.ld -nostdlib
all: $(KERNEL) kernel.map

View file

@ -1,10 +1,10 @@
#include "VirtualConsole.h"
#include "IO.h"
#include "StdLib.h"
#include <Kernel/Heap/kmalloc.h>
#include <AK/String.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Devices/KeyboardDevice.h>
#include <Kernel/Heap/kmalloc.h>
static u8* s_vga_buffer;
static VirtualConsole* s_consoles[6];
@ -32,7 +32,7 @@ void VirtualConsole::flush_vga_cursor()
void VirtualConsole::initialize()
{
s_vga_buffer = (u8*)0xb8000;
s_vga_buffer = (u8*)(kernel_virtual_base + 0xb8000);
memset(s_consoles, 0, sizeof(s_consoles));
s_active_console = -1;
}

View file

@ -24,7 +24,8 @@ MemoryManager::MemoryManager(u32 physical_address_for_kernel_page_tables)
{
m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(physical_address_for_kernel_page_tables));
m_page_table_zero = (PageTableEntry*)(physical_address_for_kernel_page_tables + PAGE_SIZE);
m_page_table_one = (PageTableEntry*)(physical_address_for_kernel_page_tables + PAGE_SIZE * 2);
m_page_table_768 = (PageTableEntry*)(physical_address_for_kernel_page_tables + PAGE_SIZE * 2);
m_page_table_769 = (PageTableEntry*)(physical_address_for_kernel_page_tables + PAGE_SIZE * 3);
initialize_paging();
kprintf("MM initialized.\n");
@ -38,7 +39,6 @@ void MemoryManager::populate_page_directory(PageDirectory& page_directory)
{
page_directory.m_directory_page = allocate_supervisor_physical_page();
page_directory.entries()[0].copy_from({}, kernel_page_directory().entries()[0]);
page_directory.entries()[1].copy_from({}, kernel_page_directory().entries()[1]);
// Defer to the kernel page tables for 0xC0000000-0xFFFFFFFF
for (int i = 768; i < 1024; ++i)
page_directory.entries()[i].copy_from({}, kernel_page_directory().entries()[i]);
@ -47,7 +47,6 @@ void MemoryManager::populate_page_directory(PageDirectory& page_directory)
void MemoryManager::initialize_paging()
{
memset(m_page_table_zero, 0, PAGE_SIZE);
memset(m_page_table_one, 0, PAGE_SIZE);
#ifdef MM_DEBUG
dbgprintf("MM: Kernel page directory @ %p\n", kernel_page_directory().cr3());
@ -60,16 +59,12 @@ void MemoryManager::initialize_paging()
map_protected(VirtualAddress(0), PAGE_SIZE);
#ifdef MM_DEBUG
dbgprintf("MM: Identity map bottom 8MB\n");
dbgprintf("MM: Identity map bottom 1MiB\n", kernel_virtual_base);
#endif
// The bottom 8 MB (except for the null page) are identity mapped & supervisor only.
// Every process shares these mappings.
create_identity_mapping(kernel_page_directory(), VirtualAddress(PAGE_SIZE), (8 * MB) - PAGE_SIZE);
// FIXME: We should move everything kernel-related above the 0xc0000000 virtual mark.
create_identity_mapping(kernel_page_directory(), VirtualAddress(PAGE_SIZE), (1 * MB) - PAGE_SIZE);
// Basic physical memory map:
// 0 -> 1 MB We're just leaving this alone for now.
// 0 -> 1 MB Page table/directory / I/O memory region
// 1 -> 3 MB Kernel image.
// (last page before 2MB) Used by quickmap_page().
// 2 MB -> 4 MB kmalloc_eternal() space.
@ -78,8 +73,10 @@ void MemoryManager::initialize_paging()
// 8 MB -> MAX Userspace physical pages (available for allocation!)
// Basic virtual memory map:
// 0 MB -> 8MB Identity mapped.
// 0xc0000000-0xffffffff Kernel-only virtual address space.
// 0x00000000-0x00100000 Identity mapped for Kernel Physical pages handed out by allocate_supervisor_physical_page (for I/O, page tables etc).
// 0x00800000-0xbfffffff Userspace program virtual address space.
// 0xc0001000-0xc0800000 Kernel-only virtual address space. This area is mapped to the first 8 MB of physical memory and includes areas for kmalloc, etc.
// 0xc0800000-0xffffffff Kernel virtual address space for kernel Page Directory.
#ifdef MM_DEBUG
dbgprintf("MM: Quickmap will use %p\n", m_quickmap_addr.get());
@ -100,10 +97,6 @@ void MemoryManager::initialize_paging()
if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
continue;
// FIXME: Maybe make use of stuff below the 1MB mark?
if (mmap->addr < (1 * MB))
continue;
if ((mmap->addr + mmap->len) > 0xffffffff)
continue;
@ -131,9 +124,8 @@ void MemoryManager::initialize_paging()
for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
auto addr = PhysicalAddress(page_base);
if (page_base < 7 * MB) {
// nothing
} else if (page_base >= 7 * MB && page_base < 8 * MB) {
// Anything below 1 * MB is a Kernel Physical region
if (page_base > PAGE_SIZE && page_base < 1 * MB) {
if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
region = m_super_physical_regions.last();
@ -141,7 +133,7 @@ void MemoryManager::initialize_paging()
} else {
region->expand(region->lower(), addr);
}
} else {
} else if (page_base > 8 * MB) {
if (region.is_null() || region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
region = m_user_physical_regions.last();
@ -162,7 +154,6 @@ void MemoryManager::initialize_paging()
#ifdef MM_DEBUG
dbgprintf("MM: Installing page directory\n");
#endif
// Turn on CR4.PGE so the CPU will respect the G bit in page tables.
asm volatile(
"mov %cr4, %eax\n"
@ -175,10 +166,6 @@ void MemoryManager::initialize_paging()
"orl $0x80000001, %%eax\n"
"movl %%eax, %%cr0\n" ::
: "%eax", "memory");
#ifdef MM_DEBUG
dbgprintf("MM: Paging initialized.\n");
#endif
}
PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
@ -199,9 +186,16 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
pde.set_present(true);
pde.set_writable(true);
pde.set_global(true);
} else if (page_directory_index == 1) {
} else if (page_directory_index == 768) {
ASSERT(&page_directory == m_kernel_page_directory);
pde.set_page_table_base((u32)m_page_table_one);
pde.set_page_table_base((u32)m_page_table_768);
pde.set_user_allowed(false);
pde.set_present(true);
pde.set_writable(true);
pde.set_global(true);
} else if (page_directory_index == 769) {
ASSERT(&page_directory == m_kernel_page_directory);
pde.set_page_table_base((u32)m_page_table_769);
pde.set_user_allowed(false);
pde.set_present(true);
pde.set_writable(true);
@ -227,7 +221,7 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
page_directory.m_physical_pages.set(page_directory_index, move(page_table));
}
}
return pde.page_table_base()[page_table_index];
return pde.page_table_virtual_base()[page_table_index];
}
void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)

View file

@ -1,6 +1,5 @@
#pragma once
#include <AK/String.h>
#include <AK/Badge.h>
#include <AK/Bitmap.h>
#include <AK/ByteBuffer.h>
@ -8,6 +7,7 @@
#include <AK/NonnullRefPtrVector.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <AK/Vector.h>
#include <AK/Weakable.h>
@ -114,7 +114,8 @@ private:
RefPtr<PageDirectory> m_kernel_page_directory;
PageTableEntry* m_page_table_zero { nullptr };
PageTableEntry* m_page_table_one { nullptr };
PageTableEntry* m_page_table_768 { nullptr };
PageTableEntry* m_page_table_769 { nullptr };
VirtualAddress m_quickmap_addr;

View file

@ -22,7 +22,7 @@ RefPtr<PageDirectory> PageDirectory::find_by_pdb(u32 pdb)
}
PageDirectory::PageDirectory(PhysicalAddress paddr)
: m_range_allocator(VirtualAddress(0xc0000000), 0x3f000000)
: m_range_allocator(VirtualAddress(kernelspace_range_base + 0x800000), 0x3f000000)
{
m_directory_page = PhysicalPage::create(paddr, true, false);
InterruptDisabler disabler;

View file

@ -22,7 +22,7 @@ public:
~PageDirectory();
u32 cr3() const { return m_directory_page->paddr().get(); }
PageDirectoryEntry* entries() { return reinterpret_cast<PageDirectoryEntry*>(cr3()); }
PageDirectoryEntry* entries() { return reinterpret_cast<PageDirectoryEntry*>(cr3() + kernel_virtual_base); }
void flush(VirtualAddress);

View file

@ -252,7 +252,7 @@ void Region::map(PageDirectory& page_directory)
pte.set_user_allowed(is_user_accessible());
page_directory.flush(page_vaddr);
#ifdef MM_DEBUG
dbgprintf("MM: >> map_region_at_address (PD=%p) '%s' V%p => P%p (@%p)\n", &page_directory, name().characters(), page_vaddr.get(), physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
kprintf("MM: >> map_region_at_address (PD=%p) '%s' V%p => P%p (@%p)\n", &page_directory, name().characters(), page_vaddr.get(), physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
#endif
}
}

View file

@ -2,9 +2,9 @@ ENTRY(start)
SECTIONS
{
. = 0x100000;
. = 0xc0100000;
.text BLOCK(4K) : ALIGN(4K)
.text ALIGN(4K) : AT(ADDR(.text) - 0xc0000000)
{
Arch/i386/Boot/boot.ao
*(.multiboot)
@ -13,7 +13,7 @@ SECTIONS
*(.text.startup)
}
.rodata BLOCK(4K) : ALIGN(4K)
.rodata ALIGN(4K) : AT(ADDR(.rodata) - 0xc0000000)
{
start_ctors = .;
*(.ctors)
@ -22,12 +22,12 @@ SECTIONS
*(.rodata)
}
.data BLOCK(4K) : ALIGN(4K)
.data ALIGN(4K) : AT(ADDR(.data) - 0xc0000000)
{
*(.data)
}
.bss BLOCK(4K) : ALIGN(4K)
.bss ALIGN(4K) : AT(ADDR(.bss) - 0xc0000000)
{
*(COMMON)
*(.bss)