From beb55f726fbd2a5d68f2a2daa278b70a752abc17 Mon Sep 17 00:00:00 2001
From: Daniel Bertalan
Date: Fri, 19 May 2023 13:06:21 +0200
Subject: [PATCH] Kernel/aarch64: Detect if access faults come from SafeMem

This commit lets us differentiate whether access faults are caused by
accessing junk memory addresses given to us by userspace or if we hit a
kernel bug.

The stub implementations of the `safe_*` functions currently don't let
us jump back into them and return a value indicating failure, so we
panic if such a fault happens. Practically, this means that we still
crash, but if the access violation was caused by something else, we
take the usual kernel crash code path and print a register and memory
dump, rather than hitting the `TODO_AARCH64` in
`handle_safe_access_fault`.
---
 Kernel/Arch/aarch64/SafeMem.cpp | 49 ++++++++++++++++++++++++++-------
 Kernel/Arch/aarch64/linker.ld   |  8 ++++++
 2 files changed, 47 insertions(+), 10 deletions(-)

diff --git a/Kernel/Arch/aarch64/SafeMem.cpp b/Kernel/Arch/aarch64/SafeMem.cpp
index c537dc95cd9..1d8054633a0 100644
--- a/Kernel/Arch/aarch64/SafeMem.cpp
+++ b/Kernel/Arch/aarch64/SafeMem.cpp
@@ -1,15 +1,26 @@
 /*
  * Copyright (c) 2022, Timon Kruiper
+ * Copyright (c) 2023, Daniel Bertalan
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */

+#include
 #include <Kernel/Arch/RegisterState.h>
 #include <Kernel/Arch/SafeMem.h>

+#define CODE_SECTION(section_name) __attribute__((section(section_name)))
+
+extern "C" u8 start_of_safemem_text[];
+extern "C" u8 end_of_safemem_text[];
+
+extern "C" u8 start_of_safemem_atomic_text[];
+extern "C" u8 end_of_safemem_atomic_text[];
+
 namespace Kernel {

-bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
+CODE_SECTION(".text.safemem")
+NEVER_INLINE FLATTEN bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
 {
     // FIXME: Actually implement a safe memset.
     auto* dest = static_cast<u8*>(dest_ptr);
@@ -19,7 +30,8 @@ bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
     return true;
 }

-ssize_t safe_strnlen(char const* str, unsigned long max_n, void*& fault_at)
+CODE_SECTION(".text.safemem")
+NEVER_INLINE FLATTEN ssize_t safe_strnlen(char const* str, unsigned long max_n, void*& fault_at)
 {
     // FIXME: Actually implement a safe strnlen.
     size_t len = 0;
@@ -29,7 +41,8 @@ bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
     return len;
 }

-bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*& fault_at)
+CODE_SECTION(".text.safemem")
+NEVER_INLINE FLATTEN bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*& fault_at)
 {
     // FIXME: Actually implement a safe memcpy.
     auto* pd = static_cast<u8*>(dest_ptr);
@@ -40,40 +53,56 @@ bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*& fa
     return true;
 }

-Optional<u32> safe_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val)
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE FLATTEN Optional<u32> safe_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val)
 {
     // FIXME: Handle access faults.
     return AK::atomic_compare_exchange_strong(var, expected, val, AK::memory_order_relaxed);
 }

-Optional<u32> safe_atomic_load_relaxed(u32 volatile* var)
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE FLATTEN Optional<u32> safe_atomic_load_relaxed(u32 volatile* var)
 {
     // FIXME: Handle access faults.
     return AK::atomic_load(var, AK::memory_order_relaxed);
 }

-Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile* var, u32 val)
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE FLATTEN Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile* var, u32 val)
 {
     // FIXME: Handle access faults.
     return AK::atomic_fetch_add(var, val, AK::memory_order_relaxed);
 }

-Optional<u32> safe_atomic_exchange_relaxed(u32 volatile* var, u32 val)
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE FLATTEN Optional<u32> safe_atomic_exchange_relaxed(u32 volatile* var, u32 val)
 {
     // FIXME: Handle access faults.
     return AK::atomic_exchange(var, val, AK::memory_order_relaxed);
 }

-bool safe_atomic_store_relaxed(u32 volatile* var, u32 val)
+CODE_SECTION(".text.safemem.atomic")
+NEVER_INLINE FLATTEN bool safe_atomic_store_relaxed(u32 volatile* var, u32 val)
 {
     // FIXME: Handle access faults.
     AK::atomic_store(var, val);
     return true;
 }

-bool handle_safe_access_fault(RegisterState&, FlatPtr)
+bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
 {
-    TODO_AARCH64();
+    FlatPtr ip = regs.ip();
+
+    if (ip >= (FlatPtr)&start_of_safemem_text && ip < (FlatPtr)&end_of_safemem_text) {
+        dbgln("FIXME: Faulted while accessing userspace address {:p}.", fault_address);
+        dbgln("       We need to jump back into the appropriate SafeMem function, set fault_at and return failure.");
+        TODO_AARCH64();
+    } else if (ip >= (FlatPtr)&start_of_safemem_atomic_text && ip < (FlatPtr)&end_of_safemem_atomic_text) {
+        dbgln("FIXME: Faulted while accessing userspace address {:p}.", fault_address);
+        dbgln("       We need to jump back into the appropriate atomic SafeMem function and return failure.");
+        TODO_AARCH64();
+    }
+
     return false;
 }

diff --git a/Kernel/Arch/aarch64/linker.ld b/Kernel/Arch/aarch64/linker.ld
index e80bf29e5c5..9b3fde122e4 100644
--- a/Kernel/Arch/aarch64/linker.ld
+++ b/Kernel/Arch/aarch64/linker.ld
@@ -20,6 +20,14 @@ SECTIONS
     .text ALIGN(4K) : AT (ADDR(.text) - KERNEL_MAPPING_BASE)
     {
         *(.text.first)
+
+        start_of_safemem_text = .;
+        KEEP(*(.text.safemem))
+        end_of_safemem_text = .;
+        start_of_safemem_atomic_text = .;
+        KEEP(*(.text.safemem.atomic))
+        end_of_safemem_atomic_text = .;
+
         *(.text*)
     } :text
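
Note (illustrative, not part of the patch above): the reason a fault inside these helpers must not take the normal crash path is that callers probe userspace memory through the `safe_*` functions and expect a failure return rather than a kernel panic. The sketch below shows how a caller would consume `safe_memcpy` once the FIXMEs above are resolved. The helper name `copy_block_from_user_example` is hypothetical; only the `safe_memcpy` signature and the `dbgln`/`{:p}` usage are taken from the diff above.

    #include <AK/Format.h>
    #include <AK/Types.h>
    #include <Kernel/Arch/SafeMem.h>

    namespace Kernel {

    // Hypothetical caller, for illustration only: it relies on the intended
    // contract that a fault inside safe_memcpy() makes it return false and
    // report the faulting address through fault_at, instead of crashing.
    bool copy_block_from_user_example(void* dest, void const* user_src, size_t n)
    {
        void* fault_at = nullptr;
        if (!safe_memcpy(dest, user_src, n, fault_at)) {
            dbgln("copy_block_from_user_example: access fault at {:p}", (FlatPtr)fault_at);
            return false;
        }
        return true;
    }

    }

Until that jump-back fixup is implemented, such a fault still ends in `TODO_AARCH64()`, but the new `start_of_safemem_text`/`end_of_safemem_text` range check at least distinguishes a bad userspace address from an unrelated kernel bug.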