ladybird/Kernel/StdLib.cpp
Andreas Kling 9eef39d68a Kernel: Start implementing x86 SMAP support
Supervisor Mode Access Prevention (SMAP) is an x86 CPU feature that
prevents the kernel from accessing userspace memory. With SMAP enabled,
trying to read/write a userspace memory address while in the kernel
will now generate a page fault.

Since it's sometimes necessary to read/write userspace memory, there
are two new instructions that quickly switch the protection on/off:
STAC (disables protection) and CLAC (enables protection).
These are exposed in kernel code via the stac() and clac() helpers.
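
As a rough sketch (the real helpers live in Kernel/Arch/i386/CPU.h, and are
only meaningful once CR4.SMAP has been enabled, which is an assumption of
this illustration), they are little more than wrappers around the two
instructions:

    // Sketch: thin wrappers around the STAC/CLAC instructions.
    // Both instructions only touch EFLAGS.AC, hence the "cc" clobber.
    inline void stac()
    {
        asm volatile("stac" ::: "cc"); // allow supervisor access to user pages
    }

    inline void clac()
    {
        asm volatile("clac" ::: "cc"); // re-enable SMAP protection
    }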

There's also a SmapDisabler RAII object that can be used to ensure
that you don't forget to re-enable protection before returning to
userspace code.
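
A minimal sketch of the idea, assuming the stac()/clac() helpers above
(the real class may also save and restore processor flags):

    // Sketch: RAII guard that suspends SMAP while it is in scope.
    class SmapDisabler {
    public:
        SmapDisabler() { stac(); }  // allow kernel access to userspace memory
        ~SmapDisabler() { clac(); } // restore protection on scope exit
    };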

This patch also adds copy_to_user(), copy_from_user() and memset_user()
which are the "correct" way of doing things. These functions allow us
to briefly disable protection for a specific purpose, and then turn it
back on immediately after it's done. Going forward all kernel code
should be moved to using these, and all uses of SmapDisabler are to be
considered FIXMEs.
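
For example, a syscall that hands a value back to its caller would now go
through copy_to_user() rather than wrapping a plain memcpy() in a
SmapDisabler (the syscall and helper below are made up for illustration):

    // Hypothetical example: returning a timestamp to userspace.
    // 'user_timestamp' points into the calling process's address space.
    int sys_get_time_example(u32* user_timestamp)
    {
        u32 now = current_time_in_seconds(); // made-up helper
        // SMAP is only suspended inside copy_to_user(), for this one write.
        copy_to_user(user_timestamp, &now, sizeof(now));
        return 0;
    }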

Note that we're not realizing the full potential of this feature since
I've used SmapDisabler quite liberally in this initial bring-up patch.
2020-01-05 18:14:51 +01:00


#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/StdLib.h>
extern "C" {
void* copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
SmapDisabler disabler;
auto* ptr = memcpy(dest_ptr, src_ptr, n);
return ptr;
}
void* copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
return copy_to_user(dest_ptr, src_ptr, n);
}

void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        // Both pointers are 4-byte aligned: copy whole 32-bit words with "rep movsl".
        size_t size_ts = n / sizeof(size_t);
        asm volatile(
            "rep movsl\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    // Copy any remaining (or unaligned) bytes one at a time with "rep movsb".
    asm volatile(
        "rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
        : "memory");
    return dest_ptr;
}

void* memmove(void* dest, const void* src, size_t n)
{
    // Destination starts below the source: a plain forward copy is safe.
    if (dest < src)
        return memcpy(dest, src, n);

    // Otherwise copy backwards so an overlapping source isn't clobbered.
    u8* pd = (u8*)dest;
    const u8* ps = (const u8*)src;
    for (pd += n, ps += n; n--;)
        *--pd = *--ps;
    return dest;
}

char* strcpy(char* dest, const char* src)
{
    auto* dest_ptr = dest;
    auto* src_ptr = src;
    while ((*dest_ptr++ = *src_ptr++) != '\0')
        ;
    return dest;
}

char* strncpy(char* dest, const char* src, size_t n)
{
    size_t i;
    for (i = 0; i < n && src[i] != '\0'; ++i)
        dest[i] = src[i];
    for (; i < n; ++i)
        dest[i] = '\0';
    return dest;
}

void* memset_user(void* dest_ptr, int c, size_t n)
{
    // Briefly suspend SMAP so the kernel may fill userspace memory.
    SmapDisabler disabler;
    auto* ptr = memset(dest_ptr, c, n);
    return ptr;
}

void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        // Broadcast the fill byte into all four bytes of a 32-bit word.
        size_t expanded_c = (u8)c;
        expanded_c |= expanded_c << 8;
        expanded_c |= expanded_c << 16;
        asm volatile(
            "rep stosl\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    // Fill any remaining bytes one at a time with "rep stosb".
    asm volatile(
        "rep stosb\n"
        : "=D"(dest), "=c"(n)
        : "0"(dest), "1"(n), "a"(c)
        : "memory");
    return dest_ptr;
}

char* strrchr(const char* str, int ch)
{
    char* last = nullptr;
    char c;
    for (; (c = *str); ++str) {
        if (c == ch)
            last = const_cast<char*>(str);
    }
    return last;
}

size_t strlen(const char* str)
{
    size_t len = 0;
    while (*(str++))
        ++len;
    return len;
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

char* strdup(const char* str)
{
    size_t len = strlen(str);
    char* new_str = (char*)kmalloc(len + 1);
    strcpy(new_str, str);
    return new_str;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}

char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;
    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            // Scan for the needle's first character, then compare the remainder.
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}

[[noreturn]] void __cxa_pure_virtual()
{
    ASSERT_NOT_REACHED();
}

void* realloc(void* p, size_t s)
{
    return krealloc(p, s);
}

void free(void* p)
{
    return kfree(p);
}

// Stack-smashing protector: canary value and failure handler.
extern u32 __stack_chk_guard;
u32 __stack_chk_guard = (u32)0xc0000c13;

[[noreturn]] void __stack_chk_fail()
{
    ASSERT_NOT_REACHED();
}
}