Kernel: Add support for setting up a x86_64 GDT once in C++ land

This commit is contained in:
Gunnar Beutner 2021-06-25 14:34:04 +02:00 committed by Andreas Kling
parent 29d9666e02
commit f630299d49
5 changed files with 55 additions and 11 deletions

View file

@@ -30,6 +30,7 @@ ALWAYS_INLINE FlatPtr cpu_flags()
return flags;
}
#if ARCH(I386)
ALWAYS_INLINE void set_fs(u16 segment)
{
asm volatile(
@@ -59,6 +60,7 @@ ALWAYS_INLINE u16 get_gs()
: "=a"(gs));
return gs;
}
#endif
ALWAYS_INLINE u32 read_fs_u32(u32 offset)
{

View file

@@ -10,13 +10,14 @@
#include <AK/Types.h>
#include <Kernel/VirtualAddress.h>
#define GDT_SELECTOR_CODE0 0x08
#define GDT_SELECTOR_DATA0 0x10
#define GDT_SELECTOR_CODE3 0x18
#define GDT_SELECTOR_DATA3 0x20
#define GDT_SELECTOR_TLS 0x28
#define GDT_SELECTOR_PROC 0x30
#define GDT_SELECTOR_TSS 0x38
#if ARCH(I386)
# define GDT_SELECTOR_CODE0 0x08
# define GDT_SELECTOR_DATA0 0x10
# define GDT_SELECTOR_CODE3 0x18
# define GDT_SELECTOR_DATA3 0x20
# define GDT_SELECTOR_TLS 0x28
# define GDT_SELECTOR_PROC 0x30
# define GDT_SELECTOR_TSS 0x38
// SYSENTER makes certain assumptions on how the GDT is structured:
static_assert(GDT_SELECTOR_CODE0 + 8 == GDT_SELECTOR_DATA0); // SS0 = CS0 + 8
@@ -24,6 +25,12 @@ static_assert(GDT_SELECTOR_CODE0 + 8 == GDT_SELECTOR_DATA0); // SS0 = CS0 + 8
// SYSEXIT makes certain assumptions on how the GDT is structured:
static_assert(GDT_SELECTOR_CODE0 + 16 == GDT_SELECTOR_CODE3); // CS3 = CS0 + 16
static_assert(GDT_SELECTOR_CODE0 + 24 == GDT_SELECTOR_DATA3); // SS3 = CS0 + 32
#else
# define GDT_SELECTOR_CODE0 0x08
# define GDT_SELECTOR_CODE3 0x10
# define GDT_SELECTOR_TSS 0x18
# define GDT_SELECTOR_TSS_PART2 0x20
#endif
namespace Kernel {
@@ -82,6 +89,7 @@ union [[gnu::packed]] Descriptor {
base_lo = base.get() & 0xffffu;
base_hi = (base.get() >> 16u) & 0xffu;
base_hi2 = (base.get() >> 24u) & 0xffu;
VERIFY(base.get() <= 0xffffffff);
}
void set_limit(u32 length)
@@ -91,6 +99,8 @@ union [[gnu::packed]] Descriptor {
}
};
static_assert(sizeof(Descriptor) == 8);
enum class IDTEntryType {
TaskGate32 = 0b0101,
InterruptGate16 = 0b110,

View file

@@ -19,6 +19,11 @@
namespace Kernel {
#if ARCH(X86_64)
# define MSR_FS_BASE 0xc0000100
# define MSR_GS_BASE 0xc0000102
#endif
class Thread;
class SchedulerPerProcessorData;
struct MemoryManagerData;
@@ -241,7 +246,11 @@ public:
ALWAYS_INLINE static bool is_initialized()
{
return get_fs() == GDT_SELECTOR_PROC && read_fs_u32(__builtin_offsetof(Processor, m_self)) != 0;
return
#if ARCH(I386)
get_fs() == GDT_SELECTOR_PROC &&
#endif
read_fs_u32(__builtin_offsetof(Processor, m_self)) != 0;
}
ALWAYS_INLINE void set_scheduler_data(SchedulerPerProcessorData& scheduler_data)

View file

@@ -19,6 +19,7 @@
#include <Kernel/Arch/x86/CPUID.h>
#include <Kernel/Arch/x86/Interrupts.h>
#include <Kernel/Arch/x86/MSR.h>
#include <Kernel/Arch/x86/Processor.h>
#include <Kernel/Arch/x86/ProcessorInfo.h>
#include <Kernel/Arch/x86/SafeMem.h>
@@ -1056,11 +1057,17 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
m_gdtr.limit = 0;
write_raw_gdt_entry(0x0000, 0x00000000, 0x00000000);
#if ARCH(I386)
write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00cf9a00); // code0
write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00cf9200); // data0
write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00cffa00); // code3
write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x00cff200); // data3
#else
write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00ef9a00); // code0
write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00effa00); // code3
#endif
#if ARCH(I386)
Descriptor tls_descriptor {};
tls_descriptor.low = tls_descriptor.high = 0;
tls_descriptor.dpl = 3;
@@ -1083,10 +1090,11 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
fs_descriptor.descriptor_type = 1;
fs_descriptor.type = 2;
write_gdt_entry(GDT_SELECTOR_PROC, fs_descriptor); // fs0
#endif
Descriptor tss_descriptor {};
tss_descriptor.set_base(VirtualAddress { &m_tss });
tss_descriptor.set_limit(sizeof(TSS32) - 1);
tss_descriptor.set_base(VirtualAddress { (size_t)&m_tss & 0xffffffff });
tss_descriptor.set_limit(sizeof(TSS) - 1);
tss_descriptor.dpl = 0;
tss_descriptor.segment_present = 1;
tss_descriptor.granularity = 0;
@@ -1096,9 +1104,21 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
tss_descriptor.type = 9;
write_gdt_entry(GDT_SELECTOR_TSS, tss_descriptor); // tss
#if ARCH(X86_64)
Descriptor tss_descriptor_part2 {};
tss_descriptor_part2.low = (size_t)&m_tss >> 32;
write_gdt_entry(GDT_SELECTOR_TSS_PART2, tss_descriptor_part2);
#endif
flush_gdt();
load_task_register(GDT_SELECTOR_TSS);
#if ARCH(X86_64)
MSR fs_base(MSR_FS_BASE);
fs_base.set((size_t)this & 0xffffffff, (size_t)this >> 32);
#endif
#if ARCH(I386)
asm volatile(
"mov %%ax, %%ds\n"
"mov %%ax, %%es\n"
@@ -1107,7 +1127,6 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
: "memory");
set_fs(GDT_SELECTOR_PROC);
#if ARCH(I386)
// Make sure CS points to the kernel code descriptor.
// clang-format off
asm volatile(

View file

@@ -607,9 +607,13 @@ KResultOr<FlatPtr> Process::sys$allocate_tls(Userspace<const char*> initial_data
if (tsr_result.is_error())
return EFAULT;
#if ARCH(I386)
auto& tls_descriptor = Processor::current().get_gdt_entry(GDT_SELECTOR_TLS);
tls_descriptor.set_base(main_thread->thread_specific_data());
tls_descriptor.set_limit(main_thread->thread_specific_region_size());
#else
TODO();
#endif
return m_master_tls_region.unsafe_ptr()->vaddr().get();
}