2018-11-07 22:15:02 +01:00
# include "Scheduler.h"
# include "Process.h"
2019-02-01 03:50:06 +01:00
# include "RTC.h"
# include "i8253.h"
2019-02-06 15:05:47 +01:00
# include <AK/TemporaryChange.h>
2019-03-20 17:09:46 +01:00
# include <Kernel/Alarm.h>
2019-04-29 04:55:54 +02:00
# include <Kernel/FileSystem/FileDescriptor.h>
2018-11-07 22:15:02 +01:00
2018-11-07 22:24:20 +01:00
//#define LOG_EVERY_CONTEXT_SWITCH
2018-11-07 22:15:02 +01:00
//#define SCHEDULER_DEBUG
2019-02-07 12:21:17 +01:00
// Map a process priority to the length of its time slice.
// One time slice unit == 1ms (one timer tick).
static dword time_slice_for(Process::Priority priority)
{
    if (priority == Process::HighPriority)
        return 50;
    if (priority == Process::NormalPriority)
        return 15;
    if (priority == Process::LowPriority)
        return 5;
    if (priority == Process::IdlePriority)
        return 1;
    ASSERT_NOT_REACHED();
}
2018-11-07 22:15:02 +01:00
2019-03-23 22:03:17 +01:00
// The thread currently executing on the CPU (null until the very first context switch).
Thread* current;
// The last thread that used the FPU (its FPU state is swapped lazily).
Thread* g_last_fpu_thread;
// The finalizer thread; pick_next() unblocks it when some thread enters the Dying state.
Thread* g_finalizer;

// The "colonel" is the kernel's idle process; it runs when nothing else is runnable.
static Process* s_colonel_process;

// Monotonic uptime in timer ticks; incremented once per timer_tick().
qword g_uptime;

// Selector + TSS used to redirect a task switch, so that a thread rewriting
// its own TSS doesn't resume right after its yield() (see prepare_to_modify_tss()).
struct TaskRedirectionData {
    word selector;
    TSS32 tss;
};
static TaskRedirectionData s_redirection;

// True while pick_next() is running; guards against scheduler re-entrancy.
static bool s_active;

bool Scheduler::is_active()
{
    return s_active;
}
2018-11-07 23:13:38 +01:00
2018-11-07 22:15:02 +01:00
bool Scheduler : : pick_next ( )
{
ASSERT_INTERRUPTS_DISABLED ( ) ;
2019-02-06 15:05:47 +01:00
ASSERT ( ! s_active ) ;
TemporaryChange < bool > change ( s_active , true ) ;
ASSERT ( s_active ) ;
2018-11-07 22:15:02 +01:00
if ( ! current ) {
// XXX: The first ever context_switch() goes to the idle process.
// This to setup a reliable place we can return to.
2019-03-23 22:03:17 +01:00
return context_switch ( s_colonel_process - > main_thread ( ) ) ;
2018-11-07 22:15:02 +01:00
}
2019-03-25 02:06:57 +01:00
struct timeval now ;
kgettimeofday ( now ) ;
auto now_sec = now . tv_sec ;
auto now_usec = now . tv_usec ;
2019-03-13 13:13:23 +01:00
2019-03-23 22:03:17 +01:00
// Check and unblock threads whose wait conditions have been met.
Thread : : for_each ( [ & ] ( Thread & thread ) {
auto & process = thread . process ( ) ;
if ( thread . state ( ) = = Thread : : BlockedSleep ) {
2019-04-14 01:29:14 +02:00
if ( thread . wakeup_time ( ) < = g_uptime )
2019-03-23 22:03:17 +01:00
thread . unblock ( ) ;
return IterationDecision : : Continue ;
2018-11-07 22:15:02 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : BlockedWait ) {
process . for_each_child ( [ & ] ( Process & child ) {
2019-03-24 01:20:35 +01:00
if ( ! child . is_dead ( ) )
2018-11-11 15:36:40 +01:00
return true ;
2019-03-23 22:03:17 +01:00
if ( thread . waitee_pid ( ) = = - 1 | | thread . waitee_pid ( ) = = child . pid ( ) ) {
thread . m_waitee_pid = child . pid ( ) ;
thread . unblock ( ) ;
2018-11-11 15:36:40 +01:00
return false ;
}
return true ;
} ) ;
2019-03-23 22:03:17 +01:00
return IterationDecision : : Continue ;
2018-11-07 22:15:02 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : BlockedRead ) {
2019-05-03 20:15:54 +02:00
ASSERT ( thread . m_blocked_descriptor ) ;
2018-11-07 22:15:02 +01:00
// FIXME: Block until the amount of data wanted is available.
2019-05-03 20:15:54 +02:00
if ( thread . m_blocked_descriptor - > can_read ( ) )
2019-03-23 22:03:17 +01:00
thread . unblock ( ) ;
return IterationDecision : : Continue ;
2018-11-07 22:15:02 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : BlockedWrite ) {
2019-05-03 20:15:54 +02:00
ASSERT ( thread . m_blocked_descriptor ! = - 1 ) ;
if ( thread . m_blocked_descriptor - > can_write ( ) )
2019-03-23 22:03:17 +01:00
thread . unblock ( ) ;
return IterationDecision : : Continue ;
2018-11-12 01:28:46 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : BlockedConnect ) {
2019-05-03 20:15:54 +02:00
auto & descriptor = * thread . m_blocked_descriptor ;
auto & socket = * descriptor . socket ( ) ;
if ( socket . is_connected ( ) )
2019-03-23 22:03:17 +01:00
thread . unblock ( ) ;
return IterationDecision : : Continue ;
2019-02-14 17:18:35 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : BlockedReceive ) {
2019-05-03 20:15:54 +02:00
auto & descriptor = * thread . m_blocked_descriptor ;
auto & socket = * descriptor . socket ( ) ;
2019-03-12 17:27:07 +01:00
// FIXME: Block until the amount of data wanted is available.
2019-03-13 13:13:23 +01:00
bool timed_out = now_sec > socket . receive_deadline ( ) . tv_sec | | ( now_sec = = socket . receive_deadline ( ) . tv_sec & & now_usec > = socket . receive_deadline ( ) . tv_usec ) ;
2019-05-03 20:15:54 +02:00
if ( timed_out | | descriptor . can_read ( ) ) {
2019-03-23 22:03:17 +01:00
thread . unblock ( ) ;
return IterationDecision : : Continue ;
2019-03-12 17:27:07 +01:00
}
2019-03-23 22:03:17 +01:00
return IterationDecision : : Continue ;
2019-03-12 17:27:07 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : BlockedSelect ) {
if ( thread . m_select_has_timeout ) {
if ( now_sec > thread . m_select_timeout . tv_sec | | ( now_sec = = thread . m_select_timeout . tv_sec & & now_usec > = thread . m_select_timeout . tv_usec ) ) {
thread . unblock ( ) ;
return IterationDecision : : Continue ;
2019-02-01 03:50:06 +01:00
}
}
2019-03-23 22:03:17 +01:00
for ( int fd : thread . m_select_read_fds ) {
2019-04-29 13:58:40 +02:00
if ( process . m_fds [ fd ] . descriptor - > can_read ( ) ) {
2019-03-23 22:03:17 +01:00
thread . unblock ( ) ;
return IterationDecision : : Continue ;
2019-01-15 23:12:20 +01:00
}
}
2019-03-23 22:03:17 +01:00
for ( int fd : thread . m_select_write_fds ) {
2019-04-29 13:58:40 +02:00
if ( process . m_fds [ fd ] . descriptor - > can_write ( ) ) {
2019-03-23 22:03:17 +01:00
thread . unblock ( ) ;
return IterationDecision : : Continue ;
2019-01-15 23:12:20 +01:00
}
}
2019-03-23 22:03:17 +01:00
return IterationDecision : : Continue ;
2019-03-20 17:09:46 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : BlockedSnoozing ) {
if ( thread . m_snoozing_alarm - > is_ringing ( ) ) {
thread . m_snoozing_alarm = nullptr ;
thread . unblock ( ) ;
2019-03-20 17:09:46 +01:00
}
2019-03-23 22:03:17 +01:00
return IterationDecision : : Continue ;
2019-01-15 23:12:20 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : Skip1SchedulerPass ) {
thread . set_state ( Thread : : Skip0SchedulerPasses ) ;
return IterationDecision : : Continue ;
2018-11-07 23:13:38 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : Skip0SchedulerPasses ) {
thread . set_state ( Thread : : Runnable ) ;
return IterationDecision : : Continue ;
2018-11-07 23:13:38 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread . state ( ) = = Thread : : Dying ) {
ASSERT ( g_finalizer ) ;
if ( g_finalizer - > state ( ) = = Thread : : BlockedLurking )
g_finalizer - > unblock ( ) ;
return IterationDecision : : Continue ;
}
return IterationDecision : : Continue ;
} ) ;
Process : : for_each ( [ & ] ( Process & process ) {
if ( process . is_dead ( ) ) {
if ( current ! = & process . main_thread ( ) & & ( ! process . ppid ( ) | | ! Process : : from_pid ( process . ppid ( ) ) ) ) {
2018-11-28 22:01:24 +01:00
auto name = process . name ( ) ;
auto pid = process . pid ( ) ;
auto exit_status = Process : : reap ( process ) ;
2019-01-01 03:56:39 +01:00
dbgprintf ( " reaped unparented process %s(%u), exit status: %u \n " , name . characters ( ) , pid , exit_status ) ;
2018-11-28 22:01:24 +01:00
}
2018-11-07 23:59:49 +01:00
}
2018-11-07 22:15:02 +01:00
return true ;
} ) ;
// Dispatch any pending signals.
// FIXME: Do we really need this to be a separate pass over the process list?
2019-03-23 22:03:17 +01:00
Thread : : for_each_living ( [ ] ( Thread & thread ) {
if ( ! thread . has_unmasked_pending_signals ( ) )
2018-11-07 22:15:02 +01:00
return true ;
2019-03-05 12:50:55 +01:00
// FIXME: It would be nice if the Scheduler didn't have to worry about who is "current"
// For now, avoid dispatching signals to "current" and do it in a scheduling pass
// while some other process is interrupted. Otherwise a mess will be made.
2019-03-23 22:03:17 +01:00
if ( & thread = = current )
2019-03-05 12:50:55 +01:00
return true ;
2018-11-07 22:15:02 +01:00
// We know how to interrupt blocked processes, but if they are just executing
// at some random point in the kernel, let them continue. They'll be in userspace
// sooner or later and we can deliver the signal then.
// FIXME: Maybe we could check when returning from a syscall if there's a pending
// signal and dispatch it then and there? Would that be doable without the
// syscall effectively being "interrupted" despite having completed?
2019-03-23 22:03:17 +01:00
if ( thread . in_kernel ( ) & & ! thread . is_blocked ( ) & & ! thread . is_stopped ( ) )
2018-11-07 22:15:02 +01:00
return true ;
2018-11-28 23:30:06 +01:00
// NOTE: dispatch_one_pending_signal() may unblock the process.
2019-03-23 22:03:17 +01:00
bool was_blocked = thread . is_blocked ( ) ;
if ( thread . dispatch_one_pending_signal ( ) = = ShouldUnblockThread : : No )
2018-11-16 21:14:25 +01:00
return true ;
2018-11-28 23:30:06 +01:00
if ( was_blocked ) {
2019-03-23 22:03:17 +01:00
dbgprintf ( " Unblock %s(%u) due to signal \n " , thread . process ( ) . name ( ) . characters ( ) , thread . pid ( ) ) ;
thread . m_was_interrupted_while_blocked = true ;
thread . unblock ( ) ;
2018-11-07 22:15:02 +01:00
}
return true ;
} ) ;
# ifdef SCHEDULER_DEBUG
dbgprintf ( " Scheduler choices: \n " ) ;
2019-03-23 22:03:17 +01:00
for ( auto * thread = g_threads - > head ( ) ; thread ; thread = thread - > next ( ) ) {
//if (process->state() == Thread::BlockedWait || process->state() == Thread::BlockedSleep)
2018-11-07 22:15:02 +01:00
// continue;
2019-03-23 22:03:17 +01:00
auto * process = & thread - > process ( ) ;
dbgprintf ( " [K%x] % 12s %s(%u:%u) @ %w:%x \n " , process , to_string ( thread - > state ( ) ) , process - > name ( ) . characters ( ) , process - > pid ( ) , thread - > tid ( ) , thread - > tss ( ) . cs , thread - > tss ( ) . eip ) ;
2018-11-07 22:15:02 +01:00
}
# endif
2019-03-23 22:03:17 +01:00
auto * previous_head = g_threads - > head ( ) ;
2018-11-07 22:15:02 +01:00
for ( ; ; ) {
// Move head to tail.
2019-03-23 22:03:17 +01:00
g_threads - > append ( g_threads - > remove_head ( ) ) ;
auto * thread = g_threads - > head ( ) ;
2018-11-07 22:15:02 +01:00
2019-03-23 22:03:17 +01:00
if ( ! thread - > process ( ) . is_being_inspected ( ) & & ( thread - > state ( ) = = Thread : : Runnable | | thread - > state ( ) = = Thread : : Running ) ) {
2018-11-07 22:15:02 +01:00
# ifdef SCHEDULER_DEBUG
2019-03-23 22:03:17 +01:00
kprintf ( " switch to %s(%u:%u) @ %w:%x \n " , thread - > process ( ) . name ( ) . characters ( ) , thread - > process ( ) . pid ( ) , thread - > tid ( ) , thread - > tss ( ) . cs , thread - > tss ( ) . eip ) ;
2018-11-07 22:15:02 +01:00
# endif
2019-03-23 22:03:17 +01:00
return context_switch ( * thread ) ;
2018-11-07 22:15:02 +01:00
}
2019-03-23 22:03:17 +01:00
if ( thread = = previous_head ) {
2018-11-07 23:13:38 +01:00
// Back at process_head, nothing wants to run. Send in the colonel!
2019-03-23 22:03:17 +01:00
return context_switch ( s_colonel_process - > main_thread ( ) ) ;
2018-11-07 22:15:02 +01:00
}
}
}
2019-03-23 22:03:17 +01:00
// Donate the remainder of the current thread's time slice to `beneficiary`
// (e.g. when releasing a contended resource, hand the CPU to the waiter).
// Falls back to a plain yield() if the beneficiary can't accept the donation.
bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
{
    InterruptDisabler disabler;
    // Verify the pointer still refers to a live thread before touching it.
    if (!Thread::is_thread(beneficiary))
        return false;

    (void)reason; // only used by the SCHEDULER_DEBUG build below
    unsigned ticks_left = current->ticks_left();
    // Nothing worth donating, or beneficiary not runnable: just yield normally.
    if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
        return yield();

    // Cap the donation at the beneficiary's own full time slice.
    unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(beneficiary->process().priority()));
#ifdef SCHEDULER_DEBUG
    dbgprintf("%s(%u:%u) donating %u ticks to %s(%u:%u), reason=%s\n", current->process().name().characters(), current->pid(), current->tid(), ticks_to_donate, beneficiary->process().name().characters(), beneficiary->pid(), beneficiary->tid(), reason);
#endif
    // Switch bookkeeping to the beneficiary, then override the fresh slice
    // context_switch() granted with the donated tick count, and jump.
    context_switch(*beneficiary);
    beneficiary->set_ticks_left(ticks_to_donate);
    switch_now();
    return false;
}
2018-11-07 22:15:02 +01:00
bool Scheduler : : yield ( )
{
2019-01-23 05:05:45 +01:00
InterruptDisabler disabler ;
2019-02-06 15:05:47 +01:00
ASSERT ( current ) ;
2019-03-23 22:03:17 +01:00
// dbgprintf("%s(%u:%u) yield()\n", current->process().name().characters(), current->pid(), current->tid());
2018-11-07 22:15:02 +01:00
2019-02-06 15:05:47 +01:00
if ( ! pick_next ( ) )
2019-04-17 12:41:51 +02:00
return false ;
2018-11-07 22:15:02 +01:00
2019-03-23 22:03:17 +01:00
// dbgprintf("yield() jumping to new process: sel=%x, %s(%u:%u)\n", current->far_ptr().selector, current->process().name().characters(), current->pid(), current->tid());
2018-11-07 22:15:02 +01:00
switch_now ( ) ;
2019-04-17 12:41:51 +02:00
return true ;
2018-11-07 22:15:02 +01:00
}
void Scheduler : : pick_next_and_switch_now ( )
{
bool someone_wants_to_run = pick_next ( ) ;
ASSERT ( someone_wants_to_run ) ;
switch_now ( ) ;
}
// Perform the actual hardware task switch to `current`.
// The target's TSS descriptor must be marked "available" (type 9) for the
// ljmp-based task switch to be legal. Control does not come back here until
// this task is switched to again.
void Scheduler::switch_now()
{
    Descriptor& descriptor = get_gdt_entry(current->selector());
    descriptor.type = 9; // Available 32-bit TSS
    flush_gdt();
    // Re-enable interrupts, then far-jump through the thread's far pointer
    // (selector:offset), which triggers the x86 hardware task switch.
    asm("sti\n"
        "ljmp *(%%eax)\n"
        ::"a"(&current->far_ptr())
    );
}
2019-03-23 22:03:17 +01:00
// Make `thread` the new `current`, updating scheduler bookkeeping and the
// GDT so a subsequent switch_now()/iret lands in it. Returns false when
// `thread` is already current (i.e. no switch is needed).
bool Scheduler::context_switch(Thread& thread)
{
    // Hand out a fresh time slice and record that this thread got scheduled.
    thread.set_ticks_left(time_slice_for(thread.process().priority()));
    thread.did_schedule();

    if (current == &thread)
        return false;

    if (current) {
        // If the last process hasn't blocked (still marked as running),
        // mark it as runnable for the next round.
        if (current->state() == Thread::Running)
            current->set_state(Thread::Runnable);

#ifdef LOG_EVERY_CONTEXT_SWITCH
        dbgprintf("Scheduler: %s(%u:%u) -> %s(%u:%u) %w:%x\n",
            current->process().name().characters(), current->process().pid(), current->tid(),
            thread.process().name().characters(), thread.process().pid(), thread.tid(),
            thread.tss().cs, thread.tss().eip);
#endif
    }

    current = &thread;
    thread.set_state(Thread::Running);

    // Lazily allocate and configure a GDT entry for this thread's TSS the
    // first time it is ever switched to.
    if (!thread.selector()) {
        thread.set_selector(gdt_alloc_entry());
        auto& descriptor = get_gdt_entry(thread.selector());
        descriptor.set_base(&thread.tss());
        descriptor.set_limit(0xffff);
        descriptor.dpl = 0;               // kernel privilege
        descriptor.segment_present = 1;
        descriptor.granularity = 1;
        descriptor.zero = 0;
        descriptor.operation_size = 1;    // 32-bit
        descriptor.descriptor_type = 0;   // system segment (TSS)
    }

    auto& descriptor = get_gdt_entry(thread.selector());
    descriptor.type = 11; // Busy TSS
    flush_gdt();
    return true;
}
2018-11-07 23:13:38 +01:00
static void initialize_redirection ( )
2018-11-07 22:15:02 +01:00
{
2018-12-03 00:39:25 +01:00
auto & descriptor = get_gdt_entry ( s_redirection . selector ) ;
2019-01-31 17:31:23 +01:00
descriptor . set_base ( & s_redirection . tss ) ;
descriptor . set_limit ( 0xffff ) ;
2018-11-07 23:13:38 +01:00
descriptor . dpl = 0 ;
descriptor . segment_present = 1 ;
descriptor . granularity = 1 ;
descriptor . zero = 0 ;
descriptor . operation_size = 1 ;
descriptor . descriptor_type = 0 ;
descriptor . type = 9 ;
2018-12-03 00:39:25 +01:00
flush_gdt ( ) ;
2018-11-07 22:15:02 +01:00
}
void Scheduler : : prepare_for_iret_to_new_process ( )
{
2018-12-03 00:39:25 +01:00
auto & descriptor = get_gdt_entry ( s_redirection . selector ) ;
2018-11-07 23:13:38 +01:00
descriptor . type = 9 ;
s_redirection . tss . backlink = current - > selector ( ) ;
load_task_register ( s_redirection . selector ) ;
2018-11-07 22:15:02 +01:00
}
2019-03-23 22:03:17 +01:00
void Scheduler : : prepare_to_modify_tss ( Thread & thread )
2018-11-07 22:15:02 +01:00
{
2018-11-07 23:59:49 +01:00
// This ensures that a currently running process modifying its own TSS
// in order to yield() and end up somewhere else doesn't just end up
// right after the yield().
2019-03-23 22:03:17 +01:00
if ( current = = & thread )
2018-11-07 23:59:49 +01:00
load_task_register ( s_redirection . selector ) ;
2018-11-07 22:15:02 +01:00
}
2019-02-04 10:28:12 +01:00
// Accessor for the kernel idle ("colonel") process created in initialize().
Process* Scheduler::colonel()
{
    return s_colonel_process;
}
2018-11-07 22:15:02 +01:00
// One-time scheduler setup: build the redirection TSS, create the idle
// process, and load the task register. Order matters — the redirection
// selector must exist before anything can task-switch through it.
void Scheduler::initialize()
{
    s_redirection.selector = gdt_alloc_entry();
    initialize_redirection();
    s_colonel_process = Process::create_kernel_process("colonel", nullptr);
    // Make sure the colonel uses a smallish time slice.
    s_colonel_process->set_priority(Process::IdlePriority);
    load_task_register(s_redirection.selector);
}
2018-11-08 00:24:59 +01:00
// Timer interrupt handler tail: ticks down the current thread's time slice
// and, when it expires, snapshots the interrupted register state into the
// thread's TSS and switches to the next thread via the redirection TSS + iret.
void Scheduler::timer_tick(RegisterDump& regs)
{
    if (!current)
        return; // scheduling hasn't started yet

    ++g_uptime;

    // tick() returns true while the thread still has slice left — keep running it.
    if (current->tick())
        return;

    // Slice expired: copy the interrupted context into the TSS so the coming
    // task switch resumes exactly where the interrupt hit.
    current->tss().gs = regs.gs;
    current->tss().fs = regs.fs;
    current->tss().es = regs.es;
    current->tss().ds = regs.ds;
    current->tss().edi = regs.edi;
    current->tss().esi = regs.esi;
    current->tss().ebp = regs.ebp;
    current->tss().ebx = regs.ebx;
    current->tss().edx = regs.edx;
    current->tss().ecx = regs.ecx;
    current->tss().eax = regs.eax;
    current->tss().eip = regs.eip;
    current->tss().cs = regs.cs;
    current->tss().eflags = regs.eflags;

    // Compute process stack pointer.
    // Add 12 for CS, EIP, EFLAGS (interrupt mechanic)
    current->tss().esp = regs.esp + 12;
    current->tss().ss = regs.ss;
    // If we interrupted ring 3, the CPU pushed the real SS:ESP separately.
    if ((current->tss().cs & 3) != 0) {
        current->tss().ss = regs.ss_if_crossRing;
        current->tss().esp = regs.esp_if_crossRing;
    }

    if (!pick_next())
        return; // staying on the same thread

    prepare_for_iret_to_new_process();

    // Set the NT (nested task) flag so the iret performs a task switch
    // through the redirection TSS backlink.
    asm(
        "pushf\n"
        "orl $0x00004000, (%esp)\n"
        "popf\n"
    );
}