/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/QuickSort.h>
#include <AK/TemporaryChange.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Net/Socket.h>
#include <Kernel/Process.h>
#include <Kernel/Profiling.h>
#include <Kernel/RTC.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/TimerQueue.h>

//#define LOG_EVERY_CONTEXT_SWITCH
//#define SCHEDULER_DEBUG
//#define SCHEDULER_RUNNABLE_DEBUG

namespace Kernel {

SchedulerData* g_scheduler_data;
timeval g_timeofday;
RecursiveSpinLock g_scheduler_lock;

void Scheduler::init_thread(Thread& thread)
{
    ASSERT(g_scheduler_data);
    g_scheduler_data->m_nonrunnable_threads.append(thread);
}

void Scheduler::update_state_for_thread(Thread& thread)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_data);
    auto& list = g_scheduler_data->thread_list_for_state(thread.state());

    if (list.contains(thread))
        return;

    list.append(thread);
}

static u32 time_slice_for(const Thread& thread)
{
    // One time slice unit == 1ms
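    // The idle thread gets the shortest possible slice so that any thread
    // that becomes runnable can preempt it almost immediately.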
    if (&thread == Processor::current().idle_thread())
        return 1;
    return 10;
}

timeval Scheduler::time_since_boot()
{
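    // The time-keeping timer ticks at roughly 1000Hz, so each tick that has
    // elapsed within the current second is worth about 1000 microseconds.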
    return { TimeManagement::the().seconds_since_boot(), (suseconds_t)TimeManagement::the().ticks_this_second() * 1000 };
}

Thread* g_finalizer;
WaitQueue* g_finalizer_wait_queue;
Atomic<bool> g_finalizer_has_work{false};
static Process* s_colonel_process;
u64 g_uptime;

Thread::JoinBlocker::JoinBlocker(Thread& joinee, void*& joinee_exit_value)
    : m_joinee(joinee)
    , m_joinee_exit_value(joinee_exit_value)
{
    ASSERT(m_joinee.m_joiner == nullptr);
    auto current_thread = Thread::current();
    m_joinee.m_joiner = current_thread;
    current_thread->m_joinee = &joinee;
}
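
// The joiner's m_joinee pointer is cleared elsewhere once the joinee has
// exited, which is what finally satisfies this check.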
bool Thread::JoinBlocker::should_unblock(Thread& joiner, time_t, long)
{
    return !joiner.m_joinee;
}

Thread::FileDescriptionBlocker::FileDescriptionBlocker(const FileDescription& description)
    : m_blocked_description(description)
{
}

const FileDescription& Thread::FileDescriptionBlocker::blocked_description() const
{
    return m_blocked_description;
}

Thread::AcceptBlocker::AcceptBlocker(const FileDescription& description)
    : FileDescriptionBlocker(description)
{
}

bool Thread::AcceptBlocker::should_unblock(Thread&, time_t, long)
{
    auto& socket = *blocked_description().socket();
    return socket.can_accept();
}

Thread::ConnectBlocker::ConnectBlocker(const FileDescription& description)
    : FileDescriptionBlocker(description)
{
}

bool Thread::ConnectBlocker::should_unblock(Thread&, time_t, long)
{
    auto& socket = *blocked_description().socket();
    return socket.setup_state() == Socket::SetupState::Completed;
}

Thread::WriteBlocker::WriteBlocker(const FileDescription& description)
    : FileDescriptionBlocker(description)
{
    if (description.is_socket()) {
        auto& socket = *description.socket();
        if (socket.has_send_timeout()) {
            timeval deadline = Scheduler::time_since_boot();
            deadline.tv_sec += socket.send_timeout().tv_sec;
            deadline.tv_usec += socket.send_timeout().tv_usec;
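            // Normalize the result: carry any microsecond overflow into the
            // seconds field so tv_usec stays below 1000000.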
            deadline.tv_sec += deadline.tv_usec / 1000000;
            deadline.tv_usec %= 1000000;
            m_deadline = deadline;
        }
    }
}

bool Thread::WriteBlocker::should_unblock(Thread&, time_t now_sec, long now_usec)
{
    if (m_deadline.has_value()) {
        bool timed_out = now_sec > m_deadline.value().tv_sec || (now_sec == m_deadline.value().tv_sec && now_usec >= m_deadline.value().tv_usec);
        return timed_out || blocked_description().can_write();
    }
    return blocked_description().can_write();
}

Thread::ReadBlocker::ReadBlocker(const FileDescription& description)
    : FileDescriptionBlocker(description)
{
    if (description.is_socket()) {
        auto& socket = *description.socket();
        if (socket.has_receive_timeout()) {
            timeval deadline = Scheduler::time_since_boot();
            deadline.tv_sec += socket.receive_timeout().tv_sec;
            deadline.tv_usec += socket.receive_timeout().tv_usec;
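            // Normalize the result: carry any microsecond overflow into the
            // seconds field so tv_usec stays below 1000000.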
            deadline.tv_sec += deadline.tv_usec / 1000000;
            deadline.tv_usec %= 1000000;
            m_deadline = deadline;
        }
    }
}

bool Thread::ReadBlocker::should_unblock(Thread&, time_t now_sec, long now_usec)
{
    if (m_deadline.has_value()) {
        bool timed_out = now_sec > m_deadline.value().tv_sec || (now_sec == m_deadline.value().tv_sec && now_usec >= m_deadline.value().tv_usec);
        return timed_out || blocked_description().can_read();
    }
    return blocked_description().can_read();
}

Thread::ConditionBlocker::ConditionBlocker(const char* state_string, Function<bool()>&& condition)
    : m_block_until_condition(move(condition))
    , m_state_string(state_string)
{
    ASSERT(m_block_until_condition);
}

bool Thread::ConditionBlocker::should_unblock(Thread&, time_t, long)
{
    return m_block_until_condition();
}

Thread::SleepBlocker::SleepBlocker(u64 wakeup_time)
    : m_wakeup_time(wakeup_time)
{
}

bool Thread::SleepBlocker::should_unblock(Thread&, time_t, long)
{
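    // Both g_uptime and m_wakeup_time are absolute tick counts.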
    return m_wakeup_time <= g_uptime;
}

Thread::SelectBlocker::SelectBlocker(const timespec& ts, bool select_has_timeout, const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds)
    : m_select_timeout(ts)
    , m_select_has_timeout(select_has_timeout)
    , m_select_read_fds(read_fds)
    , m_select_write_fds(write_fds)
    , m_select_exceptional_fds(except_fds)
{
}

bool Thread::SelectBlocker::should_unblock(Thread& thread, time_t now_sec, long now_usec)
{
    if (m_select_has_timeout) {
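        // Note: now_usec is in microseconds while the select timeout carries
        // nanoseconds, hence the * 1000 below.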
        if (now_sec > m_select_timeout.tv_sec || (now_sec == m_select_timeout.tv_sec && now_usec * 1000 >= m_select_timeout.tv_nsec))
            return true;
    }

    auto& process = thread.process();
    for (int fd : m_select_read_fds) {
        if (!process.m_fds[fd])
            continue;
        if (process.m_fds[fd].description->can_read())
            return true;
    }
    for (int fd : m_select_write_fds) {
        if (!process.m_fds[fd])
            continue;
        if (process.m_fds[fd].description->can_write())
            return true;
    }

    return false;
}

Thread::WaitBlocker::WaitBlocker(int wait_options, pid_t& waitee_pid)
    : m_wait_options(wait_options)
    , m_waitee_pid(waitee_pid)
{
}

bool Thread::WaitBlocker::should_unblock(Thread& thread, time_t, long)
{
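    // If WNOHANG was passed we never actually block: the wait should return
    // immediately whether or not a child has changed state.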
    bool should_unblock = m_wait_options & WNOHANG;
    if (m_waitee_pid != -1) {
        auto* peer = Process::from_pid(m_waitee_pid);
        if (!peer)
            return true;
    }
    thread.process().for_each_child([&](Process& child) {
        if (m_waitee_pid != -1 && m_waitee_pid != child.pid())
            return IterationDecision::Continue;

        bool child_exited = child.is_dead();
        bool child_stopped = false;
        if (child.thread_count()) {
            child.for_each_thread([&](auto& child_thread) {
                if (child_thread.state() == Thread::State::Stopped && !child_thread.has_pending_signal(SIGCONT)) {
                    child_stopped = true;
                    return IterationDecision::Break;
                }
                return IterationDecision::Continue;
            });
        }

        bool fits_the_spec = ((m_wait_options & WEXITED) && child_exited)
            || ((m_wait_options & WSTOPPED) && child_stopped);

        if (!fits_the_spec)
            return IterationDecision::Continue;

        m_waitee_pid = child.pid();
        should_unblock = true;
        return IterationDecision::Break;
    });
    return should_unblock;
}

Thread::SemiPermanentBlocker::SemiPermanentBlocker(Reason reason)
    : m_reason(reason)
{
}

bool Thread::SemiPermanentBlocker::should_unblock(Thread&, time_t, long)
{
    // someone else has to unblock us
    return false;
}

// Called by the scheduler on threads that are blocked for some reason.
// Make a decision as to whether to unblock them or not.
void Thread::consider_unblock(time_t now_sec, long now_usec)
{
    switch (state()) {
    case Thread::Invalid:
    case Thread::Runnable:
    case Thread::Running:
    case Thread::Dead:
    case Thread::Stopped:
    case Thread::Queued:
    case Thread::Dying:
        /* don't know, don't care */
        return;
    case Thread::Blocked:
        ASSERT(m_blocker != nullptr);
        if (m_blocker->should_unblock(*this, now_sec, now_usec))
            unblock();
        return;
    case Thread::Skip1SchedulerPass:
        set_state(Thread::Skip0SchedulerPasses);
        return;
    case Thread::Skip0SchedulerPasses:
        set_state(Thread::Runnable);
        return;
    }
}

void Scheduler::start()
{
    ASSERT_INTERRUPTS_DISABLED();

    // We need to acquire our scheduler lock, which will be released
    // by the idle thread once control is transferred there
    g_scheduler_lock.lock();

    auto& processor = Processor::current();
    ASSERT(processor.is_initialized());
    auto& idle_thread = *processor.idle_thread();
    ASSERT(processor.current_thread() == &idle_thread);
    ASSERT(processor.idle_thread() == &idle_thread);
    idle_thread.set_ticks_left(time_slice_for(idle_thread));
    idle_thread.did_schedule();
    idle_thread.set_initialized(true);
    processor.init_context(idle_thread, false);
    idle_thread.set_state(Thread::Running);
    ASSERT(idle_thread.affinity() == (1u << processor.id()));
    processor.initialize_context_switching(idle_thread);
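    // initialize_context_switching() does not return; execution resumes in
    // the idle thread, so anything past this point is unreachable.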
|
2020-06-27 13:42:28 -06:00
|
|
|
ASSERT_NOT_REACHED();
|
|
|
|
}
|
2018-11-07 22:15:02 +01:00
|
|
|
|
2020-06-27 13:42:28 -06:00
|
|
|
bool Scheduler::pick_next()
|
|
|
|
{
|
|
|
|
ASSERT_INTERRUPTS_DISABLED();
|
2018-11-07 22:15:02 +01:00
|
|
|
|
2020-06-28 15:34:31 -06:00
|
|
|
auto current_thread = Thread::current();
|
2020-03-14 00:59:45 +02:00
|
|
|
auto now = time_since_boot();
|
2019-07-18 14:10:28 +02:00
|
|
|
|
2019-03-25 02:06:57 +01:00
|
|
|
auto now_sec = now.tv_sec;
|
|
|
|
auto now_usec = now.tv_usec;
|
2019-03-13 13:13:23 +01:00
|
|
|
|
2020-06-28 15:34:31 -06:00
|
|
|
ScopedSpinLock lock(g_scheduler_lock);
|
|
|
|
|
2019-03-23 22:03:17 +01:00
|
|
|
// Check and unblock threads whose wait conditions have been met.
|
2019-07-19 17:21:13 +02:00
|
|
|
Scheduler::for_each_nonrunnable([&](Thread& thread) {
|
2019-07-18 14:10:28 +02:00
|
|
|
thread.consider_unblock(now_sec, now_usec);
|
2019-03-23 22:03:17 +01:00
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
|
|
|
|
2019-06-07 11:43:58 +02:00
|
|
|
Process::for_each([&](Process& process) {
|
2019-03-23 22:03:17 +01:00
|
|
|
if (process.is_dead()) {
|
2020-06-28 15:34:31 -06:00
|
|
|
if (current_thread->process().pid() != process.pid() && (!process.ppid() || !Process::from_pid(process.ppid()))) {
|
2018-11-28 22:01:24 +01:00
|
|
|
auto name = process.name();
|
|
|
|
auto pid = process.pid();
|
|
|
|
auto exit_status = Process::reap(process);
|
2020-06-28 22:36:12 -06:00
|
|
|
dbg() << "Scheduler[" << Processor::current().id() << "]: Reaped unparented process " << name << "(" << pid << "), exit status: " << exit_status.si_status;
|
2018-11-28 22:01:24 +01:00
|
|
|
}
|
2019-06-07 11:30:07 +02:00
|
|
|
return IterationDecision::Continue;
|
|
|
|
}
|
|
|
|
if (process.m_alarm_deadline && g_uptime > process.m_alarm_deadline) {
|
|
|
|
process.m_alarm_deadline = 0;
|
|
|
|
process.send_signal(SIGALRM, nullptr);
|
2018-11-07 23:59:49 +01:00
|
|
|
}
|
2019-06-07 11:30:07 +02:00
|
|
|
return IterationDecision::Continue;
|
2018-11-07 22:15:02 +01:00
|
|
|
});
|
|
|
|
|
|
|
|
// Dispatch any pending signals.
|
2020-06-28 15:34:31 -06:00
|
|
|
Thread::for_each_living([&](Thread& thread) -> IterationDecision {
|
2019-03-23 22:03:17 +01:00
|
|
|
if (!thread.has_unmasked_pending_signals())
|
2019-07-19 12:16:00 +02:00
|
|
|
return IterationDecision::Continue;
|
2019-03-05 12:50:55 +01:00
|
|
|
// FIXME: It would be nice if the Scheduler didn't have to worry about who is "current"
|
|
|
|
// For now, avoid dispatching signals to "current" and do it in a scheduling pass
|
|
|
|
// while some other process is interrupted. Otherwise a mess will be made.
|
2020-06-28 15:34:31 -06:00
|
|
|
if (&thread == current_thread)
|
2019-07-19 12:16:00 +02:00
|
|
|
return IterationDecision::Continue;
|
2018-11-07 22:15:02 +01:00
|
|
|
// We know how to interrupt blocked processes, but if they are just executing
|
2020-01-12 15:04:33 +01:00
|
|
|
// at some random point in the kernel, let them continue.
|
|
|
|
// Before returning to userspace from a syscall, we will block a thread if it has any
|
|
|
|
// pending unmasked signals, allowing it to be dispatched then.
|
2019-03-23 22:03:17 +01:00
|
|
|
if (thread.in_kernel() && !thread.is_blocked() && !thread.is_stopped())
|
2019-07-19 12:16:00 +02:00
|
|
|
return IterationDecision::Continue;
|
2018-11-28 23:30:06 +01:00
|
|
|
// NOTE: dispatch_one_pending_signal() may unblock the process.
|
2019-03-23 22:03:17 +01:00
|
|
|
bool was_blocked = thread.is_blocked();
|
|
|
|
if (thread.dispatch_one_pending_signal() == ShouldUnblockThread::No)
|
2019-07-19 12:16:00 +02:00
|
|
|
return IterationDecision::Continue;
|
2018-11-28 23:30:06 +01:00
|
|
|
if (was_blocked) {
|
2020-06-22 21:18:16 +02:00
|
|
|
#ifdef SCHEDULER_DEBUG
|
2020-06-28 22:36:12 -06:00
|
|
|
dbg() << "Scheduler[" << Processor::current().id() << "]:Unblock " << thread << " due to signal";
|
2020-06-22 21:18:16 +02:00
|
|
|
#endif
            ASSERT(thread.m_blocker != nullptr);
            thread.m_blocker->set_interrupted_by_signal();
            thread.unblock();
        }
        return IterationDecision::Continue;
    });

#ifdef SCHEDULER_RUNNABLE_DEBUG
    dbg() << "Non-runnables:";
    Scheduler::for_each_nonrunnable([](Thread& thread) -> IterationDecision {
        if (thread.state() == Thread::Queued)
            dbg() << " " << String::format("%-12s", thread.state_string()) << " " << thread << " @ " << String::format("%w", thread.tss().cs) << ":" << String::format("%x", thread.tss().eip) << " Reason: " << (thread.wait_reason() ? thread.wait_reason() : "none");
        else if (thread.state() == Thread::Dying)
            dbg() << " " << String::format("%-12s", thread.state_string()) << " " << thread << " @ " << String::format("%w", thread.tss().cs) << ":" << String::format("%x", thread.tss().eip) << " Finalizable: " << thread.is_finalizable();
        return IterationDecision::Continue;
    });

    dbg() << "Runnables:";
    Scheduler::for_each_runnable([](Thread& thread) -> IterationDecision {
        dbg() << " " << String::format("%3u", thread.effective_priority()) << "/" << String::format("%2u", thread.priority()) << " " << String::format("%-12s", thread.state_string()) << " " << thread << " @ " << String::format("%w", thread.tss().cs) << ":" << String::format("%x", thread.tss().eip);
        return IterationDecision::Continue;
    });
#endif

    Vector<Thread*, 128> sorted_runnables;
    for_each_runnable([&sorted_runnables](auto& thread) {
        if ((thread.affinity() & (1u << Processor::current().id())) != 0)
            sorted_runnables.append(&thread);
        return IterationDecision::Continue;
    });
    quick_sort(sorted_runnables, [](auto& a, auto& b) { return a->effective_priority() >= b->effective_priority(); });

    Thread* thread_to_schedule = nullptr;

    for (auto* thread : sorted_runnables) {
        if (thread->process().is_being_inspected())
            continue;
        if (thread->process().exec_tid() && thread->process().exec_tid() != thread->tid())
            continue;

        ASSERT(thread->state() == Thread::Runnable || thread->state() == Thread::Running);
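
        // The first eligible thread in priority order gets to run; every
        // thread we pass over picks up a bit of extra effective priority so
        // that lower-priority threads aren't starved indefinitely.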
        if (!thread_to_schedule) {
            thread->m_extra_priority = 0;
            thread_to_schedule = thread;
        } else {
            thread->m_extra_priority++;
        }
    }

    if (!thread_to_schedule)
        thread_to_schedule = Processor::current().idle_thread();

#ifdef SCHEDULER_DEBUG
    dbg() << "Scheduler[" << Processor::current().id() << "]: Switch to " << *thread_to_schedule << " @ " << String::format("%04x:%08x", thread_to_schedule->tss().cs, thread_to_schedule->tss().eip);
#endif

    return context_switch(thread_to_schedule);
}

bool Scheduler::yield()
{
    InterruptDisabler disabler;
    auto& proc = Processor::current();
    auto current_thread = Thread::current();
#ifdef SCHEDULER_DEBUG
    dbg() << "Scheduler[" << proc.id() << "]: yielding thread " << *current_thread << " in_irq: " << proc.in_irq();
#endif
    ASSERT(current_thread != nullptr);
    if (proc.in_irq() || proc.in_critical()) {
        // If we're handling an IRQ we can't switch context, or we're in
        // a critical section where we don't want to switch contexts, then
        // delay until exiting the trap or critical section
        proc.invoke_scheduler_async();
        return false;
    } else if (!Scheduler::pick_next())
        return false;
#ifdef SCHEDULER_DEBUG
    dbg() << "Scheduler[" << proc.id() << "]: yield returns to thread " << *current_thread << " in_irq: " << proc.in_irq();
#endif
    return true;
}

bool Scheduler::donate_to(Thread* beneficiary, const char* reason)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto& proc = Processor::current();
    ASSERT(!proc.in_irq());
    if (!Thread::is_thread(beneficiary))
        return false;

    if (proc.in_critical()) {
        proc.invoke_scheduler_async();
        return false;
    }

    (void)reason;
    unsigned ticks_left = Thread::current()->ticks_left();
    if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
        return Scheduler::yield();
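
    // Hand the beneficiary all but one of our remaining ticks, capped at its
    // normal time slice.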
    unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
#ifdef SCHEDULER_DEBUG
    dbg() << "Scheduler[" << proc.id() << "]: Donating " << ticks_to_donate << " ticks to " << *beneficiary << ", reason=" << reason;
#endif
    beneficiary->set_ticks_left(ticks_to_donate);
    Scheduler::context_switch(beneficiary);
    return false;
}

bool Scheduler::context_switch(Thread* thread)
{
    thread->set_ticks_left(time_slice_for(*thread));
    thread->did_schedule();

    auto from_thread = Thread::current();
    if (from_thread == thread)
        return false;

    if (from_thread) {
        // If the last process hasn't blocked (still marked as running),
        // mark it as runnable for the next round.
        if (from_thread->state() == Thread::Running)
            from_thread->set_state(Thread::Runnable);

#ifdef LOG_EVERY_CONTEXT_SWITCH
        dbg() << "Scheduler[" << Processor::current().id() << "]: " << *from_thread << " -> " << *thread << " [" << thread->priority() << "] " << String::format("%w", thread->tss().cs) << ":" << String::format("%x", thread->tss().eip);
#endif
    }

    auto& proc = Processor::current();
    if (!thread->is_initialized()) {
        proc.init_context(*thread, false);
        thread->set_initialized(true);
    }
    thread->set_state(Thread::Running);

    // Mark it as active because we are using this thread. This is similar
    // to comparing it with Processor::current_thread, but when there are
    // multiple processors there's no easy way to check whether the thread
    // is actually still needed. This prevents accidental finalization when
    // a thread is no longer in Running state, but running on another core.
    thread->set_active(true);

    proc.switch_context(from_thread, thread);

    // NOTE: from_thread at this point reflects the thread we were
    // switched from, and thread reflects Thread::current()
    enter_current(*from_thread);
    ASSERT(thread == Thread::current());

    return true;
}

void Scheduler::enter_current(Thread& prev_thread)
{
    ASSERT(g_scheduler_lock.is_locked());
    prev_thread.set_active(false);
    if (prev_thread.state() == Thread::Dying) {
        // If the thread we switched from is marked as dying, then notify
        // the finalizer. Note that as soon as we leave the scheduler lock
        // the finalizer may free from_thread!
        notify_finalizer();
    }
}

Process* Scheduler::colonel()
{
    return s_colonel_process;
}

void Scheduler::initialize(u32 cpu)
{
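    // s_bsp_is_initialized is set to 1 (with release semantics) once the BSP
    // has published the global scheduler data; the APs spin on it below
    // before touching those globals.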
    static Atomic<u32> s_bsp_is_initialized;

    ASSERT(&Processor::current() != nullptr); // sanity check

    Thread* idle_thread = nullptr;
    if (cpu == 0) {
        ASSERT(s_bsp_is_initialized.load(AK::MemoryOrder::memory_order_consume) == 0);
        g_scheduler_data = new SchedulerData;
        g_finalizer_wait_queue = new WaitQueue;

        g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
        s_colonel_process = Process::create_kernel_process(idle_thread, "colonel", idle_loop, 1 << cpu);
        ASSERT(s_colonel_process);
        ASSERT(idle_thread);
        idle_thread->set_priority(THREAD_PRIORITY_MIN);
        idle_thread->set_name(String::format("idle thread #%u", cpu));
    } else {
        // We need to make sure the BSP initialized the global data first
        if (s_bsp_is_initialized.load(AK::MemoryOrder::memory_order_consume) == 0) {
#ifdef SCHEDULER_DEBUG
            dbg() << "Scheduler[" << cpu << "]: waiting for BSP to initialize...";
#endif
            while (s_bsp_is_initialized.load(AK::MemoryOrder::memory_order_consume) == 0) {
            }
#ifdef SCHEDULER_DEBUG
            dbg() << "Scheduler[" << cpu << "]: initializing now";
#endif
        }

        ASSERT(s_colonel_process);
        idle_thread = s_colonel_process->create_kernel_thread(idle_loop, THREAD_PRIORITY_MIN, String::format("idle thread #%u", cpu), 1 << cpu, false);
        ASSERT(idle_thread);
    }

    Processor::current().set_idle_thread(*idle_thread);
    Processor::current().set_current_thread(*idle_thread);

    if (cpu == 0)
        s_bsp_is_initialized.store(1, AK::MemoryOrder::memory_order_release);
}

void Scheduler::timer_tick(const RegisterState& regs)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(Processor::current().in_irq());

    auto current_thread = Processor::current().current_thread();
    if (!current_thread)
        return;

    ++g_uptime;

    g_timeofday = TimeManagement::now_as_timeval();

    if (current_thread->process().is_profiling()) {
        SmapDisabler disabler;
        auto backtrace = current_thread->raw_backtrace(regs.ebp, regs.eip);
        auto& sample = Profiling::next_sample_slot();
        sample.pid = current_thread->process().pid();
        sample.tid = current_thread->tid();
        sample.timestamp = g_uptime;
        for (size_t i = 0; i < min(backtrace.size(), Profiling::max_stack_frame_count); ++i) {
            sample.frames[i] = backtrace[i];
        }
    }

    TimerQueue::the().fire();
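
    // If the current thread still has ticks left in its time slice, let it
    // keep running; otherwise ask the processor to invoke the scheduler once
    // we leave this IRQ handler.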
    if (current_thread->tick())
        return;

    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(Processor::current().in_irq());
    Processor::current().invoke_scheduler_async();
}

void Scheduler::invoke_async()
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(!Processor::current().in_irq());
    pick_next();
}

void Scheduler::notify_finalizer()
{
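    // Only the first caller to set the flag needs to wake the finalizer;
    // later callers see it already set and can skip the wake.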
    if (g_finalizer_has_work.exchange(true, AK::MemoryOrder::memory_order_acq_rel) == false)
        g_finalizer_wait_queue->wake_all();
}

void Scheduler::idle_loop()
{
    dbg() << "Scheduler[" << Processor::current().id() << "]: idle loop running";
    ASSERT(are_interrupts_enabled());
    for (;;) {
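        // Halt the CPU until the next interrupt arrives, then give some other
        // thread a chance to run.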
        asm("hlt");
        yield();
    }
}

}