Kernel: Don't dispatch signals in Scheduler::enter_current()

Signal dispatch is already taken care of elsewhere, so there appears to
be no need for the hack in enter_current().

This also allows us to remove the Thread::m_in_block flag, simplifying
thread blocking logic somewhat.

Verified with the original repro for #4336, which the hack was meant to fix.
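
For orientation, the dispatch path that remains is the return-to-userspace check in Scheduler::context_switch(), visible as unchanged context in the diff below. A rough sketch of its shape (the condition is taken from that context line; the body here is an assumption for illustration, mirroring the block removed from enter_current()):

    if (thread->process().is_user_process() && thread->previous_mode() != Thread::PreviousMode::KernelMode && thread->current_trap()) {
        // Assumed body, mirroring the removed enter_current() block:
        SpinlockLocker lock(thread->get_lock());
        if (thread->state() == Thread::Running && thread->pending_signals_for_state())
            thread->dispatch_one_pending_signal();
    }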
Andreas Kling 2022-01-29 13:57:39 +01:00
parent 3845c90e08
commit 677da0288c
5 changed files with 5 additions and 23 deletions


@@ -1285,7 +1285,7 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
     VERIFY(to_thread == Thread::current());
-    Scheduler::enter_current(*from_thread, true);
+    Scheduler::enter_current(*from_thread);
     auto in_critical = to_thread->saved_critical();
     VERIFY(in_critical > 0);


@@ -307,7 +307,7 @@ bool Scheduler::context_switch(Thread* thread)
     // NOTE: from_thread at this point reflects the thread we were
     // switched from, and thread reflects Thread::current()
-    enter_current(*from_thread, false);
+    enter_current(*from_thread);
     VERIFY(thread == Thread::current());
     if (thread->process().is_user_process() && thread->previous_mode() != Thread::PreviousMode::KernelMode && thread->current_trap()) {
@@ -321,7 +321,7 @@ bool Scheduler::context_switch(Thread* thread)
     return true;
 }
-void Scheduler::enter_current(Thread& prev_thread, bool is_first)
+void Scheduler::enter_current(Thread& prev_thread)
 {
     VERIFY(g_scheduler_lock.is_locked_by_current_processor());
@@ -337,15 +337,6 @@ void Scheduler::enter_current(Thread& prev_thread, bool is_first)
         // the finalizer. Note that as soon as we leave the scheduler lock
         // the finalizer may free from_thread!
         notify_finalizer();
-    } else if (!is_first) {
-        // Check if we have any signals we should deliver (even if we don't
-        // end up switching to another thread).
-        if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode && current_thread->current_trap()) {
-            SpinlockLocker lock(current_thread->get_lock());
-            if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) {
-                current_thread->dispatch_one_pending_signal();
-            }
-        }
     }
 }


@@ -39,7 +39,7 @@ public:
     static bool pick_next();
     static bool yield();
     static bool context_switch(Thread*);
-    static void enter_current(Thread& prev_thread, bool is_first);
+    static void enter_current(Thread& prev_thread);
     static void leave_on_first_switch(u32 flags);
     static void prepare_after_exec();
     static void prepare_for_idle_loop();


@@ -158,8 +158,6 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
     SpinlockLocker block_lock(m_block_lock);
     // We need to hold m_block_lock so that nobody can unblock a blocker as soon
     // as it is constructed and registered elsewhere
-    VERIFY(!m_in_block);
-    TemporaryChange in_block_change(m_in_block, true);
     ScopeGuard finalize_guard([&] {
         blocker.finalize();
@@ -264,8 +262,7 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
         TimerQueue::the().cancel_timer(*m_block_timer);
     }
     if (previous_locked != LockMode::Unlocked) {
-        // NOTE: this may trigger another call to Thread::block(), so
-        // we need to do this after we're all done and restored m_in_block!
+        // NOTE: This may trigger another call to Thread::block().
         relock_process(previous_locked, lock_count_to_restore);
     }
     return result;
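
The old NOTE above hints at why m_in_block was awkward: relock_process() can itself end up blocking again, so the flag had to be restored before it ran. A minimal standalone sketch of that re-entrancy-guard pattern (illustrative only, not the kernel code):

    #include <cassert>

    // Illustrative stand-ins for the removed m_in_block flag and its VERIFY.
    static bool s_in_block = false;

    static void relock_process()
    {
        // In the kernel, re-acquiring the process lock may block again; if the
        // guard flag were still set here, a nested block() would trip the assert.
    }

    static void block()
    {
        assert(!s_in_block); // the old VERIFY(!m_in_block)
        s_in_block = true;
        // ... wait on the blocker ...
        s_in_block = false;  // must be cleared before anything that might block again
        relock_process();
    }

    int main()
    {
        block();
        return 0;
    }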


@@ -822,11 +822,6 @@ public:
     [[nodiscard]] bool should_be_stopped() const;
     [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
     [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
-    [[nodiscard]] bool is_in_block() const
-    {
-        SpinlockLocker lock(m_block_lock);
-        return m_in_block;
-    }
     u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
     void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
@@ -1279,7 +1274,6 @@ private:
     bool m_dump_backtrace_on_finalization { false };
     bool m_should_die { false };
     bool m_initialized { false };
-    bool m_in_block { false };
     bool m_is_idle_thread { false };
     bool m_is_crashing { false };
     bool m_is_promise_violation_pending { false };