From 677da0288c20871f736aed9fa3d0f01283aa3378 Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Sat, 29 Jan 2022 13:57:39 +0100
Subject: [PATCH] Kernel: Don't dispatch signals in Processor::enter_current()

Signal dispatch is already taken care of elsewhere, so there appears
to be no need for the hack in enter_current().

This also allows us to remove the Thread::m_in_block flag, simplifying
thread blocking logic somewhat.

Verified with the original repro for #4336 which this was meant to fix.
---
 Kernel/Arch/x86/common/Processor.cpp |  2 +-
 Kernel/Scheduler.cpp                 | 13 ++-----------
 Kernel/Scheduler.h                   |  2 +-
 Kernel/Thread.cpp                    |  5 +----
 Kernel/Thread.h                      |  6 ------
 5 files changed, 5 insertions(+), 23 deletions(-)

diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index fe0db59a499..3565a2ea64f 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -1285,7 +1285,7 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
 
     VERIFY(to_thread == Thread::current());
 
-    Scheduler::enter_current(*from_thread, true);
+    Scheduler::enter_current(*from_thread);
 
     auto in_critical = to_thread->saved_critical();
     VERIFY(in_critical > 0);
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index b6c3a7df19f..a0fab77edb8 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -307,7 +307,7 @@ bool Scheduler::context_switch(Thread* thread)
 
     // NOTE: from_thread at this point reflects the thread we were
     // switched from, and thread reflects Thread::current()
-    enter_current(*from_thread, false);
+    enter_current(*from_thread);
     VERIFY(thread == Thread::current());
 
     if (thread->process().is_user_process() && thread->previous_mode() != Thread::PreviousMode::KernelMode && thread->current_trap()) {
@@ -321,7 +321,7 @@ bool Scheduler::context_switch(Thread* thread)
     return true;
 }
 
-void Scheduler::enter_current(Thread& prev_thread, bool is_first)
+void Scheduler::enter_current(Thread& prev_thread)
 {
     VERIFY(g_scheduler_lock.is_locked_by_current_processor());
 
@@ -337,15 +337,6 @@ void Scheduler::enter_current(Thread& prev_thread, bool is_first)
         // the finalizer. Note that as soon as we leave the scheduler lock
         // the finalizer may free from_thread!
         notify_finalizer();
-    } else if (!is_first) {
-        // Check if we have any signals we should deliver (even if we don't
-        // end up switching to another thread).
-        if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode && current_thread->current_trap()) {
-            SpinlockLocker lock(current_thread->get_lock());
-            if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) {
-                current_thread->dispatch_one_pending_signal();
-            }
-        }
     }
 }
 
diff --git a/Kernel/Scheduler.h b/Kernel/Scheduler.h
index 02746f42dad..a6b623a5c30 100644
--- a/Kernel/Scheduler.h
+++ b/Kernel/Scheduler.h
@@ -39,7 +39,7 @@ public:
     static bool pick_next();
     static bool yield();
     static bool context_switch(Thread*);
-    static void enter_current(Thread& prev_thread, bool is_first);
+    static void enter_current(Thread& prev_thread);
     static void leave_on_first_switch(u32 flags);
     static void prepare_after_exec();
     static void prepare_for_idle_loop();
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index df6af16399b..bd668c1731f 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -158,8 +158,6 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
     SpinlockLocker block_lock(m_block_lock);
     // We need to hold m_block_lock so that nobody can unblock a blocker as soon
     // as it is constructed and registered elsewhere
-    VERIFY(!m_in_block);
-    TemporaryChange in_block_change(m_in_block, true);
 
     ScopeGuard finalize_guard([&] {
         blocker.finalize();
@@ -264,8 +262,7 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
         TimerQueue::the().cancel_timer(*m_block_timer);
     }
     if (previous_locked != LockMode::Unlocked) {
-        // NOTE: this may trigger another call to Thread::block(), so
-        //       we need to do this after we're all done and restored m_in_block!
+        // NOTE: This may trigger another call to Thread::block().
         relock_process(previous_locked, lock_count_to_restore);
     }
     return result;
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index a6f310d70a7..13620747787 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -822,11 +822,6 @@ public:
     [[nodiscard]] bool should_be_stopped() const;
     [[nodiscard]] bool is_stopped() const { return m_state == Stopped; }
     [[nodiscard]] bool is_blocked() const { return m_state == Blocked; }
-    [[nodiscard]] bool is_in_block() const
-    {
-        SpinlockLocker lock(m_block_lock);
-        return m_in_block;
-    }
 
     u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
     void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
@@ -1279,7 +1274,6 @@ private:
     bool m_dump_backtrace_on_finalization { false };
     bool m_should_die { false };
     bool m_initialized { false };
-    bool m_in_block { false };
     bool m_is_idle_thread { false };
     bool m_is_crashing { false };
     bool m_is_promise_violation_pending { false };
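For context: the dispatch site that makes the removed enter_current() branch
redundant is the check in Scheduler::context_switch() visible in the hunk
context above, which runs immediately after enter_current() returns. Below is
a minimal standalone sketch of that post-patch flow. It is not SerenityOS
code: the Thread struct and its boolean fields are hypothetical stand-ins for
the kernel's process().is_user_process(), previous_mode(), and current_trap()
checks, and the locking has been elided.

#include <cstdio>

struct Thread {
    bool is_user_process { true };
    bool returning_from_user_mode { true }; // stand-in for previous_mode() != KernelMode
    bool has_trap_frame { true };           // stand-in for current_trap() != nullptr
    int pending_signals { 1 };

    void dispatch_one_pending_signal()
    {
        if (pending_signals > 0) {
            --pending_signals;
            std::puts("dispatched one pending signal");
        }
    }
};

// Post-patch enter_current(): pure bookkeeping, no signal peeking.
static void enter_current(Thread&)
{
}

static void context_switch(Thread& thread)
{
    enter_current(thread);
    // The surviving dispatch site: the same condition the removed branch
    // tested, now checked in exactly one place.
    if (thread.is_user_process && thread.returning_from_user_mode && thread.has_trap_frame)
        thread.dispatch_one_pending_signal();
}

int main()
{
    Thread t;
    context_switch(t); // prints: dispatched one pending signal
    return 0;
}

In this model, enter_current() deliberately does nothing signal-related and
the only dispatch happens in context_switch(), mirroring the post-patch
behavior; since nothing else consults the "am I inside a block?" question, the
analogue of m_in_block has no reader left, which is why the patch can drop the
flag and its is_in_block() accessor entirely.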