Kernel: Stop using *LockRefPtr for Thread
These were stored in a bunch of places. The main one that's a bit iffy is the Mutex::m_holder one, which I'm going to simplify in a subsequent commit.

In Plan9FS and WorkQueue, we can't make the NNRPs (NonnullRefPtrs) const due to initialization order problems. That's probably doable with further cleanup, but left as an exercise for our future selves.

Before starting this, I expected the thread blockers to be a problem, but as it turns out they were super straightforward (for once!) as they don't mutate the thread after initiating a block, so they can just use simple const-ified NNRPs.
Parent: a098266ff5
Commit: c3915e4058

9 changed files with 26 additions and 30 deletions
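The commit message's point about the blockers is easiest to see in isolation: once a blocker never reseats its thread pointer after the block is initiated, the member can be a const-qualified NonnullRefPtr instead of a NonnullLockRefPtr. The sketch below is a standalone approximation of that pattern, not SerenityOS code; the tiny Thread and NonnullRefPtr stand-ins and the JoinBlocker shape are assumptions made purely so the example compiles on its own.

// Standalone sketch, not kernel code: minimal stand-ins for AK::RefCounted and
// AK::NonnullRefPtr so the const-member pattern can be shown in isolation.
#include <cassert>
#include <cstdio>

class Thread {
public:
    explicit Thread(int tid) : m_tid(tid) {}
    void ref() { ++m_ref_count; }
    void unref() { if (--m_ref_count == 0) delete this; }
    int tid() const { return m_tid; }
private:
    int m_ref_count { 1 };
    int m_tid;
};

template<typename T>
class NonnullRefPtr {
public:
    explicit NonnullRefPtr(T& object) : m_ptr(&object) { m_ptr->ref(); }
    NonnullRefPtr(NonnullRefPtr const& other) : m_ptr(other.m_ptr) { m_ptr->ref(); }
    ~NonnullRefPtr() { m_ptr->unref(); }
    T* operator->() const { return m_ptr; }
private:
    T* m_ptr;
};

// A blocker only reads its thread after the block is initiated, so the member can be
// a const NonnullRefPtr: never null, never reseated, no lock-protected pointer needed.
class JoinBlocker {
public:
    explicit JoinBlocker(Thread& joinee) : m_joinee(joinee) {}
    int joinee_tid() const { return m_joinee->tid(); }
private:
    NonnullRefPtr<Thread> const m_joinee;
};

int main()
{
    Thread* thread = new Thread(42);
    JoinBlocker blocker(*thread);
    assert(blocker.joinee_tid() == 42);
    std::printf("joinee tid: %d\n", blocker.joinee_tid());
    thread->unref(); // drop our reference; the blocker keeps the thread alive until it is destroyed
    return 0;
}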
@@ -137,7 +137,7 @@ private:
 HashMap<u16, NonnullLockRefPtr<ReceiveCompletion>> m_completions;

 Spinlock<LockRank::None> m_thread_lock {};
-LockRefPtr<Thread> m_thread;
+RefPtr<Thread> m_thread;
 Atomic<bool> m_thread_running { false };
 Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_thread_shutdown { false };
 };
@@ -101,7 +101,7 @@ private:
 // the lock is unlocked, it just means we don't know which threads hold it.
 // When locked exclusively, this is always the one thread that holds the
 // lock.
-LockRefPtr<Thread> m_holder;
+RefPtr<Thread> m_holder;
 size_t m_shared_holders { 0 };

 struct BlockedThreadLists {
@@ -329,7 +329,7 @@ Process::Process(NonnullOwnPtr<KString> name, NonnullRefPtr<Credentials> credent
 }
 }

-ErrorOr<NonnullLockRefPtr<Thread>> Process::attach_resources(NonnullOwnPtr<Memory::AddressSpace>&& preallocated_space, Process* fork_parent)
+ErrorOr<NonnullRefPtr<Thread>> Process::attach_resources(NonnullOwnPtr<Memory::AddressSpace>&& preallocated_space, Process* fork_parent)
 {
 m_space.with([&](auto& space) {
 space = move(preallocated_space);
@@ -338,10 +338,10 @@ ErrorOr<NonnullLockRefPtr<Thread>> Process::attach_resources(NonnullOwnPtr<Memor
 auto create_first_thread = [&] {
 if (fork_parent) {
 // NOTE: fork() doesn't clone all threads; the thread that called fork() becomes the only thread in the new process.
-return Thread::current()->try_clone(*this);
+return Thread::current()->clone(*this);
 }
 // NOTE: This non-forked code path is only taken when the kernel creates a process "manually" (at boot.)
-return Thread::try_create(*this);
+return Thread::create(*this);
 };

 auto first_thread = TRY(create_first_thread());
@@ -906,17 +906,13 @@ ErrorOr<void> Process::send_signal(u8 signal, Process* sender)
 return ESRCH;
 }

-LockRefPtr<Thread> Process::create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity, bool joinable)
+ErrorOr<NonnullRefPtr<Thread>> Process::create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity, bool joinable)
 {
 VERIFY((priority >= THREAD_PRIORITY_MIN) && (priority <= THREAD_PRIORITY_MAX));

 // FIXME: Do something with guard pages?

-auto thread_or_error = Thread::try_create(*this);
-if (thread_or_error.is_error())
-return {};
-
-auto thread = thread_or_error.release_value();
+auto thread = TRY(Thread::create(*this));
 thread->set_name(move(name));
 thread->set_affinity(affinity);
 thread->set_priority(priority);
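The create_kernel_thread() change above is also an error-handling change: instead of signalling failure with a null LockRefPtr, the function now returns ErrorOr<NonnullRefPtr<Thread>>, so callers either propagate the error (TRY, as in the body above) or assert success (MUST, as the Scheduler hunk further down does). The following standalone sketch approximates that caller-side difference with a deliberately simplified ErrorOr; it is not AK's ErrorOr/TRY/MUST API.

// Standalone sketch: a tiny ErrorOr approximation (assumption, not AK's type) used to
// contrast "null pointer means failure" with "explicit ErrorOr means failure".
#include <cassert>
#include <cstdio>
#include <optional>
#include <string>
#include <utility>

template<typename T>
class ErrorOr {
public:
    ErrorOr(T value) : m_value(std::move(value)) {}
    static ErrorOr from_error(std::string error)
    {
        ErrorOr result;
        result.m_error = std::move(error);
        return result;
    }
    bool is_error() const { return !m_value.has_value(); }
    T release_value() { return std::move(*m_value); }
    std::string const& error() const { return m_error; }
private:
    ErrorOr() = default;
    std::optional<T> m_value;
    std::string m_error;
};

struct Thread { int tid; };

// Old shape (roughly): failure was signalled by returning a null pointer.
Thread* create_kernel_thread_old(bool fail) { return fail ? nullptr : new Thread { 1 }; }

// New shape (roughly): failure is an explicit error value, success is non-null by construction.
ErrorOr<Thread*> create_kernel_thread_new(bool fail)
{
    if (fail)
        return ErrorOr<Thread*>::from_error("ENOMEM");
    return new Thread { 2 };
}

int main()
{
    // Old style: the caller has to remember to null-check.
    if (Thread* thread = create_kernel_thread_old(false)) {
        std::printf("old: tid %d\n", thread->tid);
        delete thread;
    }

    // New style, TRY-like: check and propagate the error (here we just print and bail).
    auto result = create_kernel_thread_new(false);
    if (result.is_error()) {
        std::printf("error: %s\n", result.error().c_str());
        return 1;
    }
    Thread* thread = result.release_value();
    std::printf("new: tid %d\n", thread->tid);
    delete thread;

    // MUST-like: assert success, the way the idle-thread creation below treats it.
    auto must_result = create_kernel_thread_new(false);
    assert(!must_result.is_error());
    delete must_result.release_value();
    return 0;
}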
@@ -187,7 +187,7 @@ public:

 struct ProcessAndFirstThread {
 NonnullRefPtr<Process> process;
-NonnullLockRefPtr<Thread> first_thread;
+NonnullRefPtr<Thread> first_thread;
 };

 template<typename EntryFunction>
@@ -203,7 +203,7 @@ public:

 ~Process();

-LockRefPtr<Thread> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);
+ErrorOr<NonnullRefPtr<Thread>> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);

 bool is_profiling() const { return m_profiling; }
 void set_profiling(bool profiling) { m_profiling = profiling; }
@@ -570,7 +570,7 @@ public:
 ErrorOr<void> set_coredump_property(NonnullOwnPtr<KString> key, NonnullOwnPtr<KString> value);
 ErrorOr<void> try_set_coredump_property(StringView key, StringView value);

-Vector<NonnullLockRefPtr<Thread>> const& threads_for_coredump(Badge<Coredump>) const { return m_threads_for_coredump; }
+Vector<NonnullRefPtr<Thread>> const& threads_for_coredump(Badge<Coredump>) const { return m_threads_for_coredump; }

 PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }
 PerformanceEventBuffer const* perf_events() const { return m_perf_event_buffer; }
@@ -600,7 +600,7 @@ private:

 Process(NonnullOwnPtr<KString> name, NonnullRefPtr<Credentials>, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory, RefPtr<Custody> executable, TTY* tty, UnveilNode unveil_tree, UnveilNode exec_unveil_tree);
 static ErrorOr<ProcessAndFirstThread> create(NonnullOwnPtr<KString> name, UserID, GroupID, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
-ErrorOr<NonnullLockRefPtr<Thread>> attach_resources(NonnullOwnPtr<Memory::AddressSpace>&&, Process* fork_parent);
+ErrorOr<NonnullRefPtr<Thread>> attach_resources(NonnullOwnPtr<Memory::AddressSpace>&&, Process* fork_parent);
 static ProcessID allocate_pid();

 void kill_threads_except_self();
@@ -892,7 +892,7 @@ private:
 };

 SpinlockProtected<Array<CoredumpProperty, 4>, LockRank::None> m_coredump_properties {};
-Vector<NonnullLockRefPtr<Thread>> m_threads_for_coredump;
+Vector<NonnullRefPtr<Thread>> m_threads_for_coredump;

 struct SignalActionData {
 VirtualAddress handler_or_sigaction;
@@ -391,7 +391,7 @@ UNMAP_AFTER_INIT Thread* Scheduler::create_ap_idle_thread(u32 cpu)
 VERIFY(Processor::is_bootstrap_processor());

 VERIFY(s_colonel_process);
-Thread* idle_thread = s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, MUST(KString::formatted("idle thread #{}", cpu)), 1 << cpu, false);
+Thread* idle_thread = MUST(s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, MUST(KString::formatted("idle thread #{}", cpu)), 1 << cpu, false));
 VERIFY(idle_thread);
 return idle_thread;
 }
@@ -43,7 +43,7 @@ ErrorOr<FlatPtr> Process::sys$create_thread(void* (*entry)(void*), Userspace<Sys

 // FIXME: Do something with guard pages?

-auto thread = TRY(Thread::try_create(*this));
+auto thread = TRY(Thread::create(*this));

 // We know this thread is not the main_thread,
 // So give it a unique name until the user calls $set_thread_name on it
@@ -39,7 +39,7 @@ SpinlockProtected<Thread::GlobalList, LockRank::None>& Thread::all_instances()
 return *s_list;
 }

-ErrorOr<NonnullLockRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
+ErrorOr<NonnullRefPtr<Thread>> Thread::create(NonnullRefPtr<Process> process)
 {
 auto kernel_stack_region = TRY(MM.allocate_kernel_region(default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
 kernel_stack_region->set_stack(true);
@@ -47,7 +47,7 @@ ErrorOr<NonnullLockRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> pro
 auto block_timer = TRY(try_make_lock_ref_counted<Timer>());

 auto name = TRY(process->name().with([](auto& name) { return name->try_clone(); }));
-return adopt_nonnull_lock_ref_or_enomem(new (nothrow) Thread(move(process), move(kernel_stack_region), move(block_timer), move(name)));
+return adopt_nonnull_ref_or_enomem(new (nothrow) Thread(move(process), move(kernel_stack_region), move(block_timer), move(name)));
 }

 Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> kernel_stack_region, NonnullLockRefPtr<Timer> block_timer, NonnullOwnPtr<KString> name)
@@ -1217,9 +1217,9 @@ RegisterState& Thread::get_register_dump_from_stack()
 return *trap->regs;
 }

-ErrorOr<NonnullLockRefPtr<Thread>> Thread::try_clone(Process& process)
+ErrorOr<NonnullRefPtr<Thread>> Thread::clone(NonnullRefPtr<Process> process)
 {
-auto clone = TRY(Thread::try_create(process));
+auto clone = TRY(Thread::create(move(process)));
 m_signal_action_masks.span().copy_to(clone->m_signal_action_masks);
 clone->m_signal_mask = m_signal_mask;
 clone->m_fpu_state = m_fpu_state;
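Thread::clone() and Thread::create() above also switch from taking Process& to taking NonnullRefPtr<Process> by value and move()-ing it along. A brief sketch of why that signature style is convenient, using std::shared_ptr purely as a stand-in for NonnullRefPtr (an assumption made so the example is self-contained):

// Standalone sketch: by-value smart-pointer parameters let the caller either keep its
// strong reference (copy) or hand it over (move), and the callee forwards it exactly once.
#include <cstdio>
#include <memory>
#include <utility>

struct Process { int pid; };
struct Thread { std::shared_ptr<Process> process; };

Thread create_thread(std::shared_ptr<Process> process)
{
    return Thread { std::move(process) };
}

Thread clone_thread(std::shared_ptr<Process> process)
{
    // Forward the strong reference we were given, without an extra ref-count bump.
    return create_thread(std::move(process));
}

int main()
{
    auto process = std::make_shared<Process>(Process { 42 });
    Thread t1 = create_thread(process);           // caller keeps its own reference (copy)
    Thread t2 = clone_thread(std::move(process)); // caller hands its reference over (move)
    std::printf("t1 pid %d, t2 pid %d\n", t1.process->pid, t2.process->pid);
    return 0;
}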
@@ -1399,9 +1399,9 @@ ErrorOr<void> Thread::make_thread_specific_region(Badge<Process>)
 });
 }

-LockRefPtr<Thread> Thread::from_tid(ThreadID tid)
+RefPtr<Thread> Thread::from_tid(ThreadID tid)
 {
-return Thread::all_instances().with([&](auto& list) -> LockRefPtr<Thread> {
+return Thread::all_instances().with([&](auto& list) -> RefPtr<Thread> {
 for (Thread& thread : list) {
 if (thread.tid() == tid)
 return thread;
@@ -67,10 +67,10 @@ public:
 return Processor::current_thread();
 }

-static ErrorOr<NonnullLockRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
+static ErrorOr<NonnullRefPtr<Thread>> create(NonnullRefPtr<Process>);
 ~Thread();

-static LockRefPtr<Thread> from_tid(ThreadID);
+static RefPtr<Thread> from_tid(ThreadID);
 static void finalize_dying_threads();

 ThreadID tid() const { return m_tid; }
@@ -294,7 +294,7 @@ public:

 private:
 BlockerSet* m_blocker_set { nullptr };
-NonnullLockRefPtr<Thread> m_thread;
+NonnullRefPtr<Thread> const m_thread;
 u8 m_was_interrupted_by_signal { 0 };
 bool m_is_blocking { false };
 bool m_was_interrupted_by_death { false };
@@ -429,7 +429,7 @@ public:
 bool unblock(void*, bool);

 private:
-NonnullLockRefPtr<Thread> m_joinee;
+NonnullRefPtr<Thread> const m_joinee;
 void*& m_joinee_exit_value;
 ErrorOr<void>& m_try_join_result;
 bool m_did_unblock { false };
@@ -961,7 +961,7 @@ public:
 return !m_is_joinable;
 }

-ErrorOr<NonnullLockRefPtr<Thread>> try_clone(Process&);
+ErrorOr<NonnullRefPtr<Thread>> clone(NonnullRefPtr<Process>);

 template<IteratorFunction<Thread&> Callback>
 static IterationDecision for_each_in_state(State, Callback);
@@ -61,7 +61,7 @@ private:

 void do_queue(WorkItem&);

-LockRefPtr<Thread> m_thread;
+RefPtr<Thread> m_thread;
 WaitQueue m_wait_queue;
 SpinlockProtected<IntrusiveList<&WorkItem::m_node>, LockRank::None> m_items {};
 };