2020-01-18 03:38:21 -05:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
|
|
|
|
*
|
2021-04-22 04:24:48 -04:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 03:38:21 -05:00
|
|
|
*/
|
|
|
|
|
2020-07-03 07:19:50 -04:00
|
|
|
#include <AK/TemporaryChange.h>
|
2021-01-25 10:07:10 -05:00
|
|
|
#include <Kernel/Debug.h>
|
2020-02-15 19:27:42 -05:00
|
|
|
#include <Kernel/KSyms.h>
|
2019-07-29 06:00:14 -04:00
|
|
|
#include <Kernel/Lock.h>
|
2020-12-14 18:36:22 -05:00
|
|
|
#include <Kernel/Thread.h>
|
|
|
|
|
2020-02-15 19:27:42 -05:00
|
|
|
namespace Kernel {
|
|
|
|
|
2021-01-23 17:29:11 -05:00
|
|
|
#if LOCK_DEBUG
void Lock::lock(Mode mode)
{
    // Debug builds funnel through the file/line-tracking overload with a placeholder location.
    lock("unknown", 0, mode);
}

void Lock::lock(const char* file, int line, Mode mode)
#else
void Lock::lock(Mode mode)
#endif
{
    // Acquire this lock in the requested mode (Exclusive or Shared).
    // The lock state (m_mode, m_holder, m_shared_holders, m_times_locked) is
    // guarded by the m_lock spin flag; we busy-yield on it, inspect the state,
    // and either take the lock, recurse into an existing hold, or block on
    // m_queue until a holder releases.
    //
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    VERIFY(mode != Mode::Unlocked);
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) != false) {
            // I don't know *who* is using "m_lock", so just yield.
            Scheduler::yield_from_critical();
            continue;
        }

        // We now own the m_lock spin flag and may inspect/mutate the lock state.
        // FIXME: Do not add new readers if writers are queued.
        Mode current_mode = m_mode;
        switch (current_mode) {
        case Mode::Unlocked: {
            // Nobody holds the lock: take it outright in the requested mode.
            dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ ({}) {}: acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
            m_mode = mode;
            VERIFY(!m_holder);
            VERIFY(m_shared_holders.is_empty());
            if (mode == Mode::Exclusive) {
                m_holder = current_thread;
            } else {
                VERIFY(mode == Mode::Shared);
                m_shared_holders.set(current_thread, 1);
            }
            VERIFY(m_times_locked == 0);
            m_times_locked++;
#if LOCK_DEBUG
            if (current_thread) {
                current_thread->holding_lock(*this, 1, file, line);
            }
#endif
            // Future waiters must block until we release.
            m_queue.should_block(true);
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Exclusive: {
            VERIFY(m_holder);
            if (m_holder != current_thread)
                break; // Held exclusively by someone else: fall through to wait.
            VERIFY(m_shared_holders.is_empty());

            if constexpr (LOCK_TRACE_DEBUG) {
                if (mode == Mode::Exclusive)
                    dbgln("Lock::lock @ {} ({}): acquire {}, currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
                else
                    dbgln("Lock::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
            }

            // Recursive acquisition by the exclusive holder. Note that a Shared
            // request by the exclusive holder is also satisfied here (the hold
            // stays exclusive; only the count increases).
            VERIFY(mode == Mode::Exclusive || mode == Mode::Shared);
            VERIFY(m_times_locked > 0);
            m_times_locked++;
#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, file, line);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Shared: {
            VERIFY(!m_holder);
            if (mode != Mode::Shared)
                break; // Exclusive requested while shared-held: must wait.

            dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);

            // Join (or recurse into) the shared holder set.
            VERIFY(m_times_locked > 0);
            m_times_locked++;
            VERIFY(!m_shared_holders.is_empty());
            auto it = m_shared_holders.find(current_thread);
            if (it != m_shared_holders.end())
                it->value++;
            else
                m_shared_holders.set(current_thread, 1);
#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, file, line);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        default:
            VERIFY_NOT_REACHED();
        }
        // Could not acquire: release the spin flag and block until woken,
        // then retry the whole acquisition from scratch.
        m_lock.store(false, AK::memory_order_release);
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waiting...", this, m_name);
        m_queue.wait_forever(m_name);
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waited", this, m_name);
    }
}
|
|
|
|
|
|
|
|
void Lock::unlock()
{
    // Release one level of this thread's hold on the lock. Only when the
    // outermost hold is released (m_times_locked reaches 0) does the lock
    // return to Unlocked and one waiter get woken.
    //
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            Mode current_mode = m_mode;
            if constexpr (LOCK_TRACE_DEBUG) {
                if (current_mode == Mode::Shared)
                    dbgln("Lock::unlock @ {} ({}): release {}, locks held: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
                else
                    dbgln("Lock::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
            }

            VERIFY(current_mode != Mode::Unlocked);

            VERIFY(m_times_locked > 0);
            m_times_locked--;

            switch (current_mode) {
            case Mode::Exclusive:
                // Only the exclusive holder may unlock it.
                VERIFY(m_holder == current_thread);
                VERIFY(m_shared_holders.is_empty());
                if (m_times_locked == 0)
                    m_holder = nullptr;
                break;
            case Mode::Shared: {
                VERIFY(!m_holder);
                // Drop one level of this thread's shared hold; remove the
                // thread from the holder set when its per-thread count hits 0.
                auto it = m_shared_holders.find(current_thread);
                VERIFY(it != m_shared_holders.end());
                if (it->value > 1) {
                    it->value--;
                } else {
                    VERIFY(it->value > 0);
                    m_shared_holders.remove(it);
                }
                break;
            }
            default:
                VERIFY_NOT_REACHED();
            }

            bool unlocked_last = (m_times_locked == 0);
            if (unlocked_last) {
                VERIFY(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
                m_mode = Mode::Unlocked;
                // Let new arrivals acquire without blocking on m_queue.
                m_queue.should_block(false);
            }

#if LOCK_DEBUG
            if (current_thread) {
                current_thread->holding_lock(*this, -1);
            }
#endif

            // Release the spin flag before waking, so the woken thread can
            // immediately take it.
            m_lock.store(false, AK::memory_order_release);
            if (unlocked_last) {
                u32 did_wake = m_queue.wake_one();
                dbgln_if(LOCK_TRACE_DEBUG, "Lock::unlock @ {} ({}) wake one ({})", this, m_name, did_wake);
            }
            return;
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
}
|
|
|
|
|
2020-12-14 18:36:22 -05:00
|
|
|
auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
{
    // Completely release this thread's hold on the lock (however many levels
    // deep), reporting the previous mode and the hold count so the caller can
    // later reinstate them via restore_lock(). Returns Mode::Unlocked (with a
    // zero count) if the current thread does not hold the lock at all.
    //
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    auto current_thread = Thread::current();
    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            Mode previous_mode;
            auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
            switch (current_mode) {
            case Mode::Exclusive: {
                if (m_holder != current_thread) {
                    // Exclusively held by someone else: nothing of ours to release.
                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
                    lock_count_to_restore = 0;
                    return Mode::Unlocked;
                }

                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}", this, m_times_locked);
#if LOCK_DEBUG
                m_holder->holding_lock(*this, -(int)m_times_locked);
#endif
                m_holder = nullptr;
                VERIFY(m_times_locked > 0);
                lock_count_to_restore = m_times_locked;
                m_times_locked = 0;
                m_mode = Mode::Unlocked;
                m_queue.should_block(false);
                m_lock.store(false, AK::memory_order_release);
                previous_mode = Mode::Exclusive;
                break;
            }
            case Mode::Shared: {
                VERIFY(!m_holder);
                auto it = m_shared_holders.find(current_thread);
                if (it == m_shared_holders.end()) {
                    // We are not among the shared holders: nothing to release.
                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
                    lock_count_to_restore = 0;
                    return Mode::Unlocked;
                }

                dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking shared with lock count: {}, total locks: {}",
                    this, it->value, m_times_locked);

                VERIFY(it->value > 0);
                lock_count_to_restore = it->value;
                VERIFY(lock_count_to_restore > 0);
#if LOCK_DEBUG
                // NOTE: m_holder is null in shared mode (verified above); the
                // thread whose holds we're releasing is current_thread.
                current_thread->holding_lock(*this, -(int)lock_count_to_restore);
#endif
                m_shared_holders.remove(it);
                VERIFY(m_times_locked >= lock_count_to_restore);
                m_times_locked -= lock_count_to_restore;
                if (m_times_locked == 0) {
                    m_mode = Mode::Unlocked;
                    m_queue.should_block(false);
                }
                m_lock.store(false, AK::memory_order_release);
                previous_mode = Mode::Shared;
                break;
            }
            case Mode::Unlocked: {
                m_lock.store(false, AK::memory_order_relaxed);
                lock_count_to_restore = 0;
                previous_mode = Mode::Unlocked;
                break;
            }
            default:
                VERIFY_NOT_REACHED();
            }
            // Give a waiter a chance to grab the now-(possibly-)free lock.
            m_queue.wake_one();
            return previous_mode;
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }
}
|
|
|
|
|
2021-01-23 17:29:11 -05:00
|
|
|
#if LOCK_DEBUG
|
2020-12-14 18:36:22 -05:00
|
|
|
void Lock::restore_lock(Mode mode, u32 lock_count)
|
|
|
|
{
|
|
|
|
return restore_lock("unknown", 0, mode, lock_count);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Lock::restore_lock(const char* file, int line, Mode mode, u32 lock_count)
|
|
|
|
#else
|
|
|
|
void Lock::restore_lock(Mode mode, u32 lock_count)
|
|
|
|
#endif
|
|
|
|
{
|
2021-02-23 14:42:32 -05:00
|
|
|
VERIFY(mode != Mode::Unlocked);
|
|
|
|
VERIFY(lock_count > 0);
|
|
|
|
VERIFY(!Processor::current().in_irq());
|
2020-12-14 18:36:22 -05:00
|
|
|
auto current_thread = Thread::current();
|
|
|
|
ScopedCritical critical; // in case we're not in a critical section already
|
|
|
|
for (;;) {
|
|
|
|
if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
|
|
|
|
switch (mode) {
|
|
|
|
case Mode::Exclusive: {
|
|
|
|
auto expected_mode = Mode::Unlocked;
|
2021-01-03 18:58:50 -05:00
|
|
|
if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive))
|
2020-12-14 18:36:22 -05:00
|
|
|
break;
|
2021-01-18 11:25:44 -05:00
|
|
|
|
2021-02-07 07:03:24 -05:00
|
|
|
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was unlocked", this, mode_to_string(mode), lock_count);
|
2021-01-18 11:25:44 -05:00
|
|
|
|
2021-02-23 14:42:32 -05:00
|
|
|
VERIFY(m_times_locked == 0);
|
2020-12-14 18:36:22 -05:00
|
|
|
m_times_locked = lock_count;
|
2021-02-23 14:42:32 -05:00
|
|
|
VERIFY(!m_holder);
|
|
|
|
VERIFY(m_shared_holders.is_empty());
|
2020-12-14 18:36:22 -05:00
|
|
|
m_holder = current_thread;
|
2021-01-24 00:30:10 -05:00
|
|
|
m_queue.should_block(true);
|
2020-12-14 18:36:22 -05:00
|
|
|
m_lock.store(false, AK::memory_order_release);
|
2021-01-23 17:29:11 -05:00
|
|
|
#if LOCK_DEBUG
|
2020-12-14 18:36:22 -05:00
|
|
|
m_holder->holding_lock(*this, (int)lock_count, file, line);
|
|
|
|
#endif
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
case Mode::Shared: {
|
|
|
|
auto expected_mode = Mode::Unlocked;
|
2021-01-03 18:58:50 -05:00
|
|
|
if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared) && expected_mode != Mode::Shared)
|
2020-12-14 18:36:22 -05:00
|
|
|
break;
|
2021-01-18 11:25:44 -05:00
|
|
|
|
2021-02-07 07:03:24 -05:00
|
|
|
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was {}",
|
2021-01-18 11:25:44 -05:00
|
|
|
this, mode_to_string(mode), lock_count, mode_to_string(expected_mode));
|
|
|
|
|
2021-02-23 14:42:32 -05:00
|
|
|
VERIFY(expected_mode == Mode::Shared || m_times_locked == 0);
|
2020-12-14 18:36:22 -05:00
|
|
|
m_times_locked += lock_count;
|
2021-02-23 14:42:32 -05:00
|
|
|
VERIFY(!m_holder);
|
|
|
|
VERIFY((expected_mode == Mode::Unlocked) == m_shared_holders.is_empty());
|
2020-12-14 18:36:22 -05:00
|
|
|
auto set_result = m_shared_holders.set(current_thread, lock_count);
|
|
|
|
// There may be other shared lock holders already, but we should not have an entry yet
|
2021-02-23 14:42:32 -05:00
|
|
|
VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
|
2021-01-24 00:30:10 -05:00
|
|
|
m_queue.should_block(true);
|
2020-12-14 18:36:22 -05:00
|
|
|
m_lock.store(false, AK::memory_order_release);
|
2021-01-23 17:29:11 -05:00
|
|
|
#if LOCK_DEBUG
|
2020-12-14 18:36:22 -05:00
|
|
|
m_holder->holding_lock(*this, (int)lock_count, file, line);
|
|
|
|
#endif
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
default:
|
2021-02-23 14:42:32 -05:00
|
|
|
VERIFY_NOT_REACHED();
|
2020-12-14 18:36:22 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
m_lock.store(false, AK::memory_order_relaxed);
|
2020-11-30 21:04:36 -05:00
|
|
|
}
|
|
|
|
// I don't know *who* is using "m_lock", so just yield.
|
|
|
|
Scheduler::yield_from_critical();
|
|
|
|
}
|
2019-07-29 06:00:14 -04:00
|
|
|
}
|
2020-02-15 19:27:42 -05:00
|
|
|
|
Kernel: Allow process with multiple threads to call exec and exit
This allows a process which has more than 1 thread to call exec, even
from a thread. This kills all the other threads, but it won't wait for
them to finish, just makes sure that they are not in a running/runnable
state.
In the case where a thread does exec, the new program PID will be the
thread TID, to keep the PID == TID in the new process.
This introduces a new function inside the Process class,
kill_threads_except_self which is called on exit() too (exit with
multiple threads wasn't properly working either).
Inside the Lock class, there is the need for a new function,
clear_waiters, which removes all the waiters from the
Process::big_lock. This is needed since after an exit/exec, there should
be no other threads waiting for this lock; the threads should be simply
killed. Only queued threads should wait for this lock at this point,
since blocked threads are handled in set_should_die.
2020-02-18 07:28:28 -05:00
|
|
|
void Lock::clear_waiters()
{
    // Wake every thread queued on this lock without handing the lock over.
    // Used on exec/exit paths where queued threads are being killed and must
    // not remain parked on Process::big_lock. Not meaningful for shared mode.
    VERIFY(m_mode != Mode::Shared);
    m_queue.wake_all();
}
|
|
|
|
|
2020-02-15 19:27:42 -05:00
|
|
|
}
|