2020-01-18 09:38:21 +01:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
|
|
|
|
*
|
2021-04-22 01:24:48 -07:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 09:38:21 +01:00
|
|
|
*/
|
|
|
|
|
2018-10-23 23:32:53 +02:00
|
|
|
#pragma once
|
|
|
|
|
2019-03-16 13:18:22 +01:00
|
|
|
#include <AK/Assertions.h>
|
2019-10-12 11:17:34 -06:00
|
|
|
#include <AK/Atomic.h>
|
2020-12-14 16:36:22 -07:00
|
|
|
#include <AK/HashMap.h>
|
2019-12-01 11:57:20 +01:00
|
|
|
#include <AK/Types.h>
|
2021-03-07 21:28:28 +01:00
|
|
|
#include <Kernel/Arch/x86/CPU.h>
|
2020-02-16 01:50:16 +01:00
|
|
|
#include <Kernel/Forward.h>
|
2020-12-14 16:36:22 -07:00
|
|
|
#include <Kernel/LockMode.h>
|
2019-12-01 11:57:20 +01:00
|
|
|
#include <Kernel/WaitQueue.h>
|
2019-01-16 16:03:50 +01:00
|
|
|
|
2020-02-16 01:27:42 +01:00
|
|
|
namespace Kernel {
|
|
|
|
|
2019-01-17 16:25:02 +01:00
|
|
|
// A kernel mutex that supports both exclusive and shared (reader) locking,
// recursive acquisition, and temporary release/restore (see force_unlock_if_locked
// / restore_lock). Blocked threads wait on the internal WaitQueue.
class Lock {
    AK_MAKE_NONCOPYABLE(Lock);
    AK_MAKE_NONMOVABLE(Lock);

public:
    using Mode = LockMode;

    // The optional name is kept only for diagnostics (see name()).
    Lock(const char* name = nullptr)
        : m_name(name)
    {
    }
    ~Lock() = default;

#if LOCK_DEBUG
    // Debug builds record the caller's source location with each acquisition.
    void lock(Mode mode = Mode::Exclusive, const SourceLocation& location = SourceLocation::current());
    void restore_lock(Mode, u32, const SourceLocation& location = SourceLocation::current());
#else
    void lock(Mode = Mode::Exclusive);
    // Re-acquires the lock in the given mode with the given recursion count,
    // undoing a prior force_unlock_if_locked().
    void restore_lock(Mode, u32);
#endif

    void unlock();
    // Fully releases the lock (all recursion levels) if the current thread
    // holds it. Returns the mode it was held in (Mode::Unlocked if not held)
    // and stores the recursion count into the u32 out-parameter, so the
    // caller can later hand both back to restore_lock().
    [[nodiscard]] Mode force_unlock_if_locked(u32&);
    [[nodiscard]] bool is_locked() const { return m_mode != Mode::Unlocked; }
    // Drops all threads queued on this lock without waking them into the
    // critical section. Used by Process exec/exit paths: after killing all
    // other threads of a process, nothing should still be waiting on the
    // process's big_lock. (Only queued threads need this; blocked threads
    // are handled via set_should_die.)
    void clear_waiters();

    [[nodiscard]] const char* name() const { return m_name; }

    // Human-readable mode name for logging/diagnostics.
    static const char* mode_to_string(Mode mode)
    {
        switch (mode) {
        case Mode::Unlocked:
            return "unlocked";
        case Mode::Exclusive:
            return "exclusive";
        case Mode::Shared:
            return "shared";
        default:
            return "invalid";
        }
    }

private:
    Atomic<bool> m_lock { false }; // Low-level spin flag guarding the lock's own state.
    const char* m_name { nullptr };
    WaitQueue m_queue; // Threads blocked waiting to acquire this lock.
    Atomic<Mode, AK::MemoryOrder::memory_order_relaxed> m_mode { Mode::Unlocked };

    // When locked exclusively, only the thread already holding the lock can
    // lock it again. When locked in shared mode, any thread can do that.
    u32 m_times_locked { 0 };

    // One of the threads that hold this lock, or nullptr. When locked in shared
    // mode, this is stored on best effort basis: nullptr value does *not* mean
    // the lock is unlocked, it just means we don't know which threads hold it.
    // When locked exclusively, this is always the one thread that holds the
    // lock.
    RefPtr<Thread> m_holder;
    HashMap<Thread*, u32> m_shared_holders; // Per-thread recursion counts for shared holders.
};
|
|
|
|
|
|
|
|
// RAII guard for Lock: acquires in the constructor, releases in the
// destructor. unlock()/lock() allow temporarily dropping and re-taking the
// lock within the guard's lifetime; m_locked tracks whether this guard
// currently holds it.
class Locker {
public:
#if LOCK_DEBUG
    // Debug builds forward the caller's source location to Lock::lock().
    ALWAYS_INLINE explicit Locker(Lock& l, Lock::Mode mode = Lock::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
#else
    ALWAYS_INLINE explicit Locker(Lock& l, Lock::Mode mode = Lock::Mode::Exclusive)
#endif
        : m_lock(l)
    {
        // Note: calls m_lock.lock() directly rather than this->lock(),
        // because m_locked is already initialized to true.
#if LOCK_DEBUG
        m_lock.lock(mode, location);
#else
        m_lock.lock(mode);
#endif
    }

    ALWAYS_INLINE ~Locker()
    {
        // Only release if this guard still holds the lock (it may have been
        // dropped early via unlock()).
        if (m_locked)
            unlock();
    }
    // Releases the lock before the guard goes out of scope. Must currently
    // hold it.
    ALWAYS_INLINE void unlock()
    {
        VERIFY(m_locked);
        m_locked = false;
        m_lock.unlock();
    }
#if LOCK_DEBUG
    ALWAYS_INLINE void lock(Lock::Mode mode = Lock::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
#else
    // Re-acquires the lock after an explicit unlock(). Must not already
    // hold it.
    ALWAYS_INLINE void lock(Lock::Mode mode = Lock::Mode::Exclusive)
#endif
    {
        VERIFY(!m_locked);
        // Set the flag before blocking in Lock::lock(), so the destructor
        // releases the lock even if we were holding it when unwound.
        m_locked = true;
#if LOCK_DEBUG
        m_lock.lock(mode, location);
#else
        m_lock.lock(mode);
#endif
    }

    Lock& get_lock() { return m_lock; }
    const Lock& get_lock() const { return m_lock; }

private:
    Lock& m_lock;
    bool m_locked { true }; // True while this guard holds m_lock.
};
|
|
|
|
|
2019-02-08 09:46:13 +01:00
|
|
|
template<typename T>
|
|
|
|
class Lockable {
|
|
|
|
public:
|
2021-02-28 14:42:08 +01:00
|
|
|
Lockable() = default;
|
2019-05-28 11:53:16 +02:00
|
|
|
Lockable(T&& resource)
|
|
|
|
: m_resource(move(resource))
|
|
|
|
{
|
|
|
|
}
|
2021-02-14 15:24:44 -08:00
|
|
|
[[nodiscard]] Lock& lock() { return m_lock; }
|
|
|
|
[[nodiscard]] T& resource() { return m_resource; }
|
2019-02-08 09:46:13 +01:00
|
|
|
|
2021-02-14 15:24:44 -08:00
|
|
|
[[nodiscard]] T lock_and_copy()
|
2019-02-08 16:18:24 +01:00
|
|
|
{
|
2021-04-24 15:27:32 -07:00
|
|
|
Locker locker(m_lock);
|
2019-02-08 16:18:24 +01:00
|
|
|
return m_resource;
|
|
|
|
}
|
|
|
|
|
2019-02-08 09:46:13 +01:00
|
|
|
private:
|
|
|
|
T m_resource;
|
|
|
|
Lock m_lock;
|
|
|
|
};
|
2020-02-16 01:27:42 +01:00
|
|
|
|
2021-02-06 11:33:31 -07:00
|
|
|
// Temporarily releases a Lock for the duration of a scope (the inverse of
// Locker): the constructor fully unlocks the lock if the current thread holds
// it, remembering the mode and recursion count, and the destructor restores
// that exact state. Movable but not copyable.
class ScopedLockRelease {
    AK_MAKE_NONCOPYABLE(ScopedLockRelease);

public:
    ScopedLockRelease& operator=(ScopedLockRelease&&) = delete;

    ScopedLockRelease(Lock& lock)
        : m_lock(&lock)
        , m_previous_mode(lock.force_unlock_if_locked(m_previous_recursions))
    {
        // NOTE: m_previous_mode's initializer writes m_previous_recursions
        // through the out-parameter *before* that member's own initialization
        // point. This relies on declaration order and means m_previous_recursions
        // must NOT be given a default member initializer, which would run
        // afterwards and clobber the saved recursion count.
    }

    // Move transfers responsibility for restoring the lock; the moved-from
    // object is neutralized (null lock, Unlocked mode) so its destructor is
    // a no-op.
    ScopedLockRelease(ScopedLockRelease&& from)
        : m_lock(exchange(from.m_lock, nullptr))
        , m_previous_mode(exchange(from.m_previous_mode, Lock::Mode::Unlocked))
        , m_previous_recursions(exchange(from.m_previous_recursions, 0))
    {
    }

    ~ScopedLockRelease()
    {
        // Restore only if we still own a lock that was actually held
        // (and restore_lock() wasn't already called explicitly).
        if (m_lock && m_previous_mode != Lock::Mode::Unlocked)
            m_lock->restore_lock(m_previous_mode, m_previous_recursions);
    }

    // Re-acquires the lock in its saved state before the scope ends.
    // Idempotent: subsequent calls (and the destructor) become no-ops.
    void restore_lock()
    {
        VERIFY(m_lock);
        if (m_previous_mode != Lock::Mode::Unlocked) {
            m_lock->restore_lock(m_previous_mode, m_previous_recursions);
            m_previous_mode = Lock::Mode::Unlocked;
            m_previous_recursions = 0;
        }
    }

    // Forgets the saved state without re-locking, so the destructor leaves
    // the lock released.
    void do_not_restore()
    {
        VERIFY(m_lock);
        m_previous_mode = Lock::Mode::Unlocked;
        m_previous_recursions = 0;
    }

private:
    Lock* m_lock; // nullptr after being moved from.
    Lock::Mode m_previous_mode;
    // Deliberately has no default initializer — see constructor note.
    u32 m_previous_recursions;
};
|
|
|
|
|
2020-02-16 01:27:42 +01:00
|
|
|
}
|