mirror of
https://github.com/SerenityOS/serenity.git
synced 2025-01-22 17:31:58 -05:00
Kernel: Some futex improvements
This adds support for FUTEX_WAKE_OP, FUTEX_WAIT_BITSET, FUTEX_WAKE_BITSET, FUTEX_REQUEUE, and FUTEX_CMP_REQUEUE, as well well as global and private futex and absolute/relative timeouts against the appropriate clock. This also changes the implementation so that kernel resources are only used when a thread is blocked on a futex. Global futexes are implemented as offsets in VMObjects, so that different processes can share a futex against the same VMObject despite potentially being mapped at different virtual addresses.
This commit is contained in:
parent
7581b64705
commit
1d621ab172
23 changed files with 928 additions and 63 deletions
|
@ -310,10 +310,15 @@ struct SC_getpeername_params {
|
|||
};
|
||||
|
||||
struct SC_futex_params {
|
||||
const i32* userspace_address;
|
||||
u32* userspace_address;
|
||||
int futex_op;
|
||||
i32 val;
|
||||
const timespec* timeout;
|
||||
u32 val;
|
||||
union {
|
||||
const timespec* timeout;
|
||||
u32 val2;
|
||||
};
|
||||
u32* userspace_address2;
|
||||
u32 val3;
|
||||
};
|
||||
|
||||
struct SC_setkeymap_params {
|
||||
|
|
|
@ -66,6 +66,7 @@ set(KERNEL_SOURCES
|
|||
FileSystem/ProcFS.cpp
|
||||
FileSystem/TmpFS.cpp
|
||||
FileSystem/VirtualFileSystem.cpp
|
||||
FutexQueue.cpp
|
||||
Interrupts/APIC.cpp
|
||||
Interrupts/GenericInterruptHandler.cpp
|
||||
Interrupts/IOAPIC.cpp
|
||||
|
|
|
@ -37,6 +37,7 @@ class DiskCache;
|
|||
class DoubleBuffer;
|
||||
class File;
|
||||
class FileDescription;
|
||||
class FutexQueue;
|
||||
class IPv4Socket;
|
||||
class Inode;
|
||||
class InodeIdentifier;
|
||||
|
|
159
Kernel/FutexQueue.cpp
Normal file
159
Kernel/FutexQueue.cpp
Normal file
|
@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Copyright (c) 2020, The SerenityOS developers.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <Kernel/FutexQueue.h>
|
||||
#include <Kernel/Thread.h>
|
||||
|
||||
//#define FUTEXQUEUE_DEBUG
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
bool FutexQueue::should_add_blocker(Thread::Blocker& b, void* data)
|
||||
{
|
||||
ASSERT(data != nullptr); // Thread that is requesting to be blocked
|
||||
ASSERT(m_lock.is_locked());
|
||||
ASSERT(b.blocker_type() == Thread::Blocker::Type::Futex);
|
||||
#ifdef FUTEXQUEUE_DEBUG
|
||||
dbg() << "FutexQueue @ " << this << ": should block thread " << *static_cast<Thread*>(data);
|
||||
#endif
|
||||
return true;
|
||||
}
|
||||
|
||||
u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& get_target_queue, u32 requeue_count, bool& is_empty, bool& is_empty_target)
|
||||
{
|
||||
is_empty_target = false;
|
||||
ScopedSpinLock lock(m_lock);
|
||||
#ifdef FUTEXQUEUE_DEBUG
|
||||
dbg() << "FutexQueue @ " << this << ": wake_n_requeue(" << wake_count << ", " << requeue_count << ")";
|
||||
#endif
|
||||
u32 did_wake = 0, did_requeue = 0;
|
||||
do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
|
||||
ASSERT(data);
|
||||
ASSERT(b.blocker_type() == Thread::Blocker::Type::Futex);
|
||||
auto& blocker = static_cast<Thread::FutexBlocker&>(b);
|
||||
#ifdef FUTEXQUEUE_DEBUG
|
||||
dbg() << "FutexQueue @ " << this << ": wake_n_requeue unblocking " << *static_cast<Thread*>(data);
|
||||
#endif
|
||||
ASSERT(did_wake < wake_count);
|
||||
if (blocker.unblock()) {
|
||||
if (++did_wake >= wake_count)
|
||||
stop_iterating = true;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
is_empty = is_empty_locked();
|
||||
if (requeue_count > 0) {
|
||||
auto blockers_to_requeue = do_take_blockers(requeue_count);
|
||||
if (!blockers_to_requeue.is_empty()) {
|
||||
if (auto* target_futex_queue = get_target_queue()) {
|
||||
#ifdef FUTEXQUEUE_DEBUG
|
||||
dbg() << "FutexQueue @ " << this << ": wake_n_requeue requeueing " << blockers_to_requeue.size() << " blockers to " << target_futex_queue;
|
||||
#endif
|
||||
// While still holding m_lock, notify each blocker
|
||||
for (auto& info : blockers_to_requeue) {
|
||||
ASSERT(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
|
||||
auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
|
||||
blocker.begin_requeue();
|
||||
}
|
||||
|
||||
lock.unlock();
|
||||
did_requeue = blockers_to_requeue.size();
|
||||
|
||||
ScopedSpinLock target_lock(target_futex_queue->m_lock);
|
||||
// Now that we have the lock of the target, append the blockers
|
||||
// and notify them that they completed the move
|
||||
for (auto& info : blockers_to_requeue) {
|
||||
ASSERT(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
|
||||
auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
|
||||
blocker.finish_requeue(*target_futex_queue);
|
||||
}
|
||||
target_futex_queue->do_append_blockers(move(blockers_to_requeue));
|
||||
is_empty_target = target_futex_queue->is_empty_locked();
|
||||
} else {
|
||||
#ifdef FUTEXQUEUE_DEBUG
|
||||
dbg() << "FutexQueue @ " << this << ": wake_n_requeue could not get target queue to requeueing " << blockers_to_requeue.size() << " blockers";
|
||||
#endif
|
||||
do_append_blockers(move(blockers_to_requeue));
|
||||
}
|
||||
}
|
||||
}
|
||||
return did_wake + did_requeue;
|
||||
}
|
||||
|
||||
u32 FutexQueue::wake_n(u32 wake_count, const Optional<u32>& bitset, bool& is_empty)
|
||||
{
|
||||
if (wake_count == 0)
|
||||
return 0; // should we assert instaed?
|
||||
ScopedSpinLock lock(m_lock);
|
||||
#ifdef FUTEXQUEUE_DEBUG
|
||||
dbg() << "FutexQueue @ " << this << ": wake_n(" << wake_count << ")";
|
||||
#endif
|
||||
u32 did_wake = 0;
|
||||
do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
|
||||
ASSERT(data);
|
||||
ASSERT(b.blocker_type() == Thread::Blocker::Type::Futex);
|
||||
auto& blocker = static_cast<Thread::FutexBlocker&>(b);
|
||||
#ifdef FUTEXQUEUE_DEBUG
|
||||
dbg() << "FutexQueue @ " << this << ": wake_n unblocking " << *static_cast<Thread*>(data);
|
||||
#endif
|
||||
ASSERT(did_wake < wake_count);
|
||||
if (bitset.has_value() ? blocker.unblock_bitset(bitset.value()) : blocker.unblock()) {
|
||||
if (++did_wake >= wake_count)
|
||||
stop_iterating = true;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
is_empty = is_empty_locked();
|
||||
return did_wake;
|
||||
}
|
||||
|
||||
u32 FutexQueue::wake_all(bool& is_empty)
|
||||
{
|
||||
ScopedSpinLock lock(m_lock);
|
||||
#ifdef FUTEXQUEUE_DEBUG
|
||||
dbg() << "FutexQueue @ " << this << ": wake_all";
|
||||
#endif
|
||||
u32 did_wake = 0;
|
||||
do_unblock([&](Thread::Blocker& b, void* data, bool&) {
|
||||
ASSERT(data);
|
||||
ASSERT(b.blocker_type() == Thread::Blocker::Type::Futex);
|
||||
auto& blocker = static_cast<Thread::FutexBlocker&>(b);
|
||||
#ifdef FUTEXQUEUE_DEBUG
|
||||
dbg() << "FutexQueue @ " << this << ": wake_all unblocking " << *static_cast<Thread*>(data);
|
||||
#endif
|
||||
if (blocker.unblock(true)) {
|
||||
did_wake++;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
is_empty = is_empty_locked();
|
||||
return did_wake;
|
||||
}
|
||||
|
||||
}
|
67
Kernel/FutexQueue.h
Normal file
67
Kernel/FutexQueue.h
Normal file
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
* Copyright (c) 2020, The SerenityOS developers.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <AK/Atomic.h>
|
||||
#include <AK/RefCounted.h>
|
||||
#include <Kernel/SpinLock.h>
|
||||
#include <Kernel/Thread.h>
|
||||
#include <Kernel/VM/VMObject.h>
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class FutexQueue : public Thread::BlockCondition
|
||||
, public RefCounted<FutexQueue>
|
||||
, public VMObjectDeletedHandler {
|
||||
public:
|
||||
FutexQueue(FlatPtr user_address_or_offset, VMObject* vmobject = nullptr);
|
||||
virtual ~FutexQueue();
|
||||
|
||||
u32 wake_n_requeue(u32, const Function<FutexQueue*()>&, u32, bool&, bool&);
|
||||
u32 wake_n(u32, const Optional<u32>&, bool&);
|
||||
u32 wake_all(bool&);
|
||||
|
||||
template<class... Args>
|
||||
Thread::BlockResult wait_on(const Thread::BlockTimeout& timeout, Args&&... args)
|
||||
{
|
||||
return Thread::current()->block<Thread::FutexBlocker>(timeout, *this, forward<Args>(args)...);
|
||||
}
|
||||
|
||||
virtual void vmobject_deleted(VMObject&) override;
|
||||
|
||||
protected:
|
||||
virtual bool should_add_blocker(Thread::Blocker& b, void* data) override;
|
||||
|
||||
private:
|
||||
// For private futexes we just use the user space address.
|
||||
// But for global futexes we use the offset into the VMObject
|
||||
const FlatPtr m_user_address_or_offset;
|
||||
WeakPtr<VMObject> m_vmobject;
|
||||
const bool m_is_global;
|
||||
};
|
||||
|
||||
}
|
|
@ -38,6 +38,7 @@
|
|||
#include <Kernel/API/Syscall.h>
|
||||
#include <Kernel/FileSystem/InodeMetadata.h>
|
||||
#include <Kernel/Forward.h>
|
||||
#include <Kernel/FutexQueue.h>
|
||||
#include <Kernel/Lock.h>
|
||||
#include <Kernel/ProcessGroup.h>
|
||||
#include <Kernel/StdLib.h>
|
||||
|
@ -94,6 +95,8 @@ enum class VeilState {
|
|||
Locked,
|
||||
};
|
||||
|
||||
typedef HashMap<FlatPtr, RefPtr<FutexQueue>> FutexQueues;
|
||||
|
||||
class Process
|
||||
: public RefCounted<Process>
|
||||
, public InlineLinkedListNode<Process>
|
||||
|
@ -542,6 +545,8 @@ private:
|
|||
|
||||
bool has_tracee_thread(ProcessID tracer_pid);
|
||||
|
||||
void clear_futex_queues_on_exec();
|
||||
|
||||
RefPtr<PageDirectory> m_page_directory;
|
||||
|
||||
Process* m_prev { nullptr };
|
||||
|
@ -637,11 +642,11 @@ private:
|
|||
VeilState m_veil_state { VeilState::None };
|
||||
UnveilNode m_unveiled_paths { "/", { .full_path = "/", .unveil_inherited_from_root = true } };
|
||||
|
||||
WaitQueue& futex_queue(Userspace<const i32*>);
|
||||
HashMap<u32, OwnPtr<WaitQueue>> m_futex_queues;
|
||||
|
||||
OwnPtr<PerformanceEventBuffer> m_perf_event_buffer;
|
||||
|
||||
FutexQueues m_futex_queues;
|
||||
SpinLock<u8> m_futex_lock;
|
||||
|
||||
// This member is used in the implementation of ptrace's PT_TRACEME flag.
|
||||
// If it is set to true, the process will stop at the next execve syscall
|
||||
// and wait for a tracer to attach.
|
||||
|
|
|
@ -530,7 +530,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
|
|||
current_thread->set_default_signal_dispositions();
|
||||
current_thread->clear_signals();
|
||||
|
||||
m_futex_queues.clear();
|
||||
clear_futex_queues_on_exec();
|
||||
|
||||
m_region_lookup_cache = {};
|
||||
|
||||
|
|
|
@ -24,17 +24,83 @@
|
|||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <AK/Singleton.h>
|
||||
#include <AK/Time.h>
|
||||
#include <Kernel/Process.h>
|
||||
#include <Kernel/VM/MemoryManager.h>
|
||||
|
||||
//#define FUTEX_DEBUG
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
WaitQueue& Process::futex_queue(Userspace<const i32*> userspace_address)
|
||||
static SpinLock<u8> g_global_futex_lock;
|
||||
static AK::Singleton<HashMap<VMObject*, FutexQueues>> g_global_futex_queues;
|
||||
|
||||
FutexQueue::FutexQueue(FlatPtr user_address_or_offset, VMObject* vmobject)
|
||||
: m_user_address_or_offset(user_address_or_offset)
|
||||
, m_is_global(vmobject != nullptr)
|
||||
{
|
||||
auto& queue = m_futex_queues.ensure(userspace_address.ptr());
|
||||
if (!queue)
|
||||
queue = make<WaitQueue>();
|
||||
return *queue;
|
||||
#ifdef FUTEX_DEBUG
|
||||
dbg() << "Futex @ " << this << (m_is_global ? " (global)" : "(local)");
|
||||
#endif
|
||||
if (m_is_global) {
|
||||
// Only register for global futexes
|
||||
m_vmobject = vmobject->make_weak_ptr();
|
||||
vmobject->register_on_deleted_handler(*this);
|
||||
}
|
||||
}
|
||||
|
||||
FutexQueue::~FutexQueue()
|
||||
{
|
||||
if (m_is_global) {
|
||||
if (auto vmobject = m_vmobject.strong_ref())
|
||||
vmobject->unregister_on_deleted_handler(*this);
|
||||
}
|
||||
#ifdef FUTEX_DEBUG
|
||||
dbg() << "~Futex @ " << this << (m_is_global ? " (global)" : "(local)");
|
||||
#endif
|
||||
}
|
||||
|
||||
void FutexQueue::vmobject_deleted(VMObject& vmobject)
|
||||
{
|
||||
ASSERT(m_is_global); // If we got called we must be a global futex
|
||||
// Because we're taking ourselves out of the global queue, we need
|
||||
// to make sure we have at last a reference until we're done
|
||||
NonnullRefPtr<FutexQueue> own_ref(*this);
|
||||
|
||||
#ifdef FUTEX_DEBUG
|
||||
dbg() << "Futex::vmobject_deleted @ " << this << (m_is_global ? " (global)" : "(local)");
|
||||
#endif
|
||||
|
||||
// Because this is called from the VMObject's destructor, getting a
|
||||
// strong_ref in this function is unsafe!
|
||||
m_vmobject = nullptr; // Just to be safe...
|
||||
|
||||
{
|
||||
ScopedSpinLock lock(g_global_futex_lock);
|
||||
g_global_futex_queues->remove(&vmobject);
|
||||
}
|
||||
|
||||
bool did_wake_all;
|
||||
auto wake_count = wake_all(did_wake_all);
|
||||
#ifdef FUTEX_DEBUG
|
||||
if (wake_count > 0)
|
||||
dbg() << "Futex: @ " << this << " unblocked " << wake_count << " waiters due to vmobject free";
|
||||
#else
|
||||
(void)wake_count;
|
||||
#endif
|
||||
ASSERT(did_wake_all); // No one should be left behind...
|
||||
}
|
||||
|
||||
void Process::clear_futex_queues_on_exec()
|
||||
{
|
||||
ScopedSpinLock lock(m_futex_lock);
|
||||
for (auto& it : m_futex_queues) {
|
||||
bool did_wake_all;
|
||||
it.value->wake_all(did_wake_all);
|
||||
ASSERT(did_wake_all); // No one should be left behind...
|
||||
}
|
||||
m_futex_queues.clear();
|
||||
}
|
||||
|
||||
int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
|
||||
|
@ -45,42 +111,270 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
|
|||
if (!copy_from_user(¶ms, user_params))
|
||||
return -EFAULT;
|
||||
|
||||
switch (params.futex_op) {
|
||||
case FUTEX_WAIT: {
|
||||
i32 user_value;
|
||||
if (!copy_from_user(&user_value, params.userspace_address))
|
||||
return -EFAULT;
|
||||
if (user_value != params.val)
|
||||
return -EAGAIN;
|
||||
|
||||
Thread::BlockTimeout timeout;
|
||||
Thread::BlockTimeout timeout;
|
||||
u32 cmd = params.futex_op & FUTEX_CMD_MASK;
|
||||
switch (cmd) {
|
||||
case FUTEX_WAIT:
|
||||
case FUTEX_WAIT_BITSET:
|
||||
case FUTEX_REQUEUE:
|
||||
case FUTEX_CMP_REQUEUE: {
|
||||
if (params.timeout) {
|
||||
timespec ts_abstimeout { 0, 0 };
|
||||
if (!copy_from_user(&ts_abstimeout, params.timeout))
|
||||
timespec ts_stimeout { 0, 0 };
|
||||
if (!copy_from_user(&ts_stimeout, params.timeout))
|
||||
return -EFAULT;
|
||||
timeout = Thread::BlockTimeout(true, &ts_abstimeout);
|
||||
clockid_t clock_id = (params.futex_op & FUTEX_CLOCK_REALTIME) ? CLOCK_REALTIME_COARSE : CLOCK_MONOTONIC_COARSE;
|
||||
bool is_absolute = cmd != FUTEX_WAIT;
|
||||
timeout = Thread::BlockTimeout(is_absolute, &ts_stimeout, nullptr, clock_id);
|
||||
}
|
||||
if (cmd == FUTEX_WAIT_BITSET && params.val3 == FUTEX_BITSET_MATCH_ANY)
|
||||
cmd = FUTEX_WAIT;
|
||||
break;
|
||||
case FUTEX_WAKE_BITSET:
|
||||
if (params.val3 == FUTEX_BITSET_MATCH_ANY)
|
||||
cmd = FUTEX_WAKE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
WaitQueue& wait_queue = futex_queue((FlatPtr)params.userspace_address);
|
||||
Thread::BlockResult result = wait_queue.wait_on(timeout, "Futex");
|
||||
if (result == Thread::BlockResult::InterruptedByTimeout) {
|
||||
bool is_private = (params.futex_op & FUTEX_PRIVATE_FLAG) != 0;
|
||||
auto& queue_lock = is_private ? m_futex_lock : g_global_futex_lock;
|
||||
auto user_address_or_offset = FlatPtr(params.userspace_address);
|
||||
auto user_address_or_offset2 = FlatPtr(params.userspace_address2);
|
||||
|
||||
// If this is a global lock, look up the underlying VMObject *before*
|
||||
// acquiring the queue lock
|
||||
RefPtr<VMObject> vmobject, vmobject2;
|
||||
if (!is_private) {
|
||||
if (!Kernel::is_user_range(VirtualAddress(user_address_or_offset), sizeof(u32)))
|
||||
return -EFAULT;
|
||||
auto region = MM.find_region_from_vaddr(*Process::current(), VirtualAddress(user_address_or_offset));
|
||||
if (!region)
|
||||
return -EFAULT;
|
||||
vmobject = region->vmobject();
|
||||
user_address_or_offset = region->offset_in_vmobject_from_vaddr(VirtualAddress(user_address_or_offset));
|
||||
|
||||
switch (cmd) {
|
||||
case FUTEX_REQUEUE:
|
||||
case FUTEX_CMP_REQUEUE:
|
||||
case FUTEX_WAKE_OP: {
|
||||
if (!Kernel::is_user_range(VirtualAddress(user_address_or_offset2), sizeof(u32)))
|
||||
return -EFAULT;
|
||||
auto region2 = MM.find_region_from_vaddr(*Process::current(), VirtualAddress(user_address_or_offset2));
|
||||
if (!region2)
|
||||
return -EFAULT;
|
||||
vmobject2 = region2->vmobject();
|
||||
user_address_or_offset2 = region->offset_in_vmobject_from_vaddr(VirtualAddress(user_address_or_offset2));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
auto find_global_futex_queues = [&](VMObject& vmobject, bool create_if_not_found) -> FutexQueues* {
|
||||
auto& global_queues = *g_global_futex_queues;
|
||||
auto it = global_queues.find(&vmobject);
|
||||
if (it != global_queues.end())
|
||||
return &it->value;
|
||||
if (create_if_not_found) {
|
||||
// TODO: is there a better way than setting and finding it again?
|
||||
auto result = global_queues.set(&vmobject, {});
|
||||
ASSERT(result == AK::HashSetResult::InsertedNewEntry);
|
||||
it = global_queues.find(&vmobject);
|
||||
ASSERT(it != global_queues.end());
|
||||
return &it->value;
|
||||
}
|
||||
return nullptr;
|
||||
};
|
||||
|
||||
auto find_futex_queue = [&](VMObject* vmobject, FlatPtr user_address_or_offset, bool create_if_not_found) -> RefPtr<FutexQueue> {
|
||||
ASSERT(is_private || vmobject);
|
||||
auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, create_if_not_found);
|
||||
if (!queues)
|
||||
return {};
|
||||
auto it = queues->find(user_address_or_offset);
|
||||
if (it != queues->end())
|
||||
return it->value;
|
||||
if (create_if_not_found) {
|
||||
auto futex_queue = adopt(*new FutexQueue(user_address_or_offset, vmobject));
|
||||
auto result = queues->set(user_address_or_offset, futex_queue);
|
||||
ASSERT(result == AK::HashSetResult::InsertedNewEntry);
|
||||
return futex_queue;
|
||||
}
|
||||
return {};
|
||||
};
|
||||
|
||||
auto remove_futex_queue = [&](VMObject* vmobject, FlatPtr user_address_or_offset) {
|
||||
auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, false);
|
||||
if (queues) {
|
||||
queues->remove(user_address_or_offset);
|
||||
if (!is_private && queues->is_empty())
|
||||
g_global_futex_queues->remove(vmobject);
|
||||
}
|
||||
};
|
||||
|
||||
auto do_wake = [&](VMObject* vmobject, FlatPtr user_address_or_offset, u32 count, Optional<u32> bitmask) -> int {
|
||||
if (count == 0)
|
||||
return 0;
|
||||
auto futex_queue = find_futex_queue(vmobject, user_address_or_offset, false);
|
||||
if (!futex_queue)
|
||||
return 0;
|
||||
bool is_empty;
|
||||
u32 woke_count = futex_queue->wake_n(count, bitmask, is_empty);
|
||||
if (is_empty) {
|
||||
// If there are no more waiters, we want to get rid of the futex!
|
||||
remove_futex_queue(vmobject, user_address_or_offset);
|
||||
}
|
||||
return (int)woke_count;
|
||||
};
|
||||
|
||||
ScopedSpinLock lock(queue_lock);
|
||||
|
||||
auto do_wait = [&](u32 bitset) -> int {
|
||||
auto user_value = user_atomic_load_relaxed(params.userspace_address);
|
||||
if (!user_value.has_value())
|
||||
return -EFAULT;
|
||||
if (user_value.value() != params.val) {
|
||||
dbg() << "futex wait: EAGAIN. user value: " << (void*)user_value.value() << " @ " << (void*)params.userspace_address << " != val: " << params.val;
|
||||
return -EAGAIN;
|
||||
}
|
||||
atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
|
||||
|
||||
auto futex_queue = find_futex_queue(vmobject.ptr(), user_address_or_offset, true);
|
||||
ASSERT(futex_queue);
|
||||
|
||||
// We need to release the lock before blocking. But we have a reference
|
||||
// to the FutexQueue so that we can keep it alive.
|
||||
lock.unlock();
|
||||
|
||||
Thread::BlockResult block_result = futex_queue->wait_on(timeout, bitset);
|
||||
|
||||
lock.lock();
|
||||
if (futex_queue->is_empty()) {
|
||||
// If there are no more waiters, we want to get rid of the futex!
|
||||
remove_futex_queue(vmobject, user_address_or_offset);
|
||||
}
|
||||
if (block_result == Thread::BlockResult::InterruptedByTimeout) {
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
return 0;
|
||||
};
|
||||
|
||||
break;
|
||||
}
|
||||
case FUTEX_WAKE:
|
||||
if (params.val == 0)
|
||||
return 0;
|
||||
if (params.val == 1) {
|
||||
futex_queue((FlatPtr)params.userspace_address).wake_one();
|
||||
} else {
|
||||
futex_queue((FlatPtr)params.userspace_address).wake_n(params.val);
|
||||
auto do_requeue = [&](Optional<u32> val3) -> int {
|
||||
auto user_value = user_atomic_load_relaxed(params.userspace_address);
|
||||
if (!user_value.has_value())
|
||||
return -EFAULT;
|
||||
if (val3.has_value() && val3.value() != user_value.value())
|
||||
return -EAGAIN;
|
||||
atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
|
||||
|
||||
int woken_or_requeued = 0;
|
||||
if (auto futex_queue = find_futex_queue(vmobject.ptr(), user_address_or_offset, false)) {
|
||||
RefPtr<FutexQueue> target_futex_queue;
|
||||
bool is_empty, is_target_empty;
|
||||
woken_or_requeued = futex_queue->wake_n_requeue(
|
||||
params.val, [&]() -> FutexQueue* {
|
||||
// NOTE: futex_queue's lock is being held while this callback is called
|
||||
// The reason we're doing this in a callback is that we don't want to always
|
||||
// create a target queue, only if we actually have anything to move to it!
|
||||
target_futex_queue = find_futex_queue(vmobject2.ptr(), user_address_or_offset2, true);
|
||||
return target_futex_queue.ptr();
|
||||
},
|
||||
params.val2, is_empty, is_target_empty);
|
||||
if (is_empty)
|
||||
remove_futex_queue(vmobject, user_address_or_offset);
|
||||
if (is_target_empty && target_futex_queue)
|
||||
remove_futex_queue(vmobject2, user_address_or_offset2);
|
||||
}
|
||||
break;
|
||||
return woken_or_requeued;
|
||||
};
|
||||
|
||||
switch (cmd) {
|
||||
case FUTEX_WAIT:
|
||||
return do_wait(0);
|
||||
|
||||
case FUTEX_WAKE:
|
||||
return do_wake(vmobject.ptr(), user_address_or_offset, params.val, {});
|
||||
|
||||
case FUTEX_WAKE_OP: {
|
||||
Optional<u32> oldval;
|
||||
u32 op_arg = _FUTEX_OP_ARG(params.val3);
|
||||
auto op = _FUTEX_OP(params.val3);
|
||||
if (op & FUTEX_OP_ARG_SHIFT) {
|
||||
op_arg = 1 << op_arg;
|
||||
op &= FUTEX_OP_ARG_SHIFT;
|
||||
}
|
||||
atomic_thread_fence(AK::MemoryOrder::memory_order_release);
|
||||
switch (op) {
|
||||
case FUTEX_OP_SET:
|
||||
oldval = user_atomic_exchange_relaxed(params.userspace_address2, op_arg);
|
||||
break;
|
||||
case FUTEX_OP_ADD:
|
||||
oldval = user_atomic_fetch_add_relaxed(params.userspace_address2, op_arg);
|
||||
break;
|
||||
case FUTEX_OP_OR:
|
||||
oldval = user_atomic_fetch_or_relaxed(params.userspace_address2, op_arg);
|
||||
break;
|
||||
case FUTEX_OP_ANDN:
|
||||
oldval = user_atomic_fetch_and_not_relaxed(params.userspace_address2, op_arg);
|
||||
break;
|
||||
case FUTEX_OP_XOR:
|
||||
oldval = user_atomic_fetch_xor_relaxed(params.userspace_address2, op_arg);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!oldval.has_value())
|
||||
return -EFAULT;
|
||||
atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
|
||||
int result = do_wake(vmobject.ptr(), user_address_or_offset, params.val, {});
|
||||
if (params.val2 > 0) {
|
||||
bool compare_result;
|
||||
switch (_FUTEX_CMP(params.val3)) {
|
||||
case FUTEX_OP_CMP_EQ:
|
||||
compare_result = (oldval.value() == _FUTEX_CMP_ARG(params.val3));
|
||||
break;
|
||||
case FUTEX_OP_CMP_NE:
|
||||
compare_result = (oldval.value() != _FUTEX_CMP_ARG(params.val3));
|
||||
break;
|
||||
case FUTEX_OP_CMP_LT:
|
||||
compare_result = (oldval.value() < _FUTEX_CMP_ARG(params.val3));
|
||||
break;
|
||||
case FUTEX_OP_CMP_LE:
|
||||
compare_result = (oldval.value() <= _FUTEX_CMP_ARG(params.val3));
|
||||
break;
|
||||
case FUTEX_OP_CMP_GT:
|
||||
compare_result = (oldval.value() > _FUTEX_CMP_ARG(params.val3));
|
||||
break;
|
||||
case FUTEX_OP_CMP_GE:
|
||||
compare_result = (oldval.value() >= _FUTEX_CMP_ARG(params.val3));
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
if (compare_result)
|
||||
result += do_wake(vmobject2.ptr(), user_address_or_offset2, params.val2, {});
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
return 0;
|
||||
case FUTEX_REQUEUE:
|
||||
return do_requeue({});
|
||||
|
||||
case FUTEX_CMP_REQUEUE:
|
||||
return do_requeue(params.val3);
|
||||
|
||||
case FUTEX_WAIT_BITSET:
|
||||
ASSERT(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAIT
|
||||
if (params.val3 == 0)
|
||||
return -EINVAL;
|
||||
return do_wait(params.val3);
|
||||
|
||||
case FUTEX_WAKE_BITSET:
|
||||
ASSERT(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAKE
|
||||
if (params.val3 == 0)
|
||||
return -EINVAL;
|
||||
return do_wake(vmobject.ptr(), user_address_or_offset, params.val, params.val3);
|
||||
}
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -255,6 +255,7 @@ public:
|
|||
enum class Type {
|
||||
Unknown = 0,
|
||||
File,
|
||||
Futex,
|
||||
Plan9FS,
|
||||
Join,
|
||||
Queue,
|
||||
|
@ -346,6 +347,10 @@ public:
|
|||
}
|
||||
|
||||
bool set_block_condition(BlockCondition&, void* = nullptr);
|
||||
void set_block_condition_raw_locked(BlockCondition* block_condition)
|
||||
{
|
||||
m_block_condition = block_condition;
|
||||
}
|
||||
|
||||
mutable RecursiveSpinLock m_lock;
|
||||
|
||||
|
@ -390,6 +395,12 @@ public:
|
|||
});
|
||||
}
|
||||
|
||||
bool is_empty() const
|
||||
{
|
||||
ScopedSpinLock lock(m_lock);
|
||||
return is_empty_locked();
|
||||
}
|
||||
|
||||
protected:
|
||||
template<typename UnblockOne>
|
||||
bool unblock(UnblockOne unblock_one)
|
||||
|
@ -417,15 +428,52 @@ public:
|
|||
return did_unblock;
|
||||
}
|
||||
|
||||
bool is_empty_locked() const
|
||||
{
|
||||
ASSERT(m_lock.is_locked());
|
||||
return m_blockers.is_empty();
|
||||
}
|
||||
|
||||
virtual bool should_add_blocker(Blocker&, void*) { return true; }
|
||||
|
||||
SpinLock<u8> m_lock;
|
||||
|
||||
private:
|
||||
struct BlockerInfo {
|
||||
Blocker* blocker;
|
||||
void* data;
|
||||
};
|
||||
|
||||
Vector<BlockerInfo, 4> do_take_blockers(size_t count)
|
||||
{
|
||||
if (m_blockers.size() <= count)
|
||||
return move(m_blockers);
|
||||
|
||||
size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
|
||||
ASSERT(move_count > 0);
|
||||
|
||||
Vector<BlockerInfo, 4> taken_blockers;
|
||||
taken_blockers.ensure_capacity(move_count);
|
||||
for (size_t i = 0; i < move_count; i++)
|
||||
taken_blockers.append(m_blockers.take(i));
|
||||
m_blockers.remove(0, move_count);
|
||||
return taken_blockers;
|
||||
}
|
||||
|
||||
void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
|
||||
{
|
||||
if (blockers_to_append.is_empty())
|
||||
return;
|
||||
if (m_blockers.is_empty()) {
|
||||
m_blockers = move(blockers_to_append);
|
||||
return;
|
||||
}
|
||||
m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
|
||||
for (size_t i = 0; i < blockers_to_append.size(); i++)
|
||||
m_blockers.append(blockers_to_append.take(i));
|
||||
blockers_to_append.clear();
|
||||
}
|
||||
|
||||
mutable SpinLock<u8> m_lock;
|
||||
|
||||
private:
|
||||
Vector<BlockerInfo, 4> m_blockers;
|
||||
};
|
||||
|
||||
|
@ -471,6 +519,39 @@ public:
|
|||
bool m_did_unblock { false };
|
||||
};
|
||||
|
||||
class FutexBlocker : public Blocker {
|
||||
public:
|
||||
explicit FutexBlocker(FutexQueue&, u32);
|
||||
virtual ~FutexBlocker();
|
||||
|
||||
virtual Type blocker_type() const override { return Type::Futex; }
|
||||
virtual const char* state_string() const override { return "Futex"; }
|
||||
virtual void not_blocking(bool) override { }
|
||||
|
||||
virtual bool should_block() override
|
||||
{
|
||||
return m_should_block;
|
||||
}
|
||||
|
||||
u32 bitset() const { return m_bitset; }
|
||||
|
||||
void begin_requeue()
|
||||
{
|
||||
// We need to hold the lock until we moved it over
|
||||
m_relock_flags = m_lock.lock();
|
||||
}
|
||||
void finish_requeue(FutexQueue&);
|
||||
|
||||
bool unblock_bitset(u32 bitset);
|
||||
bool unblock(bool force = false);
|
||||
|
||||
protected:
|
||||
u32 m_bitset;
|
||||
u32 m_relock_flags { 0 };
|
||||
bool m_should_block { true };
|
||||
bool m_did_unblock { false };
|
||||
};
|
||||
|
||||
class FileBlocker : public Blocker {
|
||||
public:
|
||||
enum class BlockFlags : u32 {
|
||||
|
|
|
@ -148,6 +148,52 @@ bool Thread::QueueBlocker::unblock()
|
|||
return true;
|
||||
}
|
||||
|
||||
Thread::FutexBlocker::FutexBlocker(FutexQueue& futex_queue, u32 bitset)
|
||||
: m_bitset(bitset)
|
||||
{
|
||||
if (!set_block_condition(futex_queue, Thread::current()))
|
||||
m_should_block = false;
|
||||
}
|
||||
|
||||
Thread::FutexBlocker::~FutexBlocker()
|
||||
{
|
||||
}
|
||||
|
||||
void Thread::FutexBlocker::finish_requeue(FutexQueue& futex_queue)
|
||||
{
|
||||
ASSERT(m_lock.own_lock());
|
||||
set_block_condition_raw_locked(&futex_queue);
|
||||
// We can now release the lock
|
||||
m_lock.unlock(m_relock_flags);
|
||||
}
|
||||
|
||||
bool Thread::FutexBlocker::unblock_bitset(u32 bitset)
|
||||
{
|
||||
{
|
||||
ScopedSpinLock lock(m_lock);
|
||||
if (m_did_unblock || (bitset != FUTEX_BITSET_MATCH_ANY && (m_bitset & bitset) == 0))
|
||||
return false;
|
||||
|
||||
m_did_unblock = true;
|
||||
}
|
||||
|
||||
unblock_from_blocker();
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Thread::FutexBlocker::unblock(bool force)
|
||||
{
|
||||
{
|
||||
ScopedSpinLock lock(m_lock);
|
||||
if (m_did_unblock)
|
||||
return force;
|
||||
m_did_unblock = true;
|
||||
}
|
||||
|
||||
unblock_from_blocker();
|
||||
return true;
|
||||
}
|
||||
|
||||
Thread::FileDescriptionBlocker::FileDescriptionBlocker(FileDescription& description, BlockFlags flags, BlockFlags& unblocked_flags)
|
||||
: m_blocked_description(description)
|
||||
, m_flags(flags)
|
||||
|
|
|
@ -113,8 +113,47 @@ enum {
|
|||
|
||||
#define FD_CLOEXEC 1
|
||||
|
||||
#define _FUTEX_OP_SHIFT_OP 28
|
||||
#define _FUTEX_OP_MASK_OP 0xf
|
||||
#define _FUTEX_OP_SHIFT_CMP 24
|
||||
#define _FUTEX_OP_MASK_CMP 0xf
|
||||
#define _FUTEX_OP_SHIFT_OP_ARG 12
|
||||
#define _FUTEX_OP_MASK_OP_ARG 0xfff
|
||||
#define _FUTEX_OP_SHIFT_CMP_ARG 0
|
||||
#define _FUTEX_OP_MASK_CMP_ARG 0xfff
|
||||
|
||||
#define _FUTEX_OP(val3) (((val3) >> _FUTEX_OP_SHIFT_OP) & _FUTEX_OP_MASK_OP)
|
||||
#define _FUTEX_CMP(val3) (((val3) >> _FUTEX_OP_SHIFT_CMP) & _FUTEX_OP_MASK_CMP)
|
||||
#define _FUTEX_OP_ARG(val3) (((val3) >> _FUTEX_OP_SHIFT_OP_ARG) & _FUTEX_OP_MASK_OP_ARG)
|
||||
#define _FUTEX_CMP_ARG(val3) (((val3) >> _FUTEX_OP_SHIFT_CMP_ARG) & _FUTEX_OP_MASK_CMP_ARG)
|
||||
|
||||
#define FUTEX_OP_SET 0
|
||||
#define FUTEX_OP_ADD 1
|
||||
#define FUTEX_OP_OR 2
|
||||
#define FUTEX_OP_ANDN 3
|
||||
#define FUTEX_OP_XOR 4
|
||||
#define FUTEX_OP_ARG_SHIFT 8
|
||||
|
||||
#define FUTEX_OP_CMP_EQ 0
|
||||
#define FUTEX_OP_CMP_NE 1
|
||||
#define FUTEX_OP_CMP_LT 2
|
||||
#define FUTEX_OP_CMP_LE 3
|
||||
#define FUTEX_OP_CMP_GT 4
|
||||
#define FUTEX_OP_CMP_GE 5
|
||||
|
||||
#define FUTEX_WAIT 1
|
||||
#define FUTEX_WAKE 2
|
||||
#define FUTEX_REQUEUE 3
|
||||
#define FUTEX_CMP_REQUEUE 4
|
||||
#define FUTEX_WAKE_OP 5
|
||||
#define FUTEX_WAIT_BITSET 9
|
||||
#define FUTEX_WAKE_BITSET 10
|
||||
|
||||
#define FUTEX_PRIVATE_FLAG (1 << 7)
|
||||
#define FUTEX_CLOCK_REALTIME (1 << 8)
|
||||
#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
|
||||
|
||||
#define FUTEX_BITSET_MATCH_ANY 0xffffffff
|
||||
|
||||
#define S_IFMT 0170000
|
||||
#define S_IFDIR 0040000
|
||||
|
|
|
@ -192,6 +192,11 @@ public:
|
|||
return m_offset_in_vmobject;
|
||||
}
|
||||
|
||||
size_t offset_in_vmobject_from_vaddr(VirtualAddress vaddr) const
|
||||
{
|
||||
return m_offset_in_vmobject + vaddr.get() - this->vaddr().get();
|
||||
}
|
||||
|
||||
size_t amount_resident() const;
|
||||
size_t amount_shared() const;
|
||||
size_t amount_dirty() const;
|
||||
|
|
|
@ -45,6 +45,13 @@ VMObject::VMObject(size_t size)
|
|||
|
||||
VMObject::~VMObject()
|
||||
{
|
||||
{
|
||||
ScopedSpinLock lock(m_on_deleted_lock);
|
||||
for (auto& it : m_on_deleted)
|
||||
it->vmobject_deleted(*this);
|
||||
m_on_deleted.clear();
|
||||
}
|
||||
|
||||
MM.unregister_vmobject(*this);
|
||||
ASSERT(m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <AK/HashTable.h>
|
||||
#include <AK/InlineLinkedList.h>
|
||||
#include <AK/RefCounted.h>
|
||||
#include <AK/RefPtr.h>
|
||||
|
@ -38,6 +39,12 @@ namespace Kernel {
|
|||
class Inode;
|
||||
class PhysicalPage;
|
||||
|
||||
class VMObjectDeletedHandler {
|
||||
public:
|
||||
virtual ~VMObjectDeletedHandler() { }
|
||||
virtual void vmobject_deleted(VMObject&) = 0;
|
||||
};
|
||||
|
||||
class VMObject : public RefCounted<VMObject>
|
||||
, public Weakable<VMObject>
|
||||
, public InlineLinkedListNode<VMObject> {
|
||||
|
@ -71,6 +78,15 @@ public:
|
|||
ALWAYS_INLINE void unref_region() { m_regions_count--; }
|
||||
ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count > 1; }
|
||||
|
||||
void register_on_deleted_handler(VMObjectDeletedHandler& handler)
|
||||
{
|
||||
m_on_deleted.set(&handler);
|
||||
}
|
||||
void unregister_on_deleted_handler(VMObjectDeletedHandler& handler)
|
||||
{
|
||||
m_on_deleted.remove(&handler);
|
||||
}
|
||||
|
||||
protected:
|
||||
explicit VMObject(size_t);
|
||||
explicit VMObject(const VMObject&);
|
||||
|
@ -89,6 +105,8 @@ private:
|
|||
VMObject(VMObject&&) = delete;
|
||||
|
||||
Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_regions_count { 0 };
|
||||
HashTable<VMObjectDeletedHandler*> m_on_deleted;
|
||||
SpinLock<u8> m_on_deleted_lock;
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -71,14 +71,15 @@ void WaitQueue::wake_one()
|
|||
m_wake_requested = !did_unblock_one;
|
||||
}
|
||||
|
||||
void WaitQueue::wake_n(u32 wake_count)
|
||||
u32 WaitQueue::wake_n(u32 wake_count)
|
||||
{
|
||||
if (wake_count == 0)
|
||||
return; // should we assert instead?
|
||||
return 0; // should we assert instead?
|
||||
ScopedSpinLock lock(m_lock);
|
||||
#ifdef WAITQUEUE_DEBUG
|
||||
dbg() << "WaitQueue @ " << this << ": wake_n(" << wake_count << ")";
|
||||
#endif
|
||||
u32 did_wake = 0;
|
||||
bool did_unblock_some = do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
|
||||
ASSERT(data);
|
||||
ASSERT(b.blocker_type() == Thread::Blocker::Type::Queue);
|
||||
|
@ -86,23 +87,25 @@ void WaitQueue::wake_n(u32 wake_count)
|
|||
#ifdef WAITQUEUE_DEBUG
|
||||
dbg() << "WaitQueue @ " << this << ": wake_n unblocking " << *static_cast<Thread*>(data);
|
||||
#endif
|
||||
ASSERT(wake_count > 0);
|
||||
ASSERT(did_wake < wake_count);
|
||||
if (blocker.unblock()) {
|
||||
if (--wake_count == 0)
|
||||
if (++did_wake >= wake_count)
|
||||
stop_iterating = true;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
m_wake_requested = !did_unblock_some;
|
||||
return did_wake;
|
||||
}
|
||||
|
||||
void WaitQueue::wake_all()
|
||||
u32 WaitQueue::wake_all()
|
||||
{
|
||||
ScopedSpinLock lock(m_lock);
|
||||
#ifdef WAITQUEUE_DEBUG
|
||||
dbg() << "WaitQueue @ " << this << ": wake_all";
|
||||
#endif
|
||||
u32 did_wake = 0;
|
||||
bool did_unblock_any = do_unblock([&](Thread::Blocker& b, void* data, bool&) {
|
||||
ASSERT(data);
|
||||
ASSERT(b.blocker_type() == Thread::Blocker::Type::Queue);
|
||||
|
@ -110,11 +113,14 @@ void WaitQueue::wake_all()
|
|||
#ifdef WAITQUEUE_DEBUG
|
||||
dbg() << "WaitQueue @ " << this << ": wake_all unblocking " << *static_cast<Thread*>(data);
|
||||
#endif
|
||||
bool did_unblock = blocker.unblock();
|
||||
ASSERT(did_unblock);
|
||||
return true;
|
||||
if (blocker.unblock()) {
|
||||
did_wake++;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
m_wake_requested = !did_unblock_any;
|
||||
return did_wake;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -35,8 +35,8 @@ namespace Kernel {
|
|||
class WaitQueue : public Thread::BlockCondition {
|
||||
public:
|
||||
void wake_one();
|
||||
void wake_n(u32 wake_count);
|
||||
void wake_all();
|
||||
u32 wake_n(u32 wake_count);
|
||||
u32 wake_all();
|
||||
|
||||
template<class... Args>
|
||||
Thread::BlockResult wait_on(const Thread::BlockTimeout& timeout, Args&&... args)
|
||||
|
|
|
@ -60,6 +60,8 @@ add_compile_definitions("FIFO_DEBUG")
|
|||
add_compile_definitions("FILEDESCRIPTION_DEBUG")
|
||||
add_compile_definitions("FILL_PATH_DEBUG")
|
||||
add_compile_definitions("FORK_DEBUG")
|
||||
add_compile_definitions("FUTEXQUEUE_DEBUG")
|
||||
add_compile_definitions("FUTEX_DEBUG")
|
||||
add_compile_definitions("GBOXLAYOUT_DEBUG")
|
||||
add_compile_definitions("GEMINIJOB_DEBUG")
|
||||
add_compile_definitions("GEMINI_DEBUG")
|
||||
|
|
|
@ -60,10 +60,38 @@ int profiling_disable(pid_t pid)
|
|||
__RETURN_WITH_ERRNO(rc, rc, -1);
|
||||
}
|
||||
|
||||
int futex(int32_t* userspace_address, int futex_op, int32_t value, const struct timespec* timeout)
|
||||
int futex(uint32_t* userspace_address, int futex_op, uint32_t value, const struct timespec* timeout, uint32_t* userspace_address2, uint32_t value3)
|
||||
{
|
||||
Syscall::SC_futex_params params { userspace_address, futex_op, value, timeout };
|
||||
int rc = syscall(SC_futex, ¶ms);
|
||||
int rc;
|
||||
switch (futex_op & FUTEX_CMD_MASK) {
|
||||
//case FUTEX_CMP_REQUEUE:
|
||||
// FUTEX_CMP_REQUEUE_PI:
|
||||
case FUTEX_WAKE_OP: {
|
||||
// These interpret timeout as a u32 value for val2
|
||||
Syscall::SC_futex_params params {
|
||||
.userspace_address = userspace_address,
|
||||
.futex_op = futex_op,
|
||||
.val = value,
|
||||
.val2 = (uint32_t)timeout,
|
||||
.userspace_address2 = userspace_address2,
|
||||
.val3 = value3
|
||||
};
|
||||
rc = syscall(SC_futex, ¶ms);
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
Syscall::SC_futex_params params {
|
||||
.userspace_address = userspace_address,
|
||||
.futex_op = futex_op,
|
||||
.val = value,
|
||||
.timeout = timeout,
|
||||
.userspace_address2 = userspace_address2,
|
||||
.val3 = value3
|
||||
};
|
||||
rc = syscall(SC_futex, ¶ms);
|
||||
break;
|
||||
}
|
||||
}
|
||||
__RETURN_WITH_ERRNO(rc, rc, -1);
|
||||
}
|
||||
|
||||
|
|
|
@ -45,10 +45,48 @@ int profiling_disable(pid_t);
|
|||
#define THREAD_PRIORITY_HIGH 50
|
||||
#define THREAD_PRIORITY_MAX 99
|
||||
|
||||
#define _FUTEX_OP_SHIFT_OP 28
|
||||
#define _FUTEX_OP_MASK_OP 0xf
|
||||
#define _FUTEX_OP_SHIFT_CMP 24
|
||||
#define _FUTEX_OP_MASK_CMP 0xf
|
||||
#define _FUTEX_OP_SHIFT_OP_ARG 12
|
||||
#define _FUTEX_OP_MASK_OP_ARG 0xfff
|
||||
#define _FUTEX_OP_SHIFT_CMP_ARG 0
|
||||
#define _FUTEX_OP_MASK_CMP_ARG 0xfff
|
||||
|
||||
#define FUTEX_OP(op, op_arg, cmp, cmp_arg) \
|
||||
((((op)&_FUTEX_OP_MASK_OP) << _FUTEX_OP_SHIFT_OP) | (((cmp)&_FUTEX_OP_MASK_CMP) << _FUTEX_OP_SHIFT_CMP) | (((op_arg)&_FUTEX_OP_MASK_OP_ARG) << _FUTEX_OP_SHIFT_OP_ARG) | (((cmp_arg)&_FUTEX_OP_MASK_CMP_ARG) << _FUTEX_OP_SHIFT_CMP_ARG))
|
||||
|
||||
#define FUTEX_OP_SET 0
|
||||
#define FUTEX_OP_ADD 1
|
||||
#define FUTEX_OP_OR 2
|
||||
#define FUTEX_OP_ANDN 3
|
||||
#define FUTEX_OP_XOR 4
|
||||
#define FUTEX_OP_ARG_SHIFT 8
|
||||
|
||||
#define FUTEX_OP_CMP_EQ 0
|
||||
#define FUTEX_OP_CMP_NE 1
|
||||
#define FUTEX_OP_CMP_LT 2
|
||||
#define FUTEX_OP_CMP_LE 3
|
||||
#define FUTEX_OP_CMP_GT 4
|
||||
#define FUTEX_OP_CMP_GE 5
|
||||
|
||||
#define FUTEX_WAIT 1
|
||||
#define FUTEX_WAKE 2
|
||||
|
||||
int futex(int32_t* userspace_address, int futex_op, int32_t value, const struct timespec* timeout);
|
||||
#define FUTEX_REQUEUE 3
|
||||
#define FUTEX_CMP_REQUEUE 4
|
||||
#define FUTEX_WAKE_OP 5
|
||||
#define FUTEX_WAIT_BITSET 9
|
||||
#define FUTEX_WAKE_BITSET 10
|
||||
|
||||
#define FUTEX_PRIVATE_FLAG (1 << 7)
|
||||
#define FUTEX_CLOCK_REALTIME (1 << 8)
|
||||
#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
|
||||
|
||||
#define FUTEX_BITSET_MATCH_ANY 0xffffffff
|
||||
|
||||
int futex(uint32_t* userspace_address, int futex_op, uint32_t value, const struct timespec* timeout, uint32_t* userspace_address2, uint32_t value3);
|
||||
|
||||
#define PURGE_ALL_VOLATILE 0x1
|
||||
#define PURGE_ALL_CLEAN_INODE 0x2
|
||||
|
|
50
Userland/Libraries/LibC/serenity.h.rej
Normal file
50
Userland/Libraries/LibC/serenity.h.rej
Normal file
|
@ -0,0 +1,50 @@
|
|||
diff a/Userland/Libraries/LibC/serenity.h b/Userland/Libraries/LibC/serenity.h (rejected hunks)
|
||||
@@ -55,10 +55,47 @@ int profiling_disable(pid_t);
|
||||
int set_thread_boost(pid_t tid, int amount);
|
||||
int set_process_boost(pid_t, int amount);
|
||||
|
||||
+#define _FUTEX_OP_SHIFT_OP 28
|
||||
+#define _FUTEX_OP_MASK_OP 0xf
|
||||
+#define _FUTEX_OP_SHIFT_CMP 24
|
||||
+#define _FUTEX_OP_MASK_CMP 0xf
|
||||
+#define _FUTEX_OP_SHIFT_OP_ARG 12
|
||||
+#define _FUTEX_OP_MASK_OP_ARG 0xfff
|
||||
+#define _FUTEX_OP_SHIFT_CMP_ARG 0
|
||||
+#define _FUTEX_OP_MASK_CMP_ARG 0xfff
|
||||
+
|
||||
+#define FUTEX_OP(op, op_arg, cmp, cmp_arg) \
|
||||
+ ((((op)&_FUTEX_OP_MASK_OP) << _FUTEX_OP_SHIFT_OP) | (((cmp)&_FUTEX_OP_MASK_CMP) << _FUTEX_OP_SHIFT_CMP) | (((op_arg)&_FUTEX_OP_MASK_OP_ARG) << _FUTEX_OP_SHIFT_OP_ARG) | (((cmp_arg)&_FUTEX_OP_MASK_CMP_ARG) << _FUTEX_OP_SHIFT_CMP_ARG))
|
||||
+
|
||||
+#define FUTEX_OP_SET 0
|
||||
+#define FUTEX_OP_ADD 1
|
||||
+#define FUTEX_OP_OR 2
|
||||
+#define FUTEX_OP_ANDN 3
|
||||
+#define FUTEX_OP_XOR 4
|
||||
+#define FUTEX_OP_ARG_SHIFT 8
|
||||
+
|
||||
+#define FUTEX_OP_CMP_EQ 0
|
||||
+#define FUTEX_OP_CMP_NE 1
|
||||
+#define FUTEX_OP_CMP_LT 2
|
||||
+#define FUTEX_OP_CMP_LE 3
|
||||
+#define FUTEX_OP_CMP_GT 4
|
||||
+#define FUTEX_OP_CMP_GE 5
|
||||
+
|
||||
#define FUTEX_WAIT 1
|
||||
#define FUTEX_WAKE 2
|
||||
+#define FUTEX_REQUEUE 3
|
||||
+#define FUTEX_CMP_REQUEUE 4
|
||||
+#define FUTEX_WAKE_OP 5
|
||||
+#define FUTEX_WAIT_BITSET 9
|
||||
+#define FUTEX_WAKE_BITSET 10
|
||||
+
|
||||
+#define FUTEX_PRIVATE_FLAG (1 << 7)
|
||||
+#define FUTEX_CLOCK_REALTIME (1 << 8)
|
||||
+#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
|
||||
+
|
||||
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
|
||||
|
||||
-int futex(int32_t* userspace_address, int futex_op, int32_t value, const struct timespec* timeout);
|
||||
+int futex(uint32_t* userspace_address, int futex_op, uint32_t value, const struct timespec* timeout, uint32_t* userspace_address2, uint32_t value3);
|
||||
|
||||
#define PURGE_ALL_VOLATILE 0x1
|
||||
#define PURGE_ALL_CLEAN_INODE 0x2
|
|
@ -78,7 +78,7 @@ struct utimbuf {
|
|||
|
||||
typedef int pthread_t;
|
||||
typedef int pthread_key_t;
|
||||
typedef int32_t pthread_once_t;
|
||||
typedef uint32_t pthread_once_t;
|
||||
|
||||
typedef struct __pthread_mutex_t {
|
||||
uint32_t lock;
|
||||
|
@ -93,7 +93,7 @@ typedef struct __pthread_mutexattr_t {
|
|||
} pthread_mutexattr_t;
|
||||
|
||||
typedef struct __pthread_cond_t {
|
||||
int32_t value;
|
||||
uint32_t value;
|
||||
uint32_t previous;
|
||||
int clockid; // clockid_t
|
||||
} pthread_cond_t;
|
||||
|
|
|
@ -495,12 +495,25 @@ int pthread_cond_destroy(pthread_cond_t*)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int futex_wait(uint32_t& futex_addr, uint32_t value, const struct timespec* abstime)
|
||||
{
|
||||
int saved_errno = errno;
|
||||
// NOTE: FUTEX_WAIT takes a relative timeout, so use FUTEX_WAIT_BITSET instead!
|
||||
int rc = futex(&futex_addr, FUTEX_WAIT_BITSET, value, abstime, nullptr, FUTEX_BITSET_MATCH_ANY);
|
||||
if (rc < 0 && errno == EAGAIN) {
|
||||
// If we didn't wait, that's not an error
|
||||
errno = saved_errno;
|
||||
rc = 0;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex, const struct timespec* abstime)
|
||||
{
|
||||
i32 value = cond->value;
|
||||
u32 value = cond->value;
|
||||
cond->previous = value;
|
||||
pthread_mutex_unlock(mutex);
|
||||
int rc = futex(&cond->value, FUTEX_WAIT, value, abstime);
|
||||
int rc = futex_wait(cond->value, value, abstime);
|
||||
pthread_mutex_lock(mutex);
|
||||
return rc;
|
||||
}
|
||||
|
@ -538,7 +551,7 @@ int pthread_cond_signal(pthread_cond_t* cond)
|
|||
{
|
||||
u32 value = cond->previous + 1;
|
||||
cond->value = value;
|
||||
int rc = futex(&cond->value, FUTEX_WAKE, 1, nullptr);
|
||||
int rc = futex(&cond->value, FUTEX_WAKE, 1, nullptr, nullptr, 0);
|
||||
ASSERT(rc == 0);
|
||||
return 0;
|
||||
}
|
||||
|
@ -547,7 +560,7 @@ int pthread_cond_broadcast(pthread_cond_t* cond)
|
|||
{
|
||||
u32 value = cond->previous + 1;
|
||||
cond->value = value;
|
||||
int rc = futex(&cond->value, FUTEX_WAKE, INT32_MAX, nullptr);
|
||||
int rc = futex(&cond->value, FUTEX_WAKE, INT32_MAX, nullptr, nullptr, 0);
|
||||
ASSERT(rc == 0);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -65,7 +65,7 @@ int pthread_once(pthread_once_t* self, void (*callback)(void))
|
|||
// anyone.
|
||||
break;
|
||||
case State::PERFORMING_WITH_WAITERS:
|
||||
futex(self, FUTEX_WAKE, INT_MAX, nullptr);
|
||||
futex(self, FUTEX_WAKE, INT_MAX, nullptr, nullptr, 0);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -95,7 +95,7 @@ int pthread_once(pthread_once_t* self, void (*callback)(void))
|
|||
[[fallthrough]];
|
||||
case State::PERFORMING_WITH_WAITERS:
|
||||
// Let's wait for it.
|
||||
futex(self, FUTEX_WAIT, state2, nullptr);
|
||||
futex(self, FUTEX_WAIT, state2, nullptr, nullptr, 0);
|
||||
// We have been woken up, but that might have been due to a signal
|
||||
// or something, so we have to reevaluate. We need acquire ordering
|
||||
// here for the same reason as above. Hopefully we'll just see
|
||||
|
|
Loading…
Reference in a new issue