Kernel: Add a SpinLock to the WaitQueue

We need to be able to prevent a WaitQueue from being
modified by another CPU, so add a SpinLock to it.

Because this pushes some other class over the 64-byte
slab limit, we also need to add a new 128-byte bucket to
the slab allocator.
Authored by Tom on 2020-07-04 16:00:57 -06:00; committed by Andreas Kling
parent 788b2d64c6
commit 49f5069b76
3 changed files with 13 additions and 5 deletions
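
The two primitives this commit leans on are SpinLock, a busy-waiting lock built on an atomic word, and ScopedSpinLock, the RAII guard that holds such a lock for the duration of a scope. Below is a minimal, self-contained sketch of that pattern in standard C++. The names SpinLockSketch and ScopedSpinLockSketch are invented for illustration; the kernel's real SpinLock also coordinates with interrupt and critical-section state, so this shows only the shape of the mechanism, not SerenityOS's actual implementation.

#include <atomic>
#include <cstdint>

// A busy-waiting lock over an atomic word, in the spirit of SpinLock<u32>.
class SpinLockSketch {
public:
    void lock()
    {
        // Spin until we atomically swap 0 -> 1, i.e. until we own the lock.
        while (m_lock.exchange(1, std::memory_order_acquire) != 0) {
            // Busy-wait. A real kernel would execute a pause/relax hint here.
        }
    }

    void unlock()
    {
        m_lock.store(0, std::memory_order_release);
    }

private:
    std::atomic<std::uint32_t> m_lock { 0 };
};

// RAII guard in the spirit of ScopedSpinLock: acquire on construction,
// release on destruction, so no early return path can leak the lock.
class ScopedSpinLockSketch {
public:
    explicit ScopedSpinLockSketch(SpinLockSketch& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~ScopedSpinLockSketch() { m_lock.unlock(); }

private:
    SpinLockSketch& m_lock;
};

The RAII shape matters for the diff below: the wake functions have early returns, and a scoped guard releases the lock on every one of them.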

Kernel/Heap/SlabAllocator.cpp

@@ -114,6 +114,7 @@ private:
 static SlabAllocator<16> s_slab_allocator_16;
 static SlabAllocator<32> s_slab_allocator_32;
 static SlabAllocator<64> s_slab_allocator_64;
+static SlabAllocator<128> s_slab_allocator_128;

 static_assert(sizeof(Region) <= s_slab_allocator_64.slab_size());
@@ -130,6 +131,7 @@ void slab_alloc_init()
     s_slab_allocator_16.init(128 * KB);
     s_slab_allocator_32.init(128 * KB);
     s_slab_allocator_64.init(512 * KB);
+    s_slab_allocator_128.init(512 * KB);
 }

 void* slab_alloc(size_t slab_size)
@@ -140,6 +142,8 @@ void* slab_alloc(size_t slab_size)
     if (slab_size <= 32)
         return s_slab_allocator_32.alloc();
     if (slab_size <= 64)
         return s_slab_allocator_64.alloc();
+    if (slab_size <= 128)
+        return s_slab_allocator_128.alloc();
     ASSERT_NOT_REACHED();
 }
@@ -151,6 +155,8 @@ void slab_dealloc(void* ptr, size_t slab_size)
     if (slab_size <= 32)
         return s_slab_allocator_32.dealloc(ptr);
     if (slab_size <= 64)
         return s_slab_allocator_64.dealloc(ptr);
+    if (slab_size <= 128)
+        return s_slab_allocator_128.dealloc(ptr);
     ASSERT_NOT_REACHED();
 }
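
As context for the new bucket: a slab allocator of this kind pre-carves a region into equal fixed-size slots and threads a free list through them, so alloc() and dealloc() are O(1) pointer swaps that never touch the general-purpose heap. The following is a minimal, self-contained sketch of one bucket under those assumptions; SlabBucket and its malloc-backed region are illustrative stand-ins, not SerenityOS's actual SlabAllocator.

#include <cstddef>
#include <cstdlib>

template<size_t slab_size>
class SlabBucket {
    struct FreeSlab {
        FreeSlab* next;
    };
    static_assert(slab_size >= sizeof(FreeSlab), "slot must fit the free-list link");

public:
    void init(size_t region_size)
    {
        // Carve the region into slab_size-byte slots and link them together.
        // (A sketch: the real kernel maps pages rather than calling malloc.)
        char* base = static_cast<char*>(std::malloc(region_size));
        for (size_t offset = 0; offset + slab_size <= region_size; offset += slab_size) {
            auto* slab = reinterpret_cast<FreeSlab*>(base + offset);
            slab->next = m_freelist;
            m_freelist = slab;
        }
    }

    void* alloc()
    {
        if (!m_freelist)
            return nullptr; // bucket exhausted
        FreeSlab* slab = m_freelist;
        m_freelist = slab->next;
        return slab;
    }

    void dealloc(void* ptr)
    {
        auto* slab = static_cast<FreeSlab*>(ptr);
        slab->next = m_freelist;
        m_freelist = slab;
    }

private:
    FreeSlab* m_freelist { nullptr };
};

With buckets of 16, 32, 64, and now 128 bytes, slab_alloc() simply routes each request to the smallest bucket that fits, which is exactly the dispatch chain extended above.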

Kernel/WaitQueue.cpp

@@ -39,13 +39,13 @@ WaitQueue::~WaitQueue()
 void WaitQueue::enqueue(Thread& thread)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     m_threads.append(thread);
 }

 void WaitQueue::wake_one(Atomic<bool>* lock)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (lock)
         *lock = false;
     if (m_threads.is_empty())
@@ -57,7 +57,7 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
 void WaitQueue::wake_n(i32 wake_count)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (m_threads.is_empty())
         return;
@@ -72,7 +72,7 @@ void WaitQueue::wake_n(i32 wake_count)
 void WaitQueue::wake_all()
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (m_threads.is_empty())
         return;
     while (!m_threads.is_empty())
@@ -82,7 +82,7 @@ void WaitQueue::wake_all()
 void WaitQueue::clear()
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     m_threads.clear();
 }
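
To make the race concrete: without a per-queue lock, enqueue() on one CPU and wake_one() on another can interleave their list manipulation and corrupt the queue. The sketch below shows the serialization pattern the diff adopts, as self-contained standard C++: WaitQueueSketch and Waiter are hypothetical stand-ins, std::mutex stands in for the kernel's SpinLock, and the real code unblocks Threads through the scheduler instead of returning them.

#include <deque>
#include <mutex>

struct Waiter { /* stand-in for Thread */ };

class WaitQueueSketch {
public:
    void enqueue(Waiter& waiter)
    {
        // Taking the queue's own lock first serializes against other CPUs.
        std::lock_guard guard(m_lock);
        m_waiters.push_back(&waiter);
    }

    Waiter* wake_one()
    {
        std::lock_guard guard(m_lock);
        if (m_waiters.empty())
            return nullptr;
        Waiter* waiter = m_waiters.front();
        m_waiters.pop_front();
        return waiter; // the caller would now unblock this thread
    }

private:
    std::mutex m_lock; // the kernel uses a SpinLock here
    std::deque<Waiter*> m_waiters;
};

Because every method takes m_lock before touching m_waiters, concurrent operations execute one after the other, which is the invariant the ScopedSpinLock lines above establish.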

Kernel/WaitQueue.h

@@ -28,6 +28,7 @@
 #include <AK/Atomic.h>
 #include <AK/SinglyLinkedList.h>
+#include <Kernel/SpinLock.h>
 #include <Kernel/Thread.h>

 namespace Kernel {
@@ -46,6 +47,7 @@ public:
 private:
     typedef IntrusiveList<Thread, &Thread::m_wait_queue_node> ThreadList;
     ThreadList m_threads;
+    SpinLock<u32> m_lock;
 };

 }
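
A final note on the ThreadList typedef above: an IntrusiveList stores its link inside each element (here, Thread::m_wait_queue_node) rather than in separately allocated list cells, so enqueue() never allocates memory, which matters in code that runs with a spinlock held. A minimal, self-contained sketch of that idea, with hypothetical names:

struct ThreadSketch {
    int id { 0 };
    ThreadSketch* wait_queue_next { nullptr }; // the embedded link (the "intrusive" part)
};

class IntrusiveListSketch {
public:
    void append(ThreadSketch& thread)
    {
        // No allocation: we just link through the element's embedded pointer.
        thread.wait_queue_next = nullptr;
        if (m_tail)
            m_tail->wait_queue_next = &thread;
        else
            m_head = &thread;
        m_tail = &thread;
    }

    ThreadSketch* take_first()
    {
        ThreadSketch* thread = m_head;
        if (thread) {
            m_head = thread->wait_queue_next;
            if (!m_head)
                m_tail = nullptr;
            thread->wait_queue_next = nullptr;
        }
        return thread;
    }

    bool is_empty() const { return m_head == nullptr; }

private:
    ThreadSketch* m_head { nullptr };
    ThreadSketch* m_tail { nullptr };
};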