5d491fa1cd
This is a freelist allocator with static size classes that works as a complement to the generic kmalloc(). It's a lot faster than kmalloc() since allocation just means popping from the freelist. It's also significantly more compact when there are a lot of objects smaller than the minimum kmalloc chunk size (32 bytes). This patch enables it for the Region and PhysicalPage classes. In the PhysicalPage (8 bytes) case, it's a huge improvement since we no longer waste 75% of the storage allocated. There are also a number of ways this can be improved, so let's keep working on it going forward.
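As a rough illustration of the mechanism described above (a hedged sketch under assumed names, not the actual Kernel/Heap/SlabAllocator.h code; SlabSketch and FreeChunk are invented for this example): each size class keeps a singly linked freelist threaded through its own free storage, so allocating is a single pointer pop and freeing is a single push. With an 8-byte size class, 8-byte PhysicalPage objects waste nothing, whereas rounding them up to a 32-byte kmalloc chunk wastes 24 of every 32 bytes, i.e. the 75% mentioned above.

// Illustrative fixed-size freelist ("slab") allocator sketch; not the kernel's implementation.
#include <stddef.h>
#include <stdint.h>

template<size_t chunk_size>
class SlabSketch {
    static_assert(chunk_size >= sizeof(void*), "a free chunk must be able to hold the freelist link");

public:
    // Carve a raw memory block into chunk_size pieces and push each one onto the freelist.
    void add_memory(void* base, size_t byte_count)
    {
        for (size_t offset = 0; offset + chunk_size <= byte_count; offset += chunk_size)
            deallocate(static_cast<uint8_t*>(base) + offset);
    }

    // Allocation is just popping the head of the freelist.
    void* allocate()
    {
        if (!m_freelist)
            return nullptr; // Exhausted; a real allocator could fall back to kmalloc() or grab more pages.
        FreeChunk* chunk = m_freelist;
        m_freelist = chunk->next;
        return chunk;
    }

    // Deallocation pushes the chunk back; the link is stored inside the free chunk itself.
    void deallocate(void* ptr)
    {
        auto* chunk = static_cast<FreeChunk*>(ptr);
        chunk->next = m_freelist;
        m_freelist = chunk;
    }

private:
    struct FreeChunk {
        FreeChunk* next;
    };
    FreeChunk* m_freelist { nullptr };
};

Under a scheme like this, allocating a Region or a PhysicalPage is O(1) and touches only the freelist head, which is where the speedup over the generic kmalloc() comes from.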
131 lines
3.9 KiB
C++
#pragma once

#include <AK/Bitmap.h>
#include <AK/InlineLinkedList.h>
#include <AK/String.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/RangeAllocator.h>

class Inode;
class VMObject;

class Region final : public RefCounted<Region>
    , public InlineLinkedListNode<Region> {
    friend class MemoryManager;

    MAKE_SLAB_ALLOCATED(Region)
public:
    enum Access {
        Read = 1,
        Write = 2,
        Execute = 4,
    };

    static NonnullRefPtr<Region> create_user_accessible(const Range&, const StringView& name, u8 access, bool cow = false);
    static NonnullRefPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cow = false);
    static NonnullRefPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<Inode>, const StringView& name, u8 access, bool cow = false);
    static NonnullRefPtr<Region> create_kernel_only(const Range&, const StringView& name, u8 access, bool cow = false);

    ~Region();

    const Range& range() const { return m_range; }
    VirtualAddress vaddr() const { return m_range.base(); }
    size_t size() const { return m_range.size(); }
    bool is_readable() const { return m_access & Access::Read; }
    bool is_writable() const { return m_access & Access::Write; }
    bool is_executable() const { return m_access & Access::Execute; }
    const String& name() const { return m_name; }
    unsigned access() const { return m_access; }

    void set_name(const String& name) { m_name = name; }

    const VMObject& vmobject() const { return *m_vmobject; }
    VMObject& vmobject() { return *m_vmobject; }

    bool is_shared() const { return m_shared; }
    void set_shared(bool shared) { m_shared = shared; }

    bool is_user_accessible() const { return m_user_accessible; }

    NonnullRefPtr<Region> clone();

    bool contains(VirtualAddress vaddr) const
    {
        return m_range.contains(vaddr);
    }

    bool contains(const Range& range) const
    {
        return m_range.contains(range);
    }

    unsigned page_index_from_address(VirtualAddress vaddr) const
    {
        return (vaddr - m_range.base()).get() / PAGE_SIZE;
    }

    size_t first_page_index() const
    {
        return m_offset_in_vmo / PAGE_SIZE;
    }

    size_t last_page_index() const
    {
        return (first_page_index() + page_count()) - 1;
    }

    size_t page_count() const
    {
        return size() / PAGE_SIZE;
    }

    int commit();

    size_t amount_resident() const;
    size_t amount_shared() const;

    PageDirectory* page_directory() { return m_page_directory.ptr(); }

    void set_page_directory(PageDirectory& page_directory)
    {
        ASSERT(!m_page_directory || m_page_directory == &page_directory);
        m_page_directory = page_directory;
    }

    void release_page_directory()
    {
        ASSERT(m_page_directory);
        m_page_directory.clear();
    }

    bool should_cow(size_t page_index) const { return m_cow_map.get(page_index); }
    void set_should_cow(size_t page_index, bool cow) { m_cow_map.set(page_index, cow); }

    void set_writable(bool b)
    {
        if (b)
            m_access |= Access::Write;
        else
            m_access &= ~Access::Write;
    }

    // For InlineLinkedListNode
    Region* m_next { nullptr };
    Region* m_prev { nullptr };

private:
    Region(const Range&, const String&, u8 access, bool cow = false);
    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmo, const String&, u8 access, bool cow = false);
    Region(const Range&, RefPtr<Inode>&&, const String&, u8 access, bool cow = false);

    RefPtr<PageDirectory> m_page_directory;
    Range m_range;
    size_t m_offset_in_vmo { 0 };
    NonnullRefPtr<VMObject> m_vmobject;
    String m_name;
    u8 m_access { 0 };
    bool m_shared { false };
    bool m_user_accessible { false };
    Bitmap m_cow_map;
};
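One more note on the MAKE_SLAB_ALLOCATED(Region) line near the top of the class: that macro (defined in Kernel/Heap/SlabAllocator.h, not shown here) is what routes Region's heap allocations through the slab allocator. A plausible shape for such a macro is sketched below; the names slab_alloc/slab_dealloc, the signatures, and the macro name itself are assumptions for illustration, not the kernel's actual definitions.

// Hypothetical sketch of a class-scope allocation macro; the real MAKE_SLAB_ALLOCATED
// lives in Kernel/Heap/SlabAllocator.h and may differ in names and signatures.
#include <stddef.h>

// Assumed kernel-provided per-size-class entry points (guessed signatures).
void* slab_alloc(size_t slab_size);
void slab_dealloc(void* ptr, size_t slab_size);

// Overriding class-scope operator new/delete makes `new Region(...)` and `delete region`
// go through the slab allocator instead of the generic kmalloc().
#define SLAB_ALLOCATED_SKETCH(type) \
public: \
    void* operator new(size_t) { return slab_alloc(sizeof(type)); } \
    void operator delete(void* ptr) { slab_dealloc(ptr, sizeof(type)); } \
private:

Used inside a class body, a macro like this takes effect for every new/delete of that class without any call-site changes, which is consistent with how MAKE_SLAB_ALLOCATED(Region) simply sits inside the class body above.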