#pragma once

#include <AK/Badge.h>
#include <AK/Bitmap.h>
#include <AK/ByteBuffer.h>
#include <AK/HashTable.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <AK/Vector.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>

// Round a size or address up to the next page boundary.
#define PAGE_ROUND_UP(x) ((((u32)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))

class KBuffer;
class SynthFSInode;

enum class PageFaultResponse {
    ShouldCrash,
    Continue,
};

// Convenience accessor for the MemoryManager singleton.
#define MM MemoryManager::the()

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class PhysicalRegion;
    friend class Region;
    friend class VMObject;
    friend Optional<KBuffer> procfs$mm(InodeIdentifier);
    friend Optional<KBuffer> procfs$memstat(InodeIdentifier);

public:
    static MemoryManager& the();

    static void initialize();

    PageFaultResponse handle_page_fault(const PageFault&);

    bool map_region(Process&, Region&);
    bool unmap_region(Region&, bool deallocate_range = true);

    void populate_page_directory(PageDirectory&);

    void enter_process_paging_scope(Process&);

    bool validate_user_read(const Process&, VirtualAddress) const;
    bool validate_user_write(const Process&, VirtualAddress) const;

    enum class ShouldZeroFill {
        No,
        Yes
    };

    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    void deallocate_user_physical_page(PhysicalPage&&);
    void deallocate_supervisor_physical_page(PhysicalPage&&);

    void remap_region(PageDirectory&, Region&);

    void map_for_kernel(VirtualAddress, PhysicalAddress);

    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, bool user_accessible = false, bool should_commit = true);
    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name);
    void map_region_at_address(PageDirectory&, Region&, VirtualAddress);

    unsigned user_physical_pages() const { return m_user_physical_pages; }
    unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
    unsigned super_physical_pages() const { return m_super_physical_pages; }
    unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }

    template<typename Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects) {
            if (callback(vmobject) == IterationDecision::Break)
                break;
        }
    }

private:
    MemoryManager();
    ~MemoryManager();

    void register_vmo(VMObject&);
    void unregister_vmo(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void remap_region_page(Region&, unsigned page_index_in_region);

    void initialize_paging();
    void flush_entire_tlb();
    void flush_tlb(VirtualAddress);

    void map_protected(VirtualAddress, size_t length);

    void create_identity_mapping(PageDirectory&, VirtualAddress, size_t length);

    static Region* region_from_vaddr(Process&, VirtualAddress);
    static const Region* region_from_vaddr(const Process&, VirtualAddress);

    static Region* user_region_from_vaddr(Process&, VirtualAddress);
    static Region* kernel_region_from_vaddr(VirtualAddress);

    static Region* region_from_vaddr(VirtualAddress);

    // Page fault handlers for the lazy-mapping strategies.
    bool copy_on_write(Region&, unsigned page_index_in_region);
    bool page_in_from_inode(Region&, unsigned page_index_in_region);
    bool zero_page(Region& region, unsigned page_index_in_region);

    // Temporarily map a single physical page at m_quickmap_addr so the
    // kernel can touch it; must be balanced by unquickmap_page().
    u8* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);

    RefPtr<PageDirectory> m_kernel_page_directory;
    PageTableEntry* m_page_table_zero { nullptr };
    PageTableEntry* m_page_table_one { nullptr };

    VirtualAddress m_quickmap_addr;

    unsigned m_user_physical_pages { 0 };
    unsigned m_user_physical_pages_used { 0 };
    unsigned m_super_physical_pages { 0 };
    unsigned m_super_physical_pages_used { 0 };

    NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
    NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;

    InlineLinkedList<Region> m_user_regions;
    InlineLinkedList<Region> m_kernel_regions;

    InlineLinkedList<VMObject> m_vmobjects;

    bool m_quickmap_in_use { false };
};

// RAII helper that enters the given process's paging scope on construction
// and leaves it on destruction.
struct ProcessPagingScope {
    ProcessPagingScope(Process&);
    ~ProcessPagingScope();
};
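// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the original header). Assuming
// the declarations above, this is how a caller might obtain a kernel region:
// the MM macro reaches the singleton, PAGE_ROUND_UP pads the request to whole
// pages, and the returned OwnPtr<Region> owns the mapping. The helper name
// allocate_scratch_region() is hypothetical.
//
//     static OwnPtr<Region> allocate_scratch_region()
//     {
//         // Round 12 KiB up to a page multiple, then request a committed,
//         // kernel-only (user_accessible = false) region.
//         size_t size = PAGE_ROUND_UP(12 * 1024);
//         return MM.allocate_kernel_region(size, "scratch");
//     }
// ---------------------------------------------------------------------------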