Kernel/MM: Use same memory type for zeroing pages as the actual mapping

On some architectures like ARM, using mismatched memory types for
different aliases of the same physical memory can lead to unexpected
behavior.
(https://developer.arm.com/documentation/102376/0200/Memory-aliasing-and-mismatched-memory-types)

Flushing the data cache and using appropriate memory barriers might be
sufficient instead, but would likely perform worse.
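For example (illustrative only; this call site is not part of the diff below,
and MemoryType::NonCacheable is assumed to be one of the kernel's memory
types), a driver allocating a non-cacheable contiguous region now also gets
its pages zeroed through a non-cacheable mapping:

    // Hypothetical call site: with this change, the temporary mapping used
    // to zero-fill the freshly allocated pages is created with the caller's
    // memory type (NonCacheable here), so the physical pages are never
    // aliased by a cacheable mapping while being zeroed.
    auto region = TRY(MM.allocate_contiguous_kernel_region(
        4 * PAGE_SIZE, "ExampleDMA"sv,
        Memory::Region::Access::Read | Memory::Region::Access::Write,
        Memory::MemoryType::NonCacheable));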
commit 6fa8165d91
parent 6cdd659c94
Author: Sönke Holz
Date: 2024-12-26 18:12:18 +01:00

4 changed files with 14 additions and 8 deletions

Kernel/Memory/AnonymousVMObject.cpp

@@ -88,9 +88,9 @@ ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with
     return adopt_nonnull_lock_ref_or_enomem(new (nothrow) AnonymousVMObject(move(new_physical_pages), strategy, move(committed_pages)));
 }
 
-ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
+ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size, MemoryType memory_type_for_zero_fill)
 {
-    auto contiguous_physical_pages = TRY(MM.allocate_contiguous_physical_pages(size));
+    auto contiguous_physical_pages = TRY(MM.allocate_contiguous_physical_pages(size, memory_type_for_zero_fill));
     auto new_physical_pages = TRY(FixedArray<RefPtr<PhysicalRAMPage>>::create(contiguous_physical_pages.span()));
Kernel/Memory/AnonymousVMObject.h

@@ -22,7 +22,7 @@ public:
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalRAMPage>>);
     static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
-    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
+    static ErrorOr<NonnullLockRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t, MemoryType memory_type_for_zero_fill);
     virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override;
 
     [[nodiscard]] NonnullRefPtr<PhysicalRAMPage> allocate_committed_page(Badge<Region>);
Kernel/Memory/MemoryManager.cpp

@@ -1067,7 +1067,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
     OwnPtr<KString> name_kstring;
     if (!name.is_null())
         name_kstring = TRY(KString::try_create(name));
-    auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
+    auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size, memory_type));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, memory_type));
     TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
     TRY(region->map(kernel_page_directory()));
@@ -1092,7 +1092,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalRAMPage>>& dma_buffer_pages, MemoryType memory_type)
 {
     VERIFY(!(size % PAGE_SIZE));
-    dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
+    dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size, memory_type));
     // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behavior by default)
     return allocate_kernel_region_with_physical_pages(dma_buffer_pages, name, access, memory_type);
 }
@@ -1262,6 +1262,8 @@ NonnullRefPtr<PhysicalRAMPage> MemoryManager::allocate_committed_physical_page(B
     VERIFY(page);
     if (should_zero_fill == ShouldZeroFill::Yes) {
         InterruptDisabler disabler;
+        // FIXME: To prevent aliasing memory with different memory types, this page should be mapped using the same memory type it will use later for the actual mapping.
+        //        (See the comment above the memset in allocate_contiguous_physical_pages.)
         auto* ptr = quickmap_page(*page);
         memset(ptr, 0, PAGE_SIZE);
         unquickmap_page();
@@ -1315,6 +1317,8 @@ ErrorOr<NonnullRefPtr<PhysicalRAMPage>> MemoryManager::allocate_physical_page(Sh
     }
 
     if (should_zero_fill == ShouldZeroFill::Yes) {
+        // FIXME: To prevent aliasing memory with different memory types, this page should be mapped using the same memory type it will use later for the actual mapping.
+        //        (See the comment above the memset in allocate_contiguous_physical_pages.)
         auto* ptr = quickmap_page(*page);
         memset(ptr, 0, PAGE_SIZE);
         unquickmap_page();
@@ -1326,7 +1330,7 @@ ErrorOr<NonnullRefPtr<PhysicalRAMPage>> MemoryManager::allocate_physical_page(Sh
     });
 }
 
-ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
+ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> MemoryManager::allocate_contiguous_physical_pages(size_t size, MemoryType memory_type_for_zero_fill)
 {
     VERIFY(!(size % PAGE_SIZE));
     size_t page_count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
@@ -1349,7 +1353,9 @@ ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> MemoryManager::allocate_contiguo
     }));
 
     {
-        auto cleanup_region = TRY(MM.allocate_kernel_region_with_physical_pages(physical_pages, {}, Region::Access::Read | Region::Access::Write));
+        // The memory_type_for_zero_fill argument ensures that the cleanup region is mapped using the same memory type as the subsequent actual mapping, preventing aliasing of physical memory with mismatched memory types.
+        // On some architectures like ARM, aliasing memory with mismatched memory types can lead to unexpected behavior and potentially worse performance.
+        auto cleanup_region = TRY(MM.allocate_kernel_region_with_physical_pages(physical_pages, {}, Region::Access::Read | Region::Access::Write, memory_type_for_zero_fill));
         memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
     }
 
     return physical_pages;
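A possible direction for the two FIXMEs above, shown as a hypothetical sketch
only — the quickmap_page overload taking a MemoryType does not exist in this
commit:

    if (should_zero_fill == ShouldZeroFill::Yes) {
        InterruptDisabler disabler;
        // Hypothetical overload: quickmap the page with the memory type that
        // will be used for the actual mapping later, so the zeroing store and
        // the eventual mapping never alias with mismatched memory types.
        auto* ptr = quickmap_page(*page, memory_type_for_zero_fill);
        memset(ptr, 0, PAGE_SIZE);
        unquickmap_page();
    }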

Kernel/Memory/MemoryManager.h

@@ -165,7 +165,7 @@ public:
     NonnullRefPtr<PhysicalRAMPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
     ErrorOr<NonnullRefPtr<PhysicalRAMPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
-    ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> allocate_contiguous_physical_pages(size_t size);
+    ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> allocate_contiguous_physical_pages(size_t size, MemoryType memory_type_for_zero_fill);
     void deallocate_physical_page(PhysicalAddress);
 
     ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, MemoryType = MemoryType::Normal);
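A typical call site for the updated declaration (illustrative only; the size
and memory type below are made up):

    // The caller now states up front how the pages will eventually be
    // mapped; allocate_contiguous_physical_pages zero-fills them through a
    // mapping of that same memory type.
    auto pages = TRY(MM.allocate_contiguous_physical_pages(
        4 * PAGE_SIZE, Memory::MemoryType::Normal));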