Kernel: Ignore TLB flush requests for user addresses of other processes

If a TLB flush request is broadcast to other processors and the addresses
to flush are user mode addresses, we can ignore such a request on the
target processor if the page directory currently in use doesn't match
the addresses to be flushed. We still need to broadcast to all processors
in that case because the other processors may switch to that same page
directory at any time.
This commit is contained in:
Tom 2021-01-02 12:27:38 -07:00 committed by Andreas Kling
parent c630669304
commit 0d44ee6f2b
5 changed files with 25 additions and 12 deletions

View file

@@ -1790,10 +1790,10 @@ void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
}
}
void Processor::flush_tlb(VirtualAddress vaddr, size_t page_count)
void Processor::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
{
if (s_smp_enabled)
smp_broadcast_flush_tlb(vaddr, page_count);
smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
else
flush_tlb_local(vaddr, page_count);
}
@@ -1911,6 +1911,17 @@ bool Processor::smp_process_pending_messages()
msg->callback_with_data.handler(msg->callback_with_data.data);
break;
case ProcessorMessage::FlushTlb:
if (is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
// We assume that we don't cross into kernel land!
ASSERT(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
//This processor isn't using this page directory right now, we can ignore this request
#ifdef SMP_DEBUG
dbg() << "SMP[" << id() << "]: No need to flush " << msg->flush_tlb.page_count << " pages at " << VirtualAddress(msg->flush_tlb.ptr);
#endif
break;
}
}
flush_tlb_local(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count);
break;
}
@@ -2068,11 +2079,12 @@ void Processor::smp_unicast(u32 cpu, void (*callback)(), bool async)
smp_unicast_message(cpu, msg, async);
}
void Processor::smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count)
void Processor::smp_broadcast_flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
{
auto& msg = smp_get_from_pool();
msg.async = false;
msg.type = ProcessorMessage::FlushTlb;
msg.flush_tlb.page_directory = page_directory;
msg.flush_tlb.ptr = vaddr.as_ptr();
msg.flush_tlb.page_count = page_count;
smp_broadcast_message(msg);

View file

@@ -666,6 +666,7 @@ struct ProcessorMessage {
void (*free)(void*);
} callback_with_data;
struct {
const PageDirectory* page_directory;
u8* ptr;
size_t page_count;
} flush_tlb;
@@ -787,7 +788,7 @@ public:
}
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
static void flush_tlb(VirtualAddress vaddr, size_t page_count);
static void flush_tlb(const PageDirectory*, VirtualAddress, size_t);
Descriptor& get_gdt_entry(u16 selector);
void flush_gdt();
@@ -991,7 +992,7 @@ public:
}
static void smp_unicast(u32 cpu, void (*callback)(), bool async);
static void smp_unicast(u32 cpu, void (*callback)(void*), void* data, void (*free_data)(void*), bool async);
static void smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count);
static void smp_broadcast_flush_tlb(const PageDirectory*, VirtualAddress, size_t);
template<typename Callback>
static void deferred_call_queue(Callback callback)

View file

@@ -697,12 +697,12 @@ void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
Processor::flush_tlb_local(vaddr, page_count);
}
void MemoryManager::flush_tlb(VirtualAddress vaddr, size_t page_count)
void MemoryManager::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
{
#ifdef MM_DEBUG
dbg() << "MM: Flush " << page_count << " pages at " << vaddr;
#endif
Processor::flush_tlb(vaddr, page_count);
Processor::flush_tlb(page_directory, vaddr, page_count);
}
extern "C" PageTableEntry boot_pd3_pt1023[1024];

View file

@@ -184,7 +184,7 @@ private:
void protect_kernel_image();
void parse_memory_map();
static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
static void flush_tlb(VirtualAddress, size_t page_count = 1);
static void flush_tlb(const PageDirectory*, VirtualAddress, size_t page_count = 1);
static Region* user_region_from_vaddr(Process&, VirtualAddress);
static Region* kernel_region_from_vaddr(VirtualAddress);

View file

@@ -320,7 +320,7 @@ bool Region::do_remap_vmobject_page_range(size_t page_index, size_t page_count)
index++;
}
if (index > page_index)
MM.flush_tlb(vaddr_from_page_index(page_index), index - page_index);
MM.flush_tlb(m_page_directory, vaddr_from_page_index(page_index), index - page_index);
return success;
}
@@ -351,7 +351,7 @@ bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
ASSERT(physical_page(page_index));
bool success = map_individual_page_impl(page_index);
if (with_flush)
MM.flush_tlb(vaddr_from_page_index(page_index));
MM.flush_tlb(m_page_directory, vaddr_from_page_index(page_index));
return success;
}
@@ -387,7 +387,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
#endif
}
MM.flush_tlb(vaddr(), page_count());
MM.flush_tlb(m_page_directory, vaddr(), page_count());
if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
if (m_page_directory->range_allocator().contains(range()))
m_page_directory->range_allocator().deallocate(range());
@@ -419,7 +419,7 @@ bool Region::map(PageDirectory& page_directory)
++page_index;
}
if (page_index > 0) {
MM.flush_tlb(vaddr(), page_index);
MM.flush_tlb(m_page_directory, vaddr(), page_index);
return page_index == page_count();
}
return false;