mm: clear uffd-wp PTE/PMD state on mremap()
When mremap()ing a memory region previously registered with userfaultfd as
write-protected but without UFFD_FEATURE_EVENT_REMAP, an inconsistency in
flag clearing leads to a mismatch between the vma flags (which have
uffd-wp cleared) and the pte/pmd flags (which do not have uffd-wp
cleared). This mismatch causes a subsequent mprotect(PROT_WRITE) to
trigger a warning in page_table_check_pte_flags() due to setting the pte
to writable while uffd-wp is still set.
Fix this by always explicitly clearing the uffd-wp pte/pmd flags on any
such mremap() so that the values are consistent with the existing clearing
of VM_UFFD_WP. Be careful to clear the logical flag regardless of its
physical form: a PTE bit, a swap PTE bit, or a PTE marker. Cover PTE,
huge PMD and hugetlb paths. (A rough userspace reproducer sketch follows
the tag block below.)
Link: https://lkml.kernel.org/r/20250107144755.1871363-2-ryan.roberts@arm.com
Co-developed-by: Mikołaj Lenczewski <miko.lenczewski@arm.com>
Signed-off-by: Mikołaj Lenczewski <miko.lenczewski@arm.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Closes: https://lore.kernel.org/linux-mm/810b44a8-d2ae-4107-b665-5a42eae2d948@arm.com/
Fixes: 63b2d4174c ("userfaultfd: wp: add the writeprotect API to userfaultfd ioctl")
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
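
For context, below is a rough, untested userspace reproducer sketch of the
pre-fix failure described above. It is an assumption based on the standard
userfaultfd API and the bug description, not code taken from the patch or
its selftests; error handling is omitted and the mremap flags chosen
(MREMAP_MAYMOVE | MREMAP_DONTUNMAP) are just one way to force the ptes to
move.

/* Sketch only: assumes a uffd-wp capable kernel, with
 * CONFIG_PAGE_TABLE_CHECK enabled so the warning fires. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	/* Negotiate the API *without* requesting UFFD_FEATURE_EVENT_REMAP. */
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	ioctl(uffd, UFFDIO_API, &api);

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	p[0] = 1;	/* populate the pte */

	/* Register the range in write-protect mode and write-protect it. */
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)p, .len = len },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)p, .len = len },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);

	/*
	 * Force a move: without UFFD_FEATURE_EVENT_REMAP the kernel clears
	 * VM_UFFD_WP on the new vma but, pre-fix, leaves the uffd-wp bit
	 * set in the moved ptes.
	 */
	p = mremap(p, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);

	/*
	 * Pre-fix, this sets the pte writable while uffd-wp is still set,
	 * tripping the warning in page_table_check_pte_flags().
	 */
	mprotect(p, len, PROT_READ | PROT_WRITE);
	return 0;
}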
commit 0cef0bb836
parent 4bcf297411
4 changed files with 68 additions and 2 deletions
include/linux/userfaultfd_k.h
@@ -247,6 +247,13 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
 		    vma_is_shmem(vma);
 }
 
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
+{
+	struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx;
+
+	return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0;
+}
+
 extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
 extern void dup_userfaultfd_complete(struct list_head *);
 void dup_userfaultfd_fail(struct list_head *);
@@ -402,6 +409,11 @@ static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
 	return false;
 }
 
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
+{
+	return false;
+}
+
 #endif /* CONFIG_USERFAULTFD */
 
 static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
mm/huge_memory.c
@@ -2206,6 +2206,16 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
 	return pmd;
 }
 
+static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
+{
+	if (pmd_present(pmd))
+		pmd = pmd_clear_uffd_wp(pmd);
+	else if (is_swap_pmd(pmd))
+		pmd = pmd_swp_clear_uffd_wp(pmd);
+
+	return pmd;
+}
+
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
 {
@@ -2244,6 +2254,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 		}
 		pmd = move_soft_dirty_pmd(pmd);
+		if (vma_has_uffd_without_event_remap(vma))
+			pmd = clear_uffd_wp_pmd(pmd);
 		set_pmd_at(mm, new_addr, new_pmd, pmd);
 		if (force_flush)
 			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
mm/hugetlb.c
@@ -5402,6 +5402,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
 			  unsigned long sz)
 {
+	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *src_ptl, *dst_ptl;
@@ -5418,7 +5419,18 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 
 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
-	set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
+
+	if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+		huge_pte_clear(mm, new_addr, dst_pte, sz);
+	else {
+		if (need_clear_uffd_wp) {
+			if (pte_present(pte))
+				pte = huge_pte_clear_uffd_wp(pte);
+			else if (is_swap_pte(pte))
+				pte = pte_swp_clear_uffd_wp(pte);
+		}
+		set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
+	}
 
 	if (src_ptl != dst_ptl)
 		spin_unlock(src_ptl);
mm/mremap.c
@@ -138,6 +138,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 		unsigned long new_addr, bool need_rmap_locks)
 {
+	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	pmd_t dummy_pmdval;
@@ -216,8 +217,19 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 				force_flush = true;
 			pte = move_pte(pte, old_addr, new_addr);
 			pte = move_soft_dirty_pte(pte);
-			set_pte_at(mm, new_addr, new_pte, pte);
+
+			if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+				pte_clear(mm, new_addr, new_pte);
+			else {
+				if (need_clear_uffd_wp) {
+					if (pte_present(pte))
+						pte = pte_clear_uffd_wp(pte);
+					else if (is_swap_pte(pte))
+						pte = pte_swp_clear_uffd_wp(pte);
+				}
+				set_pte_at(mm, new_addr, new_pte, pte);
+			}
 		}
 
 	arch_leave_lazy_mmu_mode();
 	if (force_flush)
@@ -278,6 +290,15 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
 		return false;
 
+	/* If this pmd belongs to a uffd vma with remap events disabled, we need
+	 * to ensure that the uffd-wp state is cleared from all pgtables. This
+	 * means recursing into lower page tables in move_page_tables(), and we
+	 * can reuse the existing code if we simply treat the entry as "not
+	 * moved".
+	 */
+	if (vma_has_uffd_without_event_remap(vma))
+		return false;
+
 	/*
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_lock prevents deadlock.
@@ -333,6 +354,15 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
 	if (WARN_ON_ONCE(!pud_none(*new_pud)))
 		return false;
 
+	/* If this pud belongs to a uffd vma with remap events disabled, we need
+	 * to ensure that the uffd-wp state is cleared from all pgtables. This
+	 * means recursing into lower page tables in move_page_tables(), and we
+	 * can reuse the existing code if we simply treat the entry as "not
+	 * moved".
+	 */
+	if (vma_has_uffd_without_event_remap(vma))
+		return false;
+
 	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.