mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-24 09:13:20 -05:00
hugetlb: revert use of page_cache_next_miss()
Ackerley Tng reported an issue with hugetlbfs fallocate as noted in the
Closes tag. The issue showed up after the conversion of hugetlb page
cache lookup code to use page_cache_next_miss. User visible effects are:
- hugetlbfs fallocate incorrectly returns -EEXIST if pages are present
in the file.
- hugetlb pages will not be included in core dumps if they need to be
brought in via GUP.
- userfaultfd UFFDIO_COPY will not notice pages already present in the
cache. It may try to allocate a new page and potentially return
ENOMEM as opposed to EEXIST.
Revert the use of page_cache_next_miss() in hugetlb code.
IMPORTANT NOTE FOR STABLE BACKPORTS:
This patch will apply cleanly to v6.3. However, due to the change of
filemap_get_folio() return values, it will not function correctly. This
patch must be modified for stable backports.
[dan.carpenter@linaro.org: fix hugetlbfs_pagecache_present()]
Link: https://lkml.kernel.org/r/efa86091-6a2c-4064-8f55-9b44e1313015@moroto.mountain
Link: https://lkml.kernel.org/r/20230621212403.174710-2-mike.kravetz@oracle.com
Fixes: d0ce0e47b3
("mm/hugetlb: convert hugetlb fault paths to use alloc_hugetlb_folio()")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
Reported-by: Ackerley Tng <ackerleytng@google.com>
Closes: https://lore.kernel.org/linux-mm/cover.1683069252.git.ackerleytng@google.com
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Vishal Annapurve <vannapurve@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
16f8eb3eea
commit
fd4aed8d98
2 changed files with 9 additions and 11 deletions
|
@ -821,7 +821,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
|
||||||
*/
|
*/
|
||||||
struct folio *folio;
|
struct folio *folio;
|
||||||
unsigned long addr;
|
unsigned long addr;
|
||||||
bool present;
|
|
||||||
|
|
||||||
cond_resched();
|
cond_resched();
|
||||||
|
|
||||||
|
@ -842,10 +841,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
|
||||||
mutex_lock(&hugetlb_fault_mutex_table[hash]);
|
mutex_lock(&hugetlb_fault_mutex_table[hash]);
|
||||||
|
|
||||||
/* See if already present in mapping to avoid alloc/free */
|
/* See if already present in mapping to avoid alloc/free */
|
||||||
rcu_read_lock();
|
folio = filemap_get_folio(mapping, index);
|
||||||
present = page_cache_next_miss(mapping, index, 1) != index;
|
if (!IS_ERR(folio)) {
|
||||||
rcu_read_unlock();
|
folio_put(folio);
|
||||||
if (present) {
|
|
||||||
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
|
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
12
mm/hugetlb.c
12
mm/hugetlb.c
|
@ -5728,13 +5728,13 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
|
||||||
{
|
{
|
||||||
struct address_space *mapping = vma->vm_file->f_mapping;
|
struct address_space *mapping = vma->vm_file->f_mapping;
|
||||||
pgoff_t idx = vma_hugecache_offset(h, vma, address);
|
pgoff_t idx = vma_hugecache_offset(h, vma, address);
|
||||||
bool present;
|
struct folio *folio;
|
||||||
|
|
||||||
rcu_read_lock();
|
folio = filemap_get_folio(mapping, idx);
|
||||||
present = page_cache_next_miss(mapping, idx, 1) != idx;
|
if (IS_ERR(folio))
|
||||||
rcu_read_unlock();
|
return false;
|
||||||
|
folio_put(folio);
|
||||||
return present;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
|
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
|
||||||
|
|
Loading…
Add table
Reference in a new issue