Merge tag 'mm-hotfixes-stable-2025-01-04-18-02' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
 "25 hotfixes.  16 are cc:stable.  18 are MM and 7 are non-MM.

  The usual bunch of singletons and two doubletons - please see the
  relevant changelogs for details"

* tag 'mm-hotfixes-stable-2025-01-04-18-02' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (25 commits)
  MAINTAINERS: change Arınç ÜNAL's name and email address
  scripts/sorttable: fix orc_sort_cmp() to maintain symmetry and transitivity
  mm/util: make memdup_user_nul() similar to memdup_user()
  mm, madvise: fix potential workingset node list_lru leaks
  mm/damon/core: fix ignored quota goals and filters of newly committed schemes
  mm/damon/core: fix new damon_target objects leaks on damon_commit_targets()
  mm/list_lru: fix false warning of negative counter
  vmstat: disable vmstat_work on vmstat_cpu_down_prep()
  mm: shmem: fix the update of 'shmem_falloc->nr_unswapped'
  mm: shmem: fix incorrect index alignment for within_size policy
  percpu: remove intermediate variable in PERCPU_PTR()
  mm: zswap: fix race between [de]compression and CPU hotunplug
  ocfs2: fix slab-use-after-free due to dangling pointer dqi_priv
  fs/proc/task_mmu: fix pagemap flags with PMD THP entries on 32bit
  kcov: mark in_softirq_really() as __always_inline
  docs: mm: fix the incorrect 'FileHugeMapped' field
  mailmap: modify the entry for Mathieu Othacehe
  mm/kmemleak: fix sleeping function called from invalid context at print message
  mm: hugetlb: independent PMD page table shared count
  maple_tree: reload mas before the second call for mas_empty_area
  ...
Commit 5635d8bad2 by Linus Torvalds, 2025-01-05 10:37:45 -08:00
29 changed files with 211 additions and 68 deletions

@ -435,7 +435,7 @@ Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
-Mathieu Othacehe <m.othacehe@gmail.com> <othacehe@gnu.org>
+Mathieu Othacehe <othacehe@gnu.org> <m.othacehe@gmail.com>
Mat Martineau <martineau@kernel.org> <mathew.j.martineau@linux.intel.com>
Mat Martineau <martineau@kernel.org> <mathewm@codeaurora.org>
Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>

@ -436,7 +436,7 @@ AnonHugePmdMapped).
The number of file transparent huge pages mapped to userspace is available
by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``.
To identify what applications are mapping file transparent huge pages, it
-is necessary to read ``/proc/PID/smaps`` and count the FileHugeMapped fields
+is necessary to read ``/proc/PID/smaps`` and count the FilePmdMapped fields
for each mapping.
Note that reading the smaps file is expensive and reading it

@ -14756,7 +14756,7 @@ F: drivers/memory/mtk-smi.c
F: include/soc/mediatek/smi.h
MEDIATEK SWITCH DRIVER
-M: Arınç ÜNAL <arinc.unal@arinc9.com>
+M: Chester A. Unal <chester.a.unal@arinc9.com>
M: Daniel Golle <daniel@makrotopia.org>
M: DENG Qingfang <dqfext@gmail.com>
M: Sean Wang <sean.wang@mediatek.com>
@ -18460,7 +18460,7 @@ F: Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
F: drivers/pinctrl/mediatek/
PIN CONTROLLER - MEDIATEK MIPS
-M: Arınç ÜNAL <arinc.unal@arinc9.com>
+M: Chester A. Unal <chester.a.unal@arinc9.com>
M: Sergio Paracuellos <sergio.paracuellos@gmail.com>
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
L: linux-mips@vger.kernel.org
@ -19504,7 +19504,7 @@ S: Maintained
F: arch/mips/ralink
RALINK MT7621 MIPS ARCHITECTURE
-M: Arınç ÜNAL <arinc.unal@arinc9.com>
+M: Chester A. Unal <chester.a.unal@arinc9.com>
M: Sergio Paracuellos <sergio.paracuellos@gmail.com>
L: linux-mips@vger.kernel.org
S: Maintained

@ -893,7 +893,7 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
int status = 0;
trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
-if (!sb_has_quota_loaded(sb, type)) {
+if (!sb_has_quota_active(sb, type)) {
status = -ESRCH;
goto out;
}

@ -867,6 +867,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
brelse(oinfo->dqi_libh);
brelse(oinfo->dqi_lqi_bh);
kfree(oinfo);
+info->dqi_priv = NULL;
return status;
}

@ -1810,7 +1810,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
}
for (; addr != end; addr += PAGE_SIZE, idx++) {
-unsigned long cur_flags = flags;
+u64 cur_flags = flags;
pagemap_entry_t pme;
if (folio && (flags & PM_PRESENT) &&

@ -7,6 +7,7 @@
#ifdef CONFIG_MEMFD_CREATE
extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
+unsigned int *memfd_file_seals_ptr(struct file *file);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
@ -16,6 +17,19 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
{
return ERR_PTR(-EINVAL);
}
+static inline unsigned int *memfd_file_seals_ptr(struct file *file)
+{
+return NULL;
+}
#endif
+/* Retrieve memfd seals associated with the file, if any. */
+static inline unsigned int memfd_file_seals(struct file *file)
+{
+unsigned int *sealsp = memfd_file_seals_ptr(file);
+return sealsp ? *sealsp : 0;
+}
#endif /* __LINUX_MEMFD_H */
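The behavior these memfd hunks reinstate is easiest to see from userspace. Below is a minimal standalone sketch (not part of this series; assumes Linux with memfd_create(2) and glibc 2.27+), mirroring what the new selftest at the end of this page exercises: once F_SEAL_WRITE is applied, a writable MAP_SHARED mapping must fail with EPERM, while a read-only MAP_SHARED mapping should succeed.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("sealed", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) < 0)
		return 1;

	/* Writable shared mappings of a write-sealed memfd must fail... */
	void *w = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	printf("PROT_READ|PROT_WRITE, MAP_SHARED: %s\n",
	       w == MAP_FAILED ? "rejected (expected)" : "allowed");

	/* ...but a read-only shared mapping is permitted. */
	void *r = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	printf("PROT_READ, MAP_SHARED: %s\n",
	       r == MAP_FAILED ? "rejected" : "allowed (expected)");
	return 0;
}

Before this series, the second mapping also failed, which is the regression being repaired.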

@ -3125,6 +3125,7 @@ static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
if (!pmd_ptlock_init(ptdesc))
return false;
__folio_set_pgtable(folio);
+ptdesc_pmd_pts_init(ptdesc);
lruvec_stat_add_folio(folio, NR_PAGETABLE);
return true;
}
@ -4101,6 +4102,37 @@ void mem_dump_obj(void *object);
static inline void mem_dump_obj(void *object) {}
#endif
+static inline bool is_write_sealed(int seals)
+{
+return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
+}
+/**
+* is_readonly_sealed - Checks whether write-sealed but mapped read-only,
+* in which case writes should be disallowed moving
+* forwards.
+* @seals: the seals to check
+* @vm_flags: the VMA flags to check
+*
+* Returns whether readonly sealed, in which case writes should be disallowed
+* going forward.
+*/
+static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags)
+{
+/*
+* Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
+* MAP_SHARED and read-only, take care to not allow mprotect to
+* revert protections on such mappings. Do this only for shared
+* mappings. For private mappings, don't need to mask
+* VM_MAYWRITE as we still want them to be COW-writable.
+*/
+if (is_write_sealed(seals) &&
+((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED))
+return true;
+return false;
+}
/**
* seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
* handle them.
@ -4112,24 +4144,15 @@ static inline void mem_dump_obj(void *object) {}
*/
static inline int seal_check_write(int seals, struct vm_area_struct *vma)
{
-if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
-/*
-* New PROT_WRITE and MAP_SHARED mmaps are not allowed when
-* write seals are active.
-*/
-if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
-return -EPERM;
+if (!is_write_sealed(seals))
+return 0;
-/*
-* Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
-* MAP_SHARED and read-only, take care to not allow mprotect to
-* revert protections on such mappings. Do this only for shared
-* mappings. For private mappings, don't need to mask
-* VM_MAYWRITE as we still want them to be COW-writable.
-*/
-if (vma->vm_flags & VM_SHARED)
-vm_flags_clear(vma, VM_MAYWRITE);
-}
+/*
+* New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+* write seals are active.
+*/
+if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+return -EPERM;
return 0;
}

@ -445,6 +445,7 @@ FOLIO_MATCH(compound_head, _head_2a);
* @pt_index: Used for s390 gmap.
* @pt_mm: Used for x86 pgds.
* @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
+* @pt_share_count: Used for HugeTLB PMD page table share count.
* @_pt_pad_2: Padding to ensure proper alignment.
* @ptl: Lock for the page table.
* @__page_type: Same as page->page_type. Unused for page tables.
@ -471,6 +472,9 @@ struct ptdesc {
pgoff_t pt_index;
struct mm_struct *pt_mm;
atomic_t pt_frag_refcount;
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+atomic_t pt_share_count;
+#endif
};
union {
@ -516,6 +520,32 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
const struct page *: (const struct ptdesc *)(p), \
struct page *: (struct ptdesc *)(p)))
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+{
+atomic_set(&ptdesc->pt_share_count, 0);
+}
+static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
+{
+atomic_inc(&ptdesc->pt_share_count);
+}
+static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
+{
+atomic_dec(&ptdesc->pt_share_count);
+}
+static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
+{
+return atomic_read(&ptdesc->pt_share_count);
+}
+#else
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+{
+}
+#endif
/*
* Used for sizing the vmemmap region on some architectures
*/

@ -221,10 +221,7 @@ do { \
} while (0)
#define PERCPU_PTR(__p) \
-({ \
-unsigned long __pcpu_ptr = (__force unsigned long)(__p); \
-(typeof(*(__p)) __force __kernel *)(__pcpu_ptr); \
-})
+(typeof(*(__p)) __force __kernel *)((__force unsigned long)(__p))
#ifdef CONFIG_SMP

@ -166,7 +166,7 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
* Unlike in_serving_softirq(), this function returns false when called during
* a hardirq or an NMI that happened in the softirq context.
*/
-static inline bool in_softirq_really(void)
+static __always_inline bool in_softirq_really(void)
{
return in_serving_softirq() && !in_hardirq() && !in_nmi();
}

@ -4354,6 +4354,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
ret = 1;
}
if (ret < 0 && range_lo > min) {
+mas_reset(mas);
ret = mas_empty_area(mas, min, range_hi, 1);
if (ret == 0)
ret = 1;

@ -868,6 +868,11 @@ static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
NUMA_NO_NODE);
if (!new_scheme)
return -ENOMEM;
+err = damos_commit(new_scheme, src_scheme);
+if (err) {
+damon_destroy_scheme(new_scheme);
+return err;
+}
damon_add_scheme(dst, new_scheme);
}
return 0;
@ -961,8 +966,11 @@ static int damon_commit_targets(
return -ENOMEM;
err = damon_commit_target(new_target, false,
src_target, damon_target_has_pid(src));
-if (err)
+if (err) {
+damon_destroy_target(new_target);
return err;
+}
damon_add_target(dst, new_target);
}
return 0;
}

@ -124,15 +124,6 @@
* ->private_lock (zap_pte_range->block_dirty_folio)
*/
-static void mapping_set_update(struct xa_state *xas,
-struct address_space *mapping)
-{
-if (dax_mapping(mapping) || shmem_mapping(mapping))
-return;
-xas_set_update(xas, workingset_update_node);
-xas_set_lru(xas, &shadow_nodes);
-}
static void page_cache_delete(struct address_space *mapping,
struct folio *folio, void *shadow)
{

@ -7211,7 +7211,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
spte = hugetlb_walk(svma, saddr,
vma_mmu_pagesize(svma));
if (spte) {
-get_page(virt_to_page(spte));
+ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
break;
}
}
@ -7226,7 +7226,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
(pmd_t *)((unsigned long)spte & PAGE_MASK));
mm_inc_nr_pmds(mm);
} else {
-put_page(virt_to_page(spte));
+ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
}
spin_unlock(&mm->page_table_lock);
out:
@ -7238,10 +7238,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
/*
* unmap huge page backed by shared pte.
*
-* Hugetlb pte page is ref counted at the time of mapping. If pte is shared
-* indicated by page_count > 1, unmap is achieved by clearing pud and
-* decrementing the ref count. If count == 1, the pte page is not shared.
*
* Called with page table lock held.
*
* returns: 1 successfully unmapped a shared pte page
@ -7250,18 +7246,20 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+unsigned long sz = huge_page_size(hstate_vma(vma));
pgd_t *pgd = pgd_offset(mm, addr);
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
hugetlb_vma_assert_locked(vma);
-BUG_ON(page_count(virt_to_page(ptep)) == 0);
-if (page_count(virt_to_page(ptep)) == 1)
+if (sz != PMD_SIZE)
return 0;
+if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep)))
+return 0;
pud_clear(pud);
-put_page(virt_to_page(ptep));
+ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
mm_dec_nr_pmds(mm);
return 1;
}

@ -1504,6 +1504,12 @@ static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
+#define mapping_set_update(xas, mapping) do { \
+if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
+xas_set_update(xas, workingset_update_node); \
+xas_set_lru(xas, &shadow_nodes); \
+} \
+} while (0)
/* mremap.c */
unsigned long move_page_tables(struct vm_area_struct *vma,

@ -19,6 +19,7 @@
#include <linux/rcupdate_wait.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
+#include <linux/dax.h>
#include <linux/ksm.h>
#include <asm/tlb.h>
@ -1837,6 +1838,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
if (result != SCAN_SUCCEED)
goto out;
+mapping_set_update(&xas, mapping);
__folio_set_locked(new_folio);
if (is_shmem)
__folio_set_swapbacked(new_folio);

@ -373,7 +373,7 @@ static void print_unreferenced(struct seq_file *seq,
for (i = 0; i < nr_entries; i++) {
void *ptr = (void *)entries[i];
-warn_or_seq_printf(seq, " [<%pK>] %pS\n", ptr, ptr);
+warn_or_seq_printf(seq, " %pS\n", ptr);
}
}

@ -77,7 +77,6 @@ lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
spin_lock(&l->lock);
nr_items = READ_ONCE(l->nr_items);
if (likely(nr_items != LONG_MIN)) {
-WARN_ON(nr_items < 0);
rcu_read_unlock();
return l;
}
@ -450,6 +449,7 @@ static void memcg_reparent_list_lru_one(struct list_lru *lru, int nid,
list_splice_init(&src->list, &dst->list);
if (src->nr_items) {
+WARN_ON(src->nr_items < 0);
dst->nr_items += src->nr_items;
set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
}

@ -170,7 +170,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
return error;
}
-static unsigned int *memfd_file_seals_ptr(struct file *file)
+unsigned int *memfd_file_seals_ptr(struct file *file)
{
if (shmem_file(file))
return &SHMEM_I(file_inode(file))->seals;

@ -47,6 +47,7 @@
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>
+#include <linux/memfd.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@ -368,6 +369,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
if (file) {
struct inode *inode = file_inode(file);
+unsigned int seals = memfd_file_seals(file);
unsigned long flags_mask;
if (!file_mmap_ok(file, inode, pgoff, len))
@ -408,6 +410,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags |= VM_SHARED | VM_MAYSHARE;
if (!(file->f_mode & FMODE_WRITE))
vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
+else if (is_readonly_sealed(seals, vm_flags))
+vm_flags &= ~VM_MAYWRITE;
fallthrough;
case MAP_PRIVATE:
if (!(file->f_mode & FMODE_READ))
@ -888,7 +892,7 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
if (get_area) {
addr = get_area(file, addr, len, pgoff, flags);
-} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
+} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file
&& !addr /* no hint */
&& IS_ALIGNED(len, PMD_SIZE)) {
/* Ensures that larger anonymous mappings are THP aligned. */

@ -646,7 +646,11 @@ void page_cache_async_ra(struct readahead_control *ractl,
1UL << order);
if (index == expected) {
ra->start += ra->size;
-ra->size = get_next_ra_size(ra, max_pages);
+/*
+* In the case of MADV_HUGEPAGE, the actual size might exceed
+* the readahead window.
+*/
+ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
ra->async_size = ra->size;
goto readit;
}

@ -1535,7 +1535,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
!shmem_falloc->waitq &&
index >= shmem_falloc->start &&
index < shmem_falloc->next)
-shmem_falloc->nr_unswapped++;
+shmem_falloc->nr_unswapped += nr_pages;
else
shmem_falloc = NULL;
spin_unlock(&inode->i_lock);
@ -1689,6 +1689,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
unsigned long vm_flags = vma ? vma->vm_flags : 0;
+pgoff_t aligned_index;
bool global_huge;
loff_t i_size;
int order;
@ -1723,9 +1724,9 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
/* Allow mTHP that will be fully within i_size. */
order = highest_order(within_size_orders);
while (within_size_orders) {
-index = round_up(index + 1, order);
+aligned_index = round_up(index + 1, 1 << order);
i_size = round_up(i_size_read(inode), PAGE_SIZE);
-if (i_size >> PAGE_SHIFT >= index) {
+if (i_size >> PAGE_SHIFT >= aligned_index) {
mask |= within_size_orders;
break;
}
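The within_size fix above is a units bug: order is an exponent, so the alignment handed to round_up() must be a page count (1 << order), not the order itself. A standalone check of the arithmetic (illustrative, reusing the kernel's power-of-two round_up() definition):

#include <stdio.h>

/* Kernel's round_up() for power-of-two alignments. */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long index = 128, order = 4;	/* a 16-page (64KiB) mTHP */

	/* Buggy: treats the exponent as the alignment, aligning to 4 pages. */
	printf("round_up(index + 1, order)      = %lu\n",
	       round_up(index + 1, order));		/* 132 */
	/* Fixed: aligns to the actual 16-page boundary. */
	printf("round_up(index + 1, 1 << order) = %lu\n",
	       round_up(index + 1, 1UL << order));	/* 144 */
	return 0;
}

With the wrong alignment, the i_size check tested a too-small index and could wrongly allow large orders that extend past i_size.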

@ -297,12 +297,7 @@ void *memdup_user_nul(const void __user *src, size_t len)
{
char *p;
-/*
-* Always use GFP_KERNEL, since copy_from_user() can sleep and
-* cause pagefault, which makes it pointless to use GFP_NOFS
-* or GFP_ATOMIC.
-*/
-p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);

@ -374,7 +374,14 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
+/*
+* If there are no reclaimable file-backed or anonymous pages,
+* ensure zones with sufficient free pages are not skipped.
+* This prevents zones like DMA32 from being ignored in reclaim
+* scenarios where they can still help alleviate memory pressure.
+*/
+if (nr == 0)
+nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
return nr;
}

@ -2148,13 +2148,14 @@ static int vmstat_cpu_online(unsigned int cpu)
if (!node_state(cpu_to_node(cpu), N_CPU)) {
node_set_state(cpu_to_node(cpu), N_CPU);
}
+enable_delayed_work(&per_cpu(vmstat_work, cpu));
return 0;
}
static int vmstat_cpu_down_prep(unsigned int cpu)
{
-cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
return 0;
}

@ -880,6 +880,18 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
return 0;
}
+/* Prevent CPU hotplug from freeing up the per-CPU acomp_ctx resources */
+static struct crypto_acomp_ctx *acomp_ctx_get_cpu(struct crypto_acomp_ctx __percpu *acomp_ctx)
+{
+cpus_read_lock();
+return raw_cpu_ptr(acomp_ctx);
+}
+static void acomp_ctx_put_cpu(void)
+{
+cpus_read_unlock();
+}
static bool zswap_compress(struct page *page, struct zswap_entry *entry,
struct zswap_pool *pool)
{
@ -893,8 +905,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
gfp_t gfp;
u8 *dst;
-acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
+acomp_ctx = acomp_ctx_get_cpu(pool->acomp_ctx);
mutex_lock(&acomp_ctx->mutex);
dst = acomp_ctx->buffer;
@ -950,6 +961,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
zswap_reject_alloc_fail++;
mutex_unlock(&acomp_ctx->mutex);
+acomp_ctx_put_cpu();
return comp_ret == 0 && alloc_ret == 0;
}
@ -960,7 +972,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
struct crypto_acomp_ctx *acomp_ctx;
u8 *src;
-acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+acomp_ctx = acomp_ctx_get_cpu(entry->pool->acomp_ctx);
mutex_lock(&acomp_ctx->mutex);
src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
@ -990,6 +1002,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
if (src != acomp_ctx->buffer)
zpool_unmap_handle(zpool, entry->handle);
+acomp_ctx_put_cpu();
}
/*********************************
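The zswap hunks above follow a standard pattern for per-CPU resources that a CPU hot-unplug callback (here zswap_cpu_comp_dead()) can free: pin CPU hotplug state for as long as the resource is in use. In sketch form (the struct and helper names below are illustrative; cpus_read_lock()/cpus_read_unlock() and raw_cpu_ptr() are the real kernel APIs):

/* Sketch of the guard pattern, not kernel source. */
static struct my_pcpu_ctx *my_ctx_get(struct my_pcpu_ctx __percpu *ctx)
{
	cpus_read_lock();		/* holds off CPU hot-unplug callbacks */
	return raw_cpu_ptr(ctx);	/* per-CPU data cannot be freed under us */
}

static void my_ctx_put(void)
{
	cpus_read_unlock();	/* every my_ctx_get() needs a matching put */
}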

@ -110,7 +110,7 @@ static inline unsigned long orc_ip(const int *ip)
static int orc_sort_cmp(const void *_a, const void *_b)
{
-struct orc_entry *orc_a;
+struct orc_entry *orc_a, *orc_b;
const int *a = g_orc_ip_table + *(int *)_a;
const int *b = g_orc_ip_table + *(int *)_b;
unsigned long a_val = orc_ip(a);
@ -128,6 +128,9 @@ static int orc_sort_cmp(const void *_a, const void *_b)
* whitelisted .o files which didn't get objtool generation.
*/
orc_a = g_orc_table + (a - g_orc_ip_table);
+orc_b = g_orc_table + (b - g_orc_ip_table);
+if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
+return 0;
return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}
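The sorttable fix is about the C comparator contract: a qsort()-style comparator must be symmetric (cmp(a, b) == -cmp(b, a)) and transitive, but the old code returned -1 whenever the first entry was undefined, so two undefined entries compared as "less than" in both argument orders. A standalone illustration (simplified; the real comparator orders by IP first, and the enum values here are placeholders):

#include <stdio.h>

enum { ORC_TYPE_UNDEFINED, ORC_TYPE_REGS };	/* illustrative values */
struct orc_entry { int type; };

/* Old behavior: never returns 0, so cmp(a, b) and cmp(b, a) can both
 * be -1 when both entries are undefined, which is an invalid comparator. */
static int buggy_cmp(const struct orc_entry *a, const struct orc_entry *b)
{
	return a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}

/* Fixed behavior: equal inputs compare equal. */
static int fixed_cmp(const struct orc_entry *a, const struct orc_entry *b)
{
	if (a->type == ORC_TYPE_UNDEFINED && b->type == ORC_TYPE_UNDEFINED)
		return 0;
	return a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}

int main(void)
{
	struct orc_entry u = { ORC_TYPE_UNDEFINED }, v = { ORC_TYPE_UNDEFINED };

	printf("buggy: cmp(u,v)=%d, cmp(v,u)=%d  (asymmetric)\n",
	       buggy_cmp(&u, &v), buggy_cmp(&v, &u));
	printf("fixed: cmp(u,v)=%d, cmp(v,u)=%d\n",
	       fixed_cmp(&u, &v), fixed_cmp(&v, &u));
	return 0;
}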

@ -282,6 +282,24 @@ static void *mfd_assert_mmap_shared(int fd)
return p;
}
+static void *mfd_assert_mmap_read_shared(int fd)
+{
+void *p;
+p = mmap(NULL,
+mfd_def_size,
+PROT_READ,
+MAP_SHARED,
+fd,
+0);
+if (p == MAP_FAILED) {
+printf("mmap() failed: %m\n");
+abort();
+}
+return p;
+}
static void *mfd_assert_mmap_private(int fd)
{
void *p;
@ -980,6 +998,30 @@ static void test_seal_future_write(void)
close(fd);
}
+static void test_seal_write_map_read_shared(void)
+{
+int fd;
+void *p;
+printf("%s SEAL-WRITE-MAP-READ\n", memfd_str);
+fd = mfd_assert_new("kern_memfd_seal_write_map_read",
+mfd_def_size,
+MFD_CLOEXEC | MFD_ALLOW_SEALING);
+mfd_assert_add_seals(fd, F_SEAL_WRITE);
+mfd_assert_has_seals(fd, F_SEAL_WRITE);
+p = mfd_assert_mmap_read_shared(fd);
+mfd_assert_read(fd);
+mfd_assert_read_shared(fd);
+mfd_fail_write(fd);
+munmap(p, mfd_def_size);
+close(fd);
+}
/*
* Test SEAL_SHRINK
* Test whether SEAL_SHRINK actually prevents shrinking
@ -1593,6 +1635,7 @@ int main(int argc, char **argv)
test_seal_write();
test_seal_future_write();
+test_seal_write_map_read_shared();
test_seal_shrink();
test_seal_grow();
test_seal_resize();