mm: hugetlb_vmemmap: optimize vmemmap_optimize_mode handling
We hold an extra reference to hugetlb_optimize_vmemmap_key when vmemmap_optimize_mode is switched on, because the static key is used to tell memory_hotplug that memory_hotplug.memmap_on_memory should be overridden. However, that rule no longer applies now that PageVmemmapSelfHosted has been introduced. Therefore, vmemmap_optimize_mode handling can be simplified by not holding the extra reference to hugetlb_optimize_vmemmap_key. After this change we also no longer incur the extra page_fixed_fake_head() checks when there are no vmemmap-optimized HugeTLB pages.

Link: https://lkml.kernel.org/r/20220628092235.91270-3-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Will Deacon <will@kernel.org>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
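[Editor's note] For readers unfamiliar with the static-key pattern the commit message relies on, below is a minimal, kernel-style sketch (not taken from this patch; the sketch_* names are hypothetical stand-ins) of the resulting arrangement: the knob is a plain bool that only gates whether new pages get optimized, while the static key counts pages whose vmemmap is actually optimized right now.

/*
 * Editor's sketch, not part of the patch. In the real code the inc/dec
 * happens in the hugetlb vmemmap free/restore paths and the bool is the
 * new vmemmap_optimize_enabled toggled by the boot parameter and sysctl.
 */
#include <linux/jump_label.h>
#include <linux/types.h>

/* Counts HugeTLB pages whose vmemmap is currently optimized. */
static DEFINE_STATIC_KEY_FALSE(sketch_optimize_key);

/* Plain bool knob: decides only whether *new* pages get optimized. */
static bool sketch_optimize_enabled = true;

/* Optimization path: gate on the knob, then bump the key per page. */
static void sketch_maybe_optimize_page(void)
{
	if (!sketch_optimize_enabled)
		return;
	static_branch_inc(&sketch_optimize_key);
	/* ... remap and free this page's vmemmap pages here ... */
}

/* Restore path: drop the per-page reference again. */
static void sketch_restore_page(void)
{
	/* ... reallocate and remap the vmemmap pages here ... */
	static_branch_dec(&sketch_optimize_key);
}

/*
 * Fast path, as in page_fixed_fake_head(): the branch stays patched out
 * until at least one page is actually optimized, regardless of the knob.
 */
static bool sketch_may_have_fake_head(void)
{
	return static_branch_unlikely(&sketch_optimize_key);
}

With this arrangement, flipping the boot parameter or the hugetlb_optimize_vmemmap sysctl shown in the diff below only updates the bool; the page_fixed_fake_head() branch remains compiled out until the first HugeTLB page is actually optimized, which is the saving the last sentence of the commit message refers to.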
commit cf5472e561
parent 2da1c30929
2 changed files with 9 additions and 62 deletions
include/linux/page-flags.h

@@ -205,8 +205,7 @@ enum pageflags {
 #ifndef __GENERATING_BOUNDS_H
 
 #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
-			 hugetlb_optimize_vmemmap_key);
+DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
 
 /*
  * If the feature of optimizing vmemmap pages associated with each HugeTLB
@@ -226,8 +225,7 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
  */
 static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
 {
-	if (!static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
-				 &hugetlb_optimize_vmemmap_key))
+	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
 		return page;
 
 	/*

mm/hugetlb_vmemmap.c

@@ -23,42 +23,15 @@
 #define RESERVE_VMEMMAP_NR		1U
 #define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 
-enum vmemmap_optimize_mode {
-	VMEMMAP_OPTIMIZE_OFF,
-	VMEMMAP_OPTIMIZE_ON,
-};
-
-DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
-			hugetlb_optimize_vmemmap_key);
+DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
 EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 
-static enum vmemmap_optimize_mode vmemmap_optimize_mode =
+static bool vmemmap_optimize_enabled =
 	IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 
-static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to)
-{
-	if (vmemmap_optimize_mode == to)
-		return;
-
-	if (to == VMEMMAP_OPTIMIZE_OFF)
-		static_branch_dec(&hugetlb_optimize_vmemmap_key);
-	else
-		static_branch_inc(&hugetlb_optimize_vmemmap_key);
-	WRITE_ONCE(vmemmap_optimize_mode, to);
-}
-
 static int __init hugetlb_vmemmap_early_param(char *buf)
 {
-	bool enable;
-	enum vmemmap_optimize_mode mode;
-
-	if (kstrtobool(buf, &enable))
-		return -EINVAL;
-
-	mode = enable ? VMEMMAP_OPTIMIZE_ON : VMEMMAP_OPTIMIZE_OFF;
-	vmemmap_optimize_mode_switch(mode);
-
-	return 0;
+	return kstrtobool(buf, &vmemmap_optimize_enabled);
 }
 early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param);
 
@@ -100,7 +73,7 @@ int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
 static unsigned int vmemmap_optimizable_pages(struct hstate *h,
 					      struct page *head)
 {
-	if (READ_ONCE(vmemmap_optimize_mode) == VMEMMAP_OPTIMIZE_OFF)
+	if (!READ_ONCE(vmemmap_optimize_enabled))
 		return 0;
 
 	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
@@ -191,7 +164,6 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
 
 	if (!is_power_of_2(sizeof(struct page))) {
 		pr_warn_once("cannot optimize vmemmap pages because \"struct page\" crosses page boundaries\n");
-		static_branch_disable(&hugetlb_optimize_vmemmap_key);
 		return;
 	}
 
@@ -212,36 +184,13 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
 }
 
 #ifdef CONFIG_PROC_SYSCTL
-static int hugetlb_optimize_vmemmap_handler(struct ctl_table *table, int write,
-					    void *buffer, size_t *length,
-					    loff_t *ppos)
-{
-	int ret;
-	enum vmemmap_optimize_mode mode;
-	static DEFINE_MUTEX(sysctl_mutex);
-
-	if (write && !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	mutex_lock(&sysctl_mutex);
-	mode = vmemmap_optimize_mode;
-	table->data = &mode;
-	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (write && !ret)
-		vmemmap_optimize_mode_switch(mode);
-	mutex_unlock(&sysctl_mutex);
-
-	return ret;
-}
-
 static struct ctl_table hugetlb_vmemmap_sysctls[] = {
 	{
 		.procname	= "hugetlb_optimize_vmemmap",
-		.maxlen		= sizeof(enum vmemmap_optimize_mode),
+		.data		= &vmemmap_optimize_enabled,
+		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= hugetlb_optimize_vmemmap_handler,
-		.extra1		= SYSCTL_ZERO,
-		.extra2		= SYSCTL_ONE,
+		.proc_handler	= proc_dobool,
 	},
 	{ }
 };