Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm: Create/affine kcompactd to its preferred node
Kcompactd is dedicated to a specific node. As such it wants to be preferably affine to it, memory- and CPU-wise. Use the proper kthread API to achieve that. As a bonus, the kthread core then takes care of CPU-hotplug events and CPU isolation on its behalf.

Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
parent d1a8919758
commit 54880b5a2b
1 changed file with 3 additions and 40 deletions
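Before the diff itself, a minimal sketch of the pattern the changelog refers to: create a per-node kthread with kthread_create_on_node() and let the kthread core place and affine it, rather than having the thread bind itself. The demo_* names are made up for illustration and are not part of this patch.

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical per-node worker, shown only to illustrate the creation pattern. */
static int demo_threadfn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *demo_start_on_node(int nid)
{
	struct task_struct *tsk;

	/*
	 * Passing the node lets the kthread core allocate the thread on
	 * @nid and, with preferred-affinity support, keep it running on
	 * that node's CPUs without the thread touching its own cpumask.
	 */
	tsk = kthread_create_on_node(demo_threadfn, NULL, nid, "demo/%d", nid);
	if (IS_ERR(tsk))
		return NULL;
	wake_up_process(tsk);
	return tsk;
}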
mm/compaction.c
@@ -3154,15 +3154,9 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
 static int kcompactd(void *p)
 {
 	pg_data_t *pgdat = (pg_data_t *)p;
-	struct task_struct *tsk = current;
 	long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
 	long timeout = default_timeout;
 
-	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
-
-	if (!cpumask_empty(cpumask))
-		set_cpus_allowed_ptr(tsk, cpumask);
-
 	set_freezable();
 
 	pgdat->kcompactd_max_order = 0;
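The lines dropped here are the usual "bind yourself to your node" prologue. For clarity, here is that removed idiom pulled out into a standalone helper (the demo_* name is illustrative; the calls are the ones deleted above):

#include <linux/cpumask.h>
#include <linux/mmzone.h>
#include <linux/sched.h>
#include <linux/topology.h>

/*
 * The removed idiom in isolation: the kthread affines itself to its
 * node's CPUs once, at startup.  CPU hotplug had to be patched up by a
 * separate callback (removed further down) and isolated CPUs were not
 * filtered out -- the two gaps the kthread core now covers.
 */
static void demo_self_affine(pg_data_t *pgdat)
{
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);
}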
@@ -3233,10 +3227,12 @@ void __meminit kcompactd_run(int nid)
 	if (pgdat->kcompactd)
 		return;
 
-	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
+	pgdat->kcompactd = kthread_create_on_node(kcompactd, pgdat, nid, "kcompactd%d", nid);
 	if (IS_ERR(pgdat->kcompactd)) {
 		pr_err("Failed to start kcompactd on node %d\n", nid);
 		pgdat->kcompactd = NULL;
+	} else {
+		wake_up_process(pgdat->kcompactd);
 	}
 }
 
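For readers without <linux/kthread.h> in their head: kthread_run() is only a convenience macro that creates the thread with no node hint and wakes it on success, roughly as sketched below (illustrative helper, not from the patch). That is why the hunk open-codes create-then-wake once it wants to pass nid down.

#include <linux/err.h>
#include <linux/kthread.h>

/*
 * Roughly what kthread_run() expands to, minus the node argument:
 * kthread_create() is itself kthread_create_on_node() with NUMA_NO_NODE.
 */
static struct task_struct *demo_kthread_run_equiv(int (*fn)(void *),
						  void *data, const char *name)
{
	struct task_struct *tsk;

	tsk = kthread_create(fn, data, "%s", name);	/* node = NUMA_NO_NODE */
	if (!IS_ERR(tsk))
		wake_up_process(tsk);
	return tsk;
}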
@@ -3254,30 +3250,6 @@ void __meminit kcompactd_stop(int nid)
 	}
 }
 
-/*
- * It's optimal to keep kcompactd on the same CPUs as their memory, but
- * not required for correctness. So if the last cpu in a node goes
- * away, we get changed to run anywhere: as the first one comes back,
- * restore their cpu bindings.
- */
-static int kcompactd_cpu_online(unsigned int cpu)
-{
-	int nid;
-
-	for_each_node_state(nid, N_MEMORY) {
-		pg_data_t *pgdat = NODE_DATA(nid);
-		const struct cpumask *mask;
-
-		mask = cpumask_of_node(pgdat->node_id);
-
-		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
-			/* One of our CPUs online: restore mask */
-			if (pgdat->kcompactd)
-				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
-	}
-	return 0;
-}
-
 static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table,
 		int write, void *buffer, size_t *lenp, loff_t *ppos)
 {
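One detail of the deleted callback that is easy to misread is the online check. Restated as a standalone predicate (illustrative name, same calls as the removed code):

#include <linux/cpumask.h>
#include <linux/types.h>

/*
 * cpumask_any_and() returns an arbitrary CPU from the intersection of
 * the two masks, or a value >= nr_cpu_ids when the intersection is
 * empty.  So the deleted condition simply meant "this node still has at
 * least one online CPU", in which case the binding was restored.
 */
static bool demo_node_has_online_cpu(const struct cpumask *node_mask)
{
	return cpumask_any_and(cpu_online_mask, node_mask) < nr_cpu_ids;
}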
@@ -3337,15 +3309,6 @@ static struct ctl_table vm_compaction[] = {
 static int __init kcompactd_init(void)
 {
 	int nid;
-	int ret;
 
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-					"mm/compaction:online",
-					kcompactd_cpu_online, NULL);
-	if (ret < 0) {
-		pr_err("kcompactd: failed to register hotplug callbacks.\n");
-		return ret;
-	}
-
 	for_each_node_state(nid, N_MEMORY)
 		kcompactd_run(nid);
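After this final hunk, the init routine is reduced to per-node startup. Reconstructed from the context lines above (the trailing return 0 lies outside the hunk and is assumed), it reads roughly:

/* Sketch of the resulting function, not an excerpt from the patch. */
static int __init kcompactd_init(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	return 0;
}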