sched,rt: Use cpumask_any*_distribute()
Replace a bunch of cpumask_any*() instances with cpumask_any*_distribute(). By injecting this little bit of randomness into the CPU selection, we reduce the chance that two competing balance operations working off the same lowest_mask pick the same CPU.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102347.190759694@infradead.org
parent 3015ef4b98
commit 14e292f8d4
4 changed files with 30 additions and 6 deletions
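Why this matters: with cpumask_first()-style selection, every balancer scanning the same mask picks the identical CPU, so concurrent push operations pile onto one target. Below is a minimal user-space C model of the two policies — hypothetical illustration code only, not the kernel helpers, which operate on struct cpumask with per-CPU state:

/*
 * Hypothetical user-space model of the two selection policies.
 * A 64-bit word stands in for struct cpumask; `prev` stands in
 * for the kernel's per-CPU distribute_cpu_mask_prev counter.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

/* "first set bit" policy: every caller gets the same answer */
static int pick_first(uint64_t mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1ULL << cpu))
			return cpu;
	return NR_CPUS;			/* empty mask, like >= nr_cpu_ids */
}

/* "distribute" policy: resume the circular search after the last pick */
static int pick_distribute(uint64_t mask, int *prev)
{
	for (int i = 1; i <= NR_CPUS; i++) {
		int cpu = (*prev + i) % NR_CPUS;

		if (mask & (1ULL << cpu)) {
			*prev = cpu;
			return cpu;
		}
	}
	return NR_CPUS;
}

int main(void)
{
	/* CPUs 2, 3, 5 and 6 are eligible targets */
	uint64_t lowest_mask = (1ULL << 2) | (1ULL << 3) |
			       (1ULL << 5) | (1ULL << 6);
	int prev = 0;			/* fresh counter: first pick skips CPU 0 */

	/* two competing balance ops using the first-bit policy collide: */
	printf("first:      %d %d\n",
	       pick_first(lowest_mask), pick_first(lowest_mask));	/* 2 2 */

	/* with the distribute policy, successive picks spread out: */
	int a = pick_distribute(lowest_mask, &prev);			/* 2 */
	int b = pick_distribute(lowest_mask, &prev);			/* 3 */
	int c = pick_distribute(lowest_mask, &prev);			/* 5 */
	printf("distribute: %d %d %d\n", a, b, c);
	return 0;
}

In the kernel the cursor is per-CPU rather than shared, so two balancers racing on different CPUs start from wherever their own CPU last left off — that is the "little bit of random" the changelog refers to.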
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -199,6 +199,11 @@ static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
 	return cpumask_next_and(-1, src1p, src2p);
 }
 
+static inline int cpumask_any_distribute(const struct cpumask *srcp)
+{
+	return cpumask_first(srcp);
+}
+
 #define for_each_cpu(cpu, mask)			\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)		\
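This first hunk sits in the uniprocessor half of cpumask.h, as the degenerate for_each_cpu() below it shows: with a single CPU there is nothing to distribute, so the stub simply defers to cpumask_first().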
@@ -252,6 +257,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 unsigned int cpumask_local_spread(unsigned int i, int node);
 int cpumask_any_and_distribute(const struct cpumask *src1p,
 			       const struct cpumask *src2p);
+int cpumask_any_distribute(const struct cpumask *srcp);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2002,7 +2002,7 @@ static int find_later_rq(struct task_struct *task)
 			return this_cpu;
 		}
 
-		best_cpu = cpumask_first_and(later_mask,
+		best_cpu = cpumask_any_and_distribute(later_mask,
 							sched_domain_span(sd));
 		/*
 		 * Last chance: if a CPU being in both later_mask
@@ -2025,7 +2025,7 @@ static int find_later_rq(struct task_struct *task)
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any(later_mask);
+	cpu = cpumask_any_distribute(later_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1752,7 +1752,7 @@ static int find_lowest_rq(struct task_struct *task)
 			return this_cpu;
 		}
 
-		best_cpu = cpumask_first_and(lowest_mask,
+		best_cpu = cpumask_any_and_distribute(lowest_mask,
 						  sched_domain_span(sd));
 		if (best_cpu < nr_cpu_ids) {
 			rcu_read_unlock();
@@ -1770,7 +1770,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any(lowest_mask);
+	cpu = cpumask_any_distribute(lowest_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -267,3 +267,21 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
 	return next;
 }
 EXPORT_SYMBOL(cpumask_any_and_distribute);
+
+int cpumask_any_distribute(const struct cpumask *srcp)
+{
+	int next, prev;
+
+	/* NOTE: our first selection will skip 0. */
+	prev = __this_cpu_read(distribute_cpu_mask_prev);
+
+	next = cpumask_next(prev, srcp);
+	if (next >= nr_cpu_ids)
+		next = cpumask_first(srcp);
+
+	if (next < nr_cpu_ids)
+		__this_cpu_write(distribute_cpu_mask_prev, next);
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_any_distribute);
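Two details of this implementation are worth noting. First, the cursor is per-CPU state: assuming distribute_cpu_mask_prev is the same per-CPU variable already read by cpumask_any_and_distribute() just above in this file, both helpers share one cursor per CPU and need no locking, since each CPU only ever touches its own copy. Second, cpumask_next(prev, srcp) returns the first set bit strictly after prev, so a freshly zeroed counter skips CPU 0 on the very first call — exactly what the NOTE comment warns about — while the fallback to cpumask_first() makes the search wrap around circularly.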