sched: Define sched_clock_irqtime as static key
Since CPU time accounting is a performance-critical path, let's define
sched_clock_irqtime as a static key to minimize potential overhead.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Michal Koutný <mkoutny@suse.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250103022409.2544-2-laoar.shao@gmail.com
parent 3229adbe78
commit 8722903cbb

2 changed files with 20 additions and 9 deletions
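The core of the change is the kernel's static key (jump label) mechanism: the old integer flag sched_clock_irqtime required a memory load on every accounting call, whereas a static key is patched directly into the instruction stream, so the disabled case costs only a fall-through branch. Below is a condensed sketch of the pattern, assembled from the hunks that follow; it assumes a kernel build context (<linux/jump_label.h>) and is not a standalone module.

#include <linux/jump_label.h>

/* The key starts off false: the fast-path branch is initially compiled out. */
DEFINE_STATIC_KEY_FALSE(sched_clock_irqtime);

/* Fast-path check: a patched jump/nop instead of a memory load. */
static inline int irqtime_enabled(void)
{
	return static_branch_likely(&sched_clock_irqtime);
}

/* Slow-path toggles: rewrite the branch sites at runtime. */
void enable_sched_clock_irqtime(void)
{
	static_branch_enable(&sched_clock_irqtime);
}

void disable_sched_clock_irqtime(void)
{
	static_branch_disable(&sched_clock_irqtime);
}

Callers on the accounting paths then test irqtime_enabled() instead of reading the old flag, as the hunks below show.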
kernel/sched/cputime.c

@@ -9,6 +9,8 @@
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 
+DEFINE_STATIC_KEY_FALSE(sched_clock_irqtime);
+
 /*
  * There are no locks covering percpu hardirq/softirq time.
  * They are only modified in vtime_account, on corresponding CPU
@@ -22,16 +24,14 @@
  */
 DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
 
-static int sched_clock_irqtime;
-
 void enable_sched_clock_irqtime(void)
 {
-	sched_clock_irqtime = 1;
+	static_branch_enable(&sched_clock_irqtime);
 }
 
 void disable_sched_clock_irqtime(void)
 {
-	sched_clock_irqtime = 0;
+	static_branch_disable(&sched_clock_irqtime);
 }
 
 static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
@@ -57,7 +57,7 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
 	s64 delta;
 	int cpu;
 
-	if (!sched_clock_irqtime)
+	if (!irqtime_enabled())
 		return;
 
 	cpu = smp_processor_id();
@@ -90,8 +90,6 @@ static u64 irqtime_tick_accounted(u64 maxtime)
 
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 
-#define sched_clock_irqtime	(0)
-
 static u64 irqtime_tick_accounted(u64 dummy)
 {
 	return 0;
@@ -478,7 +476,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 	if (vtime_accounting_enabled_this_cpu())
 		return;
 
-	if (sched_clock_irqtime) {
+	if (irqtime_enabled()) {
 		irqtime_account_process_tick(p, user_tick, 1);
 		return;
 	}
@@ -507,7 +505,7 @@ void account_idle_ticks(unsigned long ticks)
 {
 	u64 cputime, steal;
 
-	if (sched_clock_irqtime) {
+	if (irqtime_enabled()) {
 		irqtime_account_idle_ticks(ticks);
 		return;
 	}
kernel/sched/sched.h

@@ -3233,6 +3233,12 @@ struct irqtime {
 };
 
 DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
+DECLARE_STATIC_KEY_FALSE(sched_clock_irqtime);
+
+static inline int irqtime_enabled(void)
+{
+	return static_branch_likely(&sched_clock_irqtime);
+}
 
 /*
  * Returns the irqtime minus the softirq time computed by ksoftirqd.
@@ -3253,6 +3259,13 @@ static inline u64 irq_time_read(int cpu)
 	return total;
 }
 
+#else
+
+static inline int irqtime_enabled(void)
+{
+	return 0;
+}
+
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #ifdef CONFIG_CPU_FREQ