diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e85a2fd258e2..a5d3422f7d0d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4502,7 +4502,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
 	p->se.vlag			= 0;
-	p->se.slice			= sysctl_sched_min_granularity;
+	p->se.slice			= sysctl_sched_base_slice;
 	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index f8d190c7c8c0..4c3d0d9f3db6 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -347,7 +347,7 @@ static __init int sched_init_debug(void)
 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
 #endif
 
-	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
 
 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
@@ -863,7 +863,7 @@ static void sched_debug_header(struct seq_file *m)
 	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
 #define PN(x) \
 	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
-	PN(sysctl_sched_min_granularity);
+	PN(sysctl_sched_base_slice);
 	P(sysctl_sched_child_runs_first);
 	P(sysctl_sched_features);
 #undef PN
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0605eb45c58a..61747a25d06d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -75,8 +75,8 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
  *
  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity			= 750000ULL;
-static unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;
+unsigned int sysctl_sched_base_slice			= 750000ULL;
+static unsigned int normalized_sysctl_sched_base_slice	= 750000ULL;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -237,7 +237,7 @@ static void update_sysctl(void)
 
 #define SET_SYSCTL(name) \
 	(sysctl_##name = (factor) * normalized_sysctl_##name)
-	SET_SYSCTL(sched_min_granularity);
+	SET_SYSCTL(sched_base_slice);
 #undef SET_SYSCTL
 }
 
@@ -943,7 +943,7 @@ int sched_update_scaling(void)
 
 #define WRT_SYSCTL(name) \
 	(normalized_sysctl_##name = sysctl_##name / (factor))
-	WRT_SYSCTL(sched_min_granularity);
+	WRT_SYSCTL(sched_base_slice);
 #undef WRT_SYSCTL
 
 	return 0;
@@ -964,9 +964,9 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	/*
 	 * For EEVDF the virtual time slope is determined by w_i (iow.
 	 * nice) while the request time r_i is determined by
-	 * sysctl_sched_min_granularity.
+	 * sysctl_sched_base_slice.
 	 */
-	se->slice = sysctl_sched_min_granularity;
+	se->slice = sysctl_sched_base_slice;
 
 	/*
 	 * EEVDF: vd_i = ve_i + r_i / w_i
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f814bb731235..7ff9965570e6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2503,7 +2503,7 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
 
-extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_base_slice;
 
 #ifdef CONFIG_SCHED_DEBUG
 extern int sysctl_resched_latency_warn_ms;
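
A note on the update_deadline() hunk above, where the renamed knob actually matters: sysctl_sched_base_slice supplies the per-entity request size r_i, and the virtual deadline is then vd_i = ve_i + r_i / w_i, with w_i the entity's load weight (derived from nice). The standalone sketch below only illustrates that arithmetic; toy_entity, toy_update_deadline() and the simplified calc_delta() are hypothetical stand-ins rather than the kernel's structures or helpers, though the 1024 and 335 weights mirror the nice-0 and nice-+5 entries of sched_prio_to_weight.

/*
 * Illustrative sketch, not kernel code: how the request size r_i
 * (sysctl_sched_base_slice) and the weight w_i combine into the
 * EEVDF virtual deadline vd_i = ve_i + r_i / w_i.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int sysctl_sched_base_slice = 750000U;	/* 0.75 msec, in ns */

struct toy_entity {
	uint64_t vruntime;	/* ve_i: virtual runtime */
	uint64_t deadline;	/* vd_i: virtual deadline */
	uint64_t slice;		/* r_i: request size, in ns */
	unsigned long weight;	/* w_i: load weight, nice 0 == 1024 */
};

/* r_i / w_i, scaled so a nice-0 entity (weight 1024) sees r_i unchanged */
static uint64_t calc_delta(uint64_t delta, const struct toy_entity *se)
{
	return delta * 1024 / se->weight;
}

/* Same shape as the update_deadline() hunk: pick r_i, then set vd_i */
static void toy_update_deadline(struct toy_entity *se)
{
	se->slice = sysctl_sched_base_slice;
	se->deadline = se->vruntime + calc_delta(se->slice, se);
}

int main(void)
{
	struct toy_entity nice0 = { .vruntime = 0, .weight = 1024 };	/* nice  0 */
	struct toy_entity nice5 = { .vruntime = 0, .weight = 335 };	/* nice +5 */

	toy_update_deadline(&nice0);
	toy_update_deadline(&nice5);

	/* Same r_i for both, but the lighter entity's deadline lands further out */
	printf("nice  0: vd_i = %llu ns\n", (unsigned long long)nice0.deadline);
	printf("nice +5: vd_i = %llu ns\n", (unsigned long long)nice5.deadline);
	return 0;
}

With these weights the nice-0 entity gets a virtual deadline 750000 ns past its vruntime while the nice-+5 entity gets roughly 2292537 ns, which is why the rename from "min granularity" to "base slice" better describes the tunable's role as the default request size rather than a preemption floor.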