sched: split min_task_util_for_boost_colocation into separate tunables

Replace sysctl_sched_min_task_util_for_boost_colocation with two
independent tunables, sysctl_sched_min_task_util_for_boost and
sysctl_sched_min_task_util_for_colocation, so the boost threshold and
the colocation threshold can be configured separately. Add the
is_task_util_above_min_thresh() helper to select the threshold based on
the current boost mode, and register both knobs in kernel/sysctl.c.

Change-Id: Iba464885e8b2172f955cfba3bd6d55743d790b32
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
tirimbino
Satya Durga Srinivasu Prabhala 6 years ago
parent f6f026ce6f
commit 5be9b47afc
  1. 3
      include/linux/sched/sysctl.h
  2. 17
      kernel/sched/fair.c
  3. 2
      kernel/sched/sched.h
  4. 13
      kernel/sysctl.c

@@ -41,7 +41,8 @@ extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
extern unsigned int sysctl_sched_walt_rotate_big_tasks;
extern unsigned int sysctl_sched_min_task_util_for_boost_colocation;
extern unsigned int sysctl_sched_min_task_util_for_boost;
extern unsigned int sysctl_sched_min_task_util_for_colocation;
extern unsigned int sysctl_sched_little_cluster_coloc_fmin_khz;
extern int

@@ -199,7 +199,9 @@ unsigned int sched_capacity_margin_down[NR_CPUS] = {
#ifdef CONFIG_SCHED_WALT
/* 1ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_boost_colocation = 51;
unsigned int sysctl_sched_min_task_util_for_boost = 51;
/* 0.68ms default for 20ms window size scaled to 1024 */
unsigned int sysctl_sched_min_task_util_for_colocation = 35;
#endif
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
@@ -7953,6 +7955,15 @@ static inline int wake_to_idle(struct task_struct *p)
}
#ifdef CONFIG_SCHED_WALT
/*
 * is_task_util_above_min_thresh - check whether a task's utilization
 * exceeds the mode-dependent minimum threshold.
 *
 * Under conservative boost the boost threshold applies; in every other
 * mode the colocation threshold is used instead.
 */
static inline bool is_task_util_above_min_thresh(struct task_struct *p)
{
	unsigned int thresh;

	if (sysctl_sched_boost == CONSERVATIVE_BOOST)
		thresh = sysctl_sched_min_task_util_for_boost;
	else
		thresh = sysctl_sched_min_task_util_for_colocation;

	return task_util(p) > thresh;
}
static inline struct cpumask *find_rtg_target(struct task_struct *p)
{
struct related_thread_group *grp;
@@ -7961,9 +7972,7 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
rcu_read_lock();
grp = task_related_thread_group(p);
if (grp && grp->preferred_cluster &&
(task_util(p) >
sysctl_sched_min_task_util_for_boost_colocation)) {
if (grp && grp->preferred_cluster && is_task_util_above_min_thresh(p)) {
rtg_target = &grp->preferred_cluster->cpus;
if (!task_fits_max(p, cpumask_first(rtg_target)))
rtg_target = NULL;

@@ -2940,7 +2940,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
*/
if (sysctl_sched_boost == CONSERVATIVE_BOOST &&
task_util(p) <=
sysctl_sched_min_task_util_for_boost_colocation)
sysctl_sched_min_task_util_for_boost)
policy = SCHED_BOOST_NONE;
}

@@ -378,8 +378,17 @@ static struct ctl_table kern_table[] = {
.extra2 = &one,
},
{
.procname = "sched_min_task_util_for_boost_colocation",
.data = &sysctl_sched_min_task_util_for_boost_colocation,
.procname = "sched_min_task_util_for_boost",
.data = &sysctl_sched_min_task_util_for_boost,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one_thousand,
},
{
.procname = "sched_min_task_util_for_colocation",
.data = &sysctl_sched_min_task_util_for_colocation,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,

Loading…
Cancel
Save