From 633cde1b93945bd5a7e426d138220e678f33aba1 Mon Sep 17 00:00:00 2001
From: Wei Wang
Date: Wed, 15 Jul 2020 22:57:41 -0700
Subject: [PATCH] sched: fair: placement optimization for heavy load

Previously we used the pure CFS wakeup path in the overutilized case.
This is a tweaked version which activates that path only for important
tasks.

Bug: 161190988
Bug: 160883639
Test: boot and systrace
Signed-off-by: Wei Wang
Signed-off-by: Alexander Winkowski
Change-Id: I2a27f241b3ba32a04cf6f88deb483d6636440dcf
---
 kernel/sched/fair.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3121ad039471..171af59af20c 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7711,6 +7711,13 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 			if (best_idle_cpu != -1)
 				continue;
 
+			/*
+			 * Skip searching for an active CPU for tasks that
+			 * have high priority & stune.boost.
+			 */
+			if (boosted && p->prio <= DEFAULT_PRIO)
+				continue;
+
 			/*
 			 * Case A.2: Target ACTIVE CPU
 			 * Favor CPUs with max spare capacity.
@@ -7938,7 +7945,9 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 #ifdef CONFIG_SCHED_WALT
 	if (target_cpu == -1 && most_spare_cap_cpu != -1 &&
 	    /* ensure we use active cpu for active migration */
-	    !(p->state == TASK_RUNNING && !idle_cpu(most_spare_cap_cpu)))
+	    !(p->state == TASK_RUNNING && !idle_cpu(most_spare_cap_cpu)) &&
+	    /* do not pick an overutilized most_spare_cap_cpu */
+	    !cpu_overutilized(most_spare_cap_cpu))
 		target_cpu = most_spare_cap_cpu;
 #endif
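
Note: below is a minimal, standalone C sketch of the two placement guards
this patch adds, for readers skimming the diff. Everything in it is a
hypothetical stand-in rather than kernel API: fake_task, cpu_is_idle[],
cpu_is_overutilized[], skip_active_search() and spare_cap_fallback_ok()
merely mirror the roles of p, idle_cpu(), cpu_overutilized() and the
surrounding find_best_target() logic; only DEFAULT_PRIO matches the
kernel's value (120, i.e. nice 0).

/*
 * Standalone illustration of the two guards added by this patch.
 * All helpers and data below are simplified stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_PRIO 120	/* kernel priority of a nice-0 task */

struct fake_task {
	int prio;		/* lower value = higher priority */
	bool boosted;		/* stand-in for stune.boost */
	bool running;		/* stand-in for p->state == TASK_RUNNING */
};

/* Stand-ins for per-CPU state the real code reads from rq/capacity data. */
static bool cpu_is_idle[4]         = { false, true,  false, false };
static bool cpu_is_overutilized[4] = { false, false, true,  false };

/* Guard 1: boosted, high-priority tasks skip the active-CPU search. */
static bool skip_active_search(const struct fake_task *p)
{
	return p->boosted && p->prio <= DEFAULT_PRIO;
}

/*
 * Guard 2: most_spare_cap_cpu is only usable as the WALT fallback if it
 * is neither busy while an active migration needs an idle CPU, nor
 * already overutilized.
 */
static bool spare_cap_fallback_ok(const struct fake_task *p, int cpu)
{
	if (p->running && !cpu_is_idle[cpu])
		return false;	/* active migration wants an idle CPU */
	if (cpu_is_overutilized[cpu])
		return false;	/* do not pile onto an overutilized CPU */
	return true;
}

int main(void)
{
	struct fake_task ui_task = {
		.prio = 110, .boosted = true, .running = false
	};

	printf("skip active-CPU search: %d\n", skip_active_search(&ui_task));
	printf("fallback to CPU 2 ok:   %d\n", spare_cap_fallback_ok(&ui_task, 2));
	printf("fallback to CPU 3 ok:   %d\n", spare_cap_fallback_ok(&ui_task, 3));
	return 0;
}

The intent the sketch tries to make visible: the first guard keeps boosted,
high-priority tasks out of the spare-capacity packing path so they still
land on idle CPUs, while the second keeps the WALT most_spare_cap_cpu
fallback from stacking work onto a CPU that is already overutilized.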