From ac745ec4c16a924a503fdc7a371e2d3ddfaa52f4 Mon Sep 17 00:00:00 2001
From: Diep Quynh
Date: Mon, 12 Apr 2021 03:11:56 +0700
Subject: [PATCH] Revert "sched/fair: Don't let tasks slip away from gold to
 silver cluster"

If we ever wanted to stop big tasks from being balanced out to the
smaller cluster, that was already handled by checking the tasks in
can_migrate_task() with schedtune.

This commit, however, introduced a bug where light (non-big) tasks were
not balanced out to the smaller cluster either, occasionally leaving the
big cluster overloaded.

This reverts commit d89e049987ba496c73a98a2aa060d2402adb8d0e.

Change-Id: I72bbd526c8b4e0aabb7533cbdb5141593016dbb5
Signed-off-by: Diep Quynh
Signed-off-by: Alexander Winkowski
---
 kernel/sched/fair.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b1508400c85c..0a23e23c4e62 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -203,7 +203,6 @@ unsigned int sysctl_sched_min_task_util_for_boost = 51;
 /* 0.68ms default for 20ms window size scaled to 1024 */
 unsigned int sysctl_sched_min_task_util_for_colocation = 35;
 #endif
-static unsigned int __maybe_unused sched_small_task_threshold = 102;
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
@@ -10876,8 +10875,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		 */
 		if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
 		    capacity_of(env->dst_cpu) < capacity &&
-		    (rq->nr_running == 1 || (rq->nr_running == 2 &&
-		     task_util(rq->curr) < sched_small_task_threshold)))
+		    rq->nr_running == 1)
 			continue;
 
 		wl = weighted_cpuload(rq);