sched: fair: avoid little cpus due to sync, prev bias

Important threads can get forced onto little CPUs
when the sync or prev_bias hints are followed
blindly. This patch adds a check for whether
those paths would force the task onto a CPU that
has less capacity than other CPUs available to
the task. If so, ignore the sync and prev_bias
hints and let the scheduler make a free decision.

Bug: 117438867
Change-Id: Ie5a99f9a8b65ba9382a8d0de2ae0aad843e558d1
Signed-off-by: Miguel de Dios <migueldedios@google.com>
Signed-off-by: Alexander Winkowski <dereference23@outlook.com>
fourteen
Miguel de Dios 6 years ago committed by Jenna
parent ac745ec4c1
commit 9b49c18f89
  1. 20
      kernel/sched/fair.c

@@ -76,6 +76,8 @@ walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
#endif

static inline bool cpu_is_in_target_set(struct task_struct *p, int cpu);

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
@@ -5806,7 +5808,8 @@ static inline bool
bias_to_this_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
{
	bool base_test = cpumask_test_cpu(cpu, &p->cpus_allowed) &&
			cpu_active(cpu) && task_fits_max(p, cpu) &&
			cpu_is_in_target_set(p, cpu);
	bool rtg_test = rtg_target && cpumask_test_cpu(cpu, rtg_target);

	return base_test && (!rtg_target || rtg_test);
@@ -7442,6 +7445,21 @@ enum fastpaths {
	MANY_WAKEUP,
};
/*
* Check whether cpu is in the fastest set of cpu's that p should run on.
* If p is boosted, prefer that p runs on a faster cpu; otherwise, allow p
* to run on any cpu.
*/
static inline bool
cpu_is_in_target_set(struct task_struct *p, int cpu)
{
struct root_domain *rd = cpu_rq(cpu)->rd;
int first_cpu = (schedtune_task_boost(p)) ?
rd->mid_cap_orig_cpu : rd->min_cap_orig_cpu;
int next_usable_cpu = cpumask_next(first_cpu - 1, &p->cpus_allowed);
return cpu >= next_usable_cpu || next_usable_cpu >= nr_cpu_ids;
}
static inline int find_best_target(struct task_struct *p, int *backup_cpu,
				   bool boosted, bool prefer_idle,
				   struct find_best_target_env *fbt_env)

Loading…
Cancel
Save