diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0a23e23c4e62..3f836ecd457a 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -76,6 +76,8 @@ walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 
 #endif
 
+static inline bool cpu_is_in_target_set(struct task_struct *p, int cpu);
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  *
@@ -5806,7 +5808,8 @@ static inline bool bias_to_this_cpu(struct task_struct *p, int cpu,
 				    struct cpumask *rtg_target)
 {
 	bool base_test = cpumask_test_cpu(cpu, &p->cpus_allowed) &&
-			 cpu_active(cpu) && task_fits_max(p, cpu);
+			 cpu_active(cpu) && task_fits_max(p, cpu) &&
+			 cpu_is_in_target_set(p, cpu);
 	bool rtg_test = rtg_target && cpumask_test_cpu(cpu, rtg_target);
 
 	return base_test && (!rtg_target || rtg_test);
@@ -7442,6 +7445,21 @@ enum fastpaths {
 	MANY_WAKEUP,
 };
 
+/*
+ * Check whether cpu is in the fastest set of cpu's that p should run on.
+ * If p is boosted, prefer that p runs on a faster cpu; otherwise, allow p
+ * to run on any cpu.
+ */
+static inline bool
+cpu_is_in_target_set(struct task_struct *p, int cpu)
+{
+	struct root_domain *rd = cpu_rq(cpu)->rd;
+	int first_cpu = (schedtune_task_boost(p)) ?
+			rd->mid_cap_orig_cpu : rd->min_cap_orig_cpu;
+	int next_usable_cpu = cpumask_next(first_cpu - 1, &p->cpus_allowed);
+	return cpu >= next_usable_cpu || next_usable_cpu >= nr_cpu_ids;
+}
+
 static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 				   bool boosted, bool prefer_idle,
 				   struct find_best_target_env *fbt_env)
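
For reference, below is a minimal userspace sketch of the cpu_is_in_target_set() logic so it can be run and tested outside the kernel. The bitmask model of cpumask_next() and the example topology (min_cap_orig_cpu = 0, mid_cap_orig_cpu = 4 on an 8-CPU system) are assumptions made for illustration, not values taken from the patch. Note the first_cpu - 1 argument: cpumask_next() returns the first set bit strictly greater than its argument, so passing first_cpu - 1 makes the search include first_cpu itself.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8	/* stand-in for the kernel's nr_cpu_ids */

/*
 * Stand-in for the kernel's cpumask_next(n, mask): returns the first
 * set bit strictly greater than n, or NR_CPUS if there is none.
 */
static int mask_next(int n, unsigned long mask)
{
	int cpu;

	for (cpu = n + 1; cpu < NR_CPUS; cpu++)
		if (mask & (1UL << cpu))
			return cpu;
	return NR_CPUS;
}

/*
 * Mirror of the patched function's logic. The topology is an assumed
 * example: min_cap_orig_cpu = 0 (little), mid_cap_orig_cpu = 4 (mid).
 */
static bool cpu_in_target_set(bool boosted, unsigned long allowed, int cpu)
{
	int first_cpu = boosted ? 4 : 0;
	/* first_cpu - 1 so the search includes first_cpu itself */
	int next_usable_cpu = mask_next(first_cpu - 1, allowed);

	return cpu >= next_usable_cpu || next_usable_cpu >= NR_CPUS;
}

int main(void)
{
	unsigned long all = 0xff;		/* allowed on CPUs 0-7 */
	unsigned long little_only = 0x0f;	/* allowed on CPUs 0-3 only */

	printf("%d\n", cpu_in_target_set(true, all, 2));	 /* 0: boosted, little CPU rejected */
	printf("%d\n", cpu_in_target_set(true, all, 5));	 /* 1: boosted, mid CPU accepted */
	printf("%d\n", cpu_in_target_set(false, all, 2));	 /* 1: unboosted, any allowed CPU */
	printf("%d\n", cpu_in_target_set(true, little_only, 2)); /* 1: no mid/big allowed, fallback */
	return 0;
}

The last case shows the role of the next_usable_cpu >= nr_cpu_ids clause in the patch: if a boosted task's affinity mask contains no CPU at or above the mid-capacity cluster, the restriction is waived and any allowed CPU passes, so the task is never left without a placement.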