@@ -5968,7 +5968,10 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
#ifdef CONFIG_SCHED_WALT
	util = max_t(long, cpu_util(cpu) - task_util(p), 0);
#else
#ifndef CONFIG_SCHED_WALT
	struct cfs_rq *cfs_rq;
#endif
	cfs_rq = &cpu_rq(cpu)->cfs;
	util = READ_ONCE(cfs_rq->avg.util_avg);
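
The hunk above takes the WALT path instead of the PELT read of cfs_rq->avg.util_avg: the task's utilization is subtracted from the CPU's in a signed type so the result can be clamped at zero, since utilization counters are unsigned and a plain subtraction could wrap. A minimal standalone sketch of that clamp follows; it is not kernel code, max_t() is modeled with a plain helper, and the example values are made up.

#include <stdio.h>

static long max_long(long a, long b)
{
	return a > b ? a : b;
}

static unsigned long cpu_util_without_sketch(unsigned long cpu_util,
					     unsigned long task_util)
{
	/* Signed subtraction clamped at 0, as in the WALT branch above. */
	return (unsigned long)max_long((long)cpu_util - (long)task_util, 0);
}

int main(void)
{
	/* 347 - 512 would wrap to a huge unsigned value without the clamp. */
	printf("%lu\n", cpu_util_without_sketch(347, 512));	/* prints 0 */
	printf("%lu\n", cpu_util_without_sketch(512, 347));	/* prints 165 */
	return 0;
}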
@@ -7795,7 +7798,9 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
		if (!prefer_idle && !boosted &&
		    (target_cpu != -1 || best_idle_cpu != -1) &&
		    (fbt_env->placement_boost == SCHED_BOOST_NONE ||
#ifdef CONFIG_SCHED_WALT
		     sched_boost() != FULL_THROTTLE_BOOST ||
#endif
		     (fbt_env->placement_boost == SCHED_BOOST_ON_BIG &&
		      !next_group_higher_cap)))
			break;
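
The second hunk guards a WALT-only disjunct, sched_boost() != FULL_THROTTLE_BOOST, so the compound condition still compiles when CONFIG_SCHED_WALT is unset. The sketch below shows the same pattern with stand-in symbols (sched_boost_stub() and a local enum); only the #ifdef-inside-a-condition structure is taken from the hunk, everything else is invented for illustration.

#include <stdio.h>
#include <stdbool.h>

#define CONFIG_SCHED_WALT	/* delete this line to build the non-WALT variant */

enum sched_boost_type { SCHED_BOOST_NONE, SCHED_BOOST_ON_BIG, FULL_THROTTLE_BOOST };

#ifdef CONFIG_SCHED_WALT
static enum sched_boost_type sched_boost_stub(void)
{
	return SCHED_BOOST_NONE;	/* pretend full-throttle boost is inactive */
}
#endif

int main(void)
{
	enum sched_boost_type placement_boost = SCHED_BOOST_ON_BIG;
	bool next_group_higher_cap = false;

	/* The WALT-only disjunct exists only when CONFIG_SCHED_WALT is defined. */
	if (placement_boost == SCHED_BOOST_NONE ||
#ifdef CONFIG_SCHED_WALT
	    sched_boost_stub() != FULL_THROTTLE_BOOST ||
#endif
	    (placement_boost == SCHED_BOOST_ON_BIG && !next_group_higher_cap))
		printf("stop scanning: acceptable target already found\n");
	return 0;
}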
@@ -9430,9 +9435,11 @@ redo:
			continue;
next:
#ifdef CONFIG_SCHED_WALT
		trace_sched_load_balance_skip_tasks(env->src_cpu, env->dst_cpu,
				env->src_grp_type, p->pid, load, task_util(p),
				cpumask_bits(&p->cpus_allowed)[0]);
#endif
		list_move_tail(&p->se.group_node, tasks);
	}
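
The last hunk compiles out a call to trace_sched_load_balance_skip_tasks(), a WALT-specific tracepoint, so builds without CONFIG_SCHED_WALT no longer reference a trace symbol that does not exist for them. Below is a standalone sketch of that pattern with a printf stub standing in for the real tracepoint; names and the surrounding function are invented, only the #ifdef structure mirrors the hunk.

#include <stdio.h>

#define CONFIG_SCHED_WALT	/* delete this line to compile the tracepoint out */

#ifdef CONFIG_SCHED_WALT
/* Stub standing in for trace_sched_load_balance_skip_tasks(). */
static void trace_skip_task_stub(int src_cpu, int dst_cpu, int pid)
{
	printf("skipped task %d while balancing %d -> %d\n",
	       pid, src_cpu, dst_cpu);
}
#endif

static void detach_one_task_sketch(int src_cpu, int dst_cpu, int pid)
{
#ifdef CONFIG_SCHED_WALT
	trace_skip_task_stub(src_cpu, dst_cpu, pid);
#else
	(void)src_cpu; (void)dst_cpu; (void)pid;	/* silence unused warnings */
#endif
	/* ...then requeue the task, as list_move_tail() does in the hunk. */
}

int main(void)
{
	detach_one_task_sketch(0, 2, 1234);
	return 0;
}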