@@ -199,7 +199,9 @@ unsigned int sched_capacity_margin_down[NR_CPUS] = {
 
 #ifdef CONFIG_SCHED_WALT
 /* 1ms default for 20ms window size scaled to 1024 */
-unsigned int sysctl_sched_min_task_util_for_boost_colocation = 51;
+unsigned int sysctl_sched_min_task_util_for_boost = 51;
+/* 0.68ms default for 20ms window size scaled to 1024 */
+unsigned int sysctl_sched_min_task_util_for_colocation = 35;
 #endif
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
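The defaults above encode a fraction of the 20ms WALT window on the kernel's 0..1024 utilization scale: N ms of runtime maps to N * 1024 / 20, so 1ms gives 51 and 0.68ms gives roughly 35. A quick standalone check of that arithmetic (illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* 1ms of a 20ms window: 1 * 1024 / 20 = 51 (truncated from 51.2) */
	printf("boost default: %d\n", 1 * 1024 / 20);
	/* 0.68ms of a 20ms window: 0.68 * 1024 / 20 = 34.8, rounded to 35 */
	printf("colocation default: %.1f\n", 0.68 * 1024 / 20);
	return 0;
}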
@@ -7953,6 +7955,15 @@ static inline int wake_to_idle(struct task_struct *p)
 }
 
 #ifdef CONFIG_SCHED_WALT
+static inline bool is_task_util_above_min_thresh(struct task_struct *p)
+{
+	unsigned int threshold = (sysctl_sched_boost == CONSERVATIVE_BOOST) ?
+				sysctl_sched_min_task_util_for_boost :
+				sysctl_sched_min_task_util_for_colocation;
+
+	return task_util(p) > threshold;
+}
+
 static inline struct cpumask *find_rtg_target(struct task_struct *p)
 {
 	struct related_thread_group *grp;
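The new helper turns the gate from a single threshold into a mode-dependent one: the lower colocation value (35) applies by default, and the bar rises to the boost value (51) only while sysctl_sched_boost is set to CONSERVATIVE_BOOST. A minimal userspace sketch of that selection logic, with assumed stand-in values for the kernel symbols (the real value of CONSERVATIVE_BOOST and task_util() are not shown in this excerpt):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel symbols; values assumed for illustration. */
enum { NO_BOOST = 0, CONSERVATIVE_BOOST = 2 };
static unsigned int sysctl_sched_boost = NO_BOOST;
static unsigned int sysctl_sched_min_task_util_for_boost = 51;
static unsigned int sysctl_sched_min_task_util_for_colocation = 35;

/* Same selection logic as the helper, with util passed in directly. */
static bool above_min_thresh(unsigned int util)
{
	unsigned int threshold = (sysctl_sched_boost == CONSERVATIVE_BOOST) ?
				sysctl_sched_min_task_util_for_boost :
				sysctl_sched_min_task_util_for_colocation;

	return util > threshold;
}

int main(void)
{
	printf("%d\n", above_min_thresh(40));	/* 1: 40 > 35 */
	sysctl_sched_boost = CONSERVATIVE_BOOST;
	printf("%d\n", above_min_thresh(40));	/* 0: 40 <= 51 */
	return 0;
}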
@@ -7961,9 +7972,7 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p)
 
 	rcu_read_lock();
 	grp = task_related_thread_group(p);
-	if (grp && grp->preferred_cluster &&
-			(task_util(p) >
-			sysctl_sched_min_task_util_for_boost_colocation)) {
+	if (grp && grp->preferred_cluster && is_task_util_above_min_thresh(p)) {
		rtg_target = &grp->preferred_cluster->cpus;
		if (!task_fits_max(p, cpumask_first(rtg_target)))
			rtg_target = NULL;
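This excerpt covers only kernel/sched/fair.c; given the sysctl_ prefix, the two tunables are presumably also wired into a ctl_table (typically in kernel/sysctl.c) so they stay writable at runtime. A rough sketch of what those entries could look like; the procnames, mode, and handler here are assumptions, not taken from the patch:

	/* Assumed ctl_table entries; not part of the hunks shown above. */
	{
		.procname	= "sched_min_task_util_for_boost",
		.data		= &sysctl_sched_min_task_util_for_boost,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
	{
		.procname	= "sched_min_task_util_for_colocation",
		.data		= &sysctl_sched_min_task_util_for_colocation,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},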