@@ -1360,6 +1360,151 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
out:
        return wake_idle(new_cpu, p);
}
/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
        struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
        int load_idx = sd->forkexec_idx;
        int imbalance = 100 + (sd->imbalance_pct - 100) / 2;
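        /*
         * load_idx picks which rq->cpu_load[] sample source_load() and
         * target_load() use below; "imbalance" is the percentage by which
         * the local group must be busier before the task is placed
         * elsewhere, e.g. a typical imbalance_pct of 125 yields 112,
         * i.e. about 12% busier.
         */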
        do {
                unsigned long load, avg_load;
                int local_group;
                int i;

                /* Skip over this group if it has no CPUs allowed */
                if (!cpumask_intersects(sched_group_cpus(group), &p->cpus_allowed))
                        continue;

                local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(group));

                /* Tally up the load of all CPUs in the group */
                avg_load = 0;

                for_each_cpu(i, sched_group_cpus(group)) {
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = source_load(i, load_idx);
                        else
                                load = target_load(i, load_idx);

                        avg_load += load;
                }
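                /*
                 * source_load() is a conservative (min) estimate and
                 * target_load() a pessimistic (max) one, so the local
                 * group tends to look lighter than remote groups and
                 * placement is biased toward staying in this domain.
                 */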
                /* Adjust by relative CPU power of the group */
                avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

                if (local_group) {
                        this_load = avg_load;
                        this = group;
                } else if (avg_load < min_load) {
                        min_load = avg_load;
                        idlest = group;
                }
        } while (group = group->next, group != sd->groups);
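        /*
         * Pick a remote group only when the local group is at least
         * "imbalance" percent busier. Illustrative numbers: with
         * imbalance = 112, this_load = 1200 and min_load = 1000 give
         * 100*1200 >= 112*1000, so idlest is returned; at
         * this_load = 1100 the function returns NULL and the task
         * stays in the local group.
         */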
        if (!idlest || 100*this_load < imbalance*min_load)
                return NULL;
        return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
        unsigned long load, min_load = ULONG_MAX;
        int idlest = -1;
        int i;

        /* Traverse only the allowed CPUs */
        for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
                load = weighted_cpuload(i);
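                /* On a tie, prefer this_cpu to avoid a needless migration */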
                if (load < min_load || (load == min_load && i == this_cpu)) {
                        min_load = load;
                        idlest = i;
                }
        }

        return idlest;
}

/*
 * sched_balance_self: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int sched_balance_self(int cpu, int flag)
{
        struct task_struct *t = current;
        struct sched_domain *tmp, *sd = NULL;

        for_each_domain(cpu, tmp) {
                /*
                 * If power savings logic is enabled for a domain, stop there.
                 */
                if (tmp->flags & SD_POWERSAVINGS_BALANCE)
                        break;
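                /* Remember the highest domain that allows this balance type */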
                if (tmp->flags & flag)
                        sd = tmp;
        }
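        /*
         * With group scheduling enabled, refresh the per-cpu share and
         * load figures over this domain so the comparisons below see
         * current values.
         */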
        if (sd)
                update_shares(sd);

        while (sd) {
                struct sched_group *group;
                int new_cpu, weight;

                if (!(sd->flags & flag)) {
                        sd = sd->child;
                        continue;
                }

                group = find_idlest_group(sd, t, cpu);
                if (!group) {
                        sd = sd->child;
                        continue;
                }

                new_cpu = find_idlest_cpu(group, t, cpu);
                if (new_cpu == -1 || new_cpu == cpu) {
                        /* Now try balancing at a lower domain level of cpu */
                        sd = sd->child;
                        continue;
                }

                /* Now try balancing at a lower domain level of new_cpu */
                cpu = new_cpu;
                weight = cpumask_weight(sched_domain_span(sd));
                sd = NULL;
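                /*
                 * Restart from new_cpu's domains, keeping only domains
                 * whose span is strictly smaller than the one just
                 * searched; each pass therefore narrows the search and
                 * the loop is guaranteed to terminate.
                 */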
                for_each_domain(cpu, tmp) {
                        if (weight <= cpumask_weight(sched_domain_span(tmp)))
                                break;
                        if (tmp->flags & flag)
                                sd = tmp;
                }
                /* while loop will break here if sd == NULL */
        }

        return cpu;
}
#endif /* CONFIG_SMP */
/*