@@ -3113,19 +3113,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 */
 	update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
 
-	/* Cases where imbalance does not exist from POV of this_cpu */
-	/* 1) this_cpu is not the appropriate cpu to perform load balancing
-	 *    at this level.
-	 * 2) There is no busy sibling group to pull from.
-	 * 3) This group is the busiest group.
-	 * 4) This group is more busy than the avg busieness at this
-	 *    sched_domain.
-	 * 5) The imbalance is within the specified limit.
-	 *
-	 * Note: when doing newidle balance, if the local group has excess
-	 * capacity (i.e. nr_running < group_capacity) and the busiest group
-	 * does not have any capacity, we force a load balance to pull tasks
-	 * to the local group. In this case, we skip past checks 3, 4 and 5.
+	/*
+	 * this_cpu is not the appropriate cpu to perform load balancing at
+	 * this level.
 	 */
 	if (!(*balance))
 		goto ret;
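The early return above depends on update_sd_lb_stats() clearing *balance when this_cpu is not the CPU that should balance this domain: within the local group, the first idle CPU is eligible, falling back to the group's first CPU when none are idle. As a minimal sketch of that election rule (the helper name and parameters below are made up for illustration, not the kernel's actual code):

/* Hypothetical helper illustrating the "who balances at this level" rule. */
static int should_balance_here(int this_cpu, int first_idle_cpu, int first_cpu)
{
	if (first_idle_cpu != -1)
		return this_cpu == first_idle_cpu;	/* prefer an idle CPU */
	return this_cpu == first_cpu;			/* else the group's first CPU */
}

Every other CPU in the group sees *balance == 0 and jumps to ret, so only one CPU per group walks the rest of find_busiest_group() for this domain.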
@@ -3134,19 +3124,27 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	    check_asym_packing(sd, &sds, this_cpu, imbalance))
 		return sds.busiest;
 
+	/* There is no busy sibling group to pull tasks from */
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
-	/*  SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
+	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
 	if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
 			!sds.busiest_has_capacity)
 		goto force_balance;
 
+	/*
+	 * If the local group is more busy than the selected busiest group
+	 * don't try and pull any tasks.
+	 */
 	if (sds.this_load >= sds.max_load)
 		goto out_balanced;
 
+	/*
+	 * Don't pull any tasks if this group is already above the domain
+	 * average load.
+	 */
 	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
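For reference, sds.avg_load is the domain-wide load per unit of cpu_power, scaled by SCHED_LOAD_SCALE (1024 in this kernel series), so it is directly comparable to the already power-scaled sds.this_load. A minimal standalone sketch with made-up numbers (not kernel code) shows why a local group at or above that average refuses to pull:

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL	/* matches the kernel's 1 << 10 */

int main(void)
{
	unsigned long total_load = 3072;	/* sum of group loads in the domain (made up) */
	unsigned long total_pwr  = 2048;	/* sum of group cpu_power: two groups of 1024 */
	unsigned long this_load  = 1700;	/* local group load, already power-scaled */

	unsigned long avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;

	/* avg_load = 1024 * 3072 / 2048 = 1536; since 1700 >= 1536 the local
	 * group is already above the domain average, so nothing is pulled. */
	printf("avg_load=%lu, pull=%s\n", avg_load,
	       this_load >= avg_load ? "no" : "maybe");
	return 0;
}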