@@ -540,7 +540,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
+static void update_cfs_shares(struct cfs_rq *cfs_rq);
 
 /*
  * Update the current task's runtime statistics. Skip current tasks that
@@ -763,16 +763,15 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 		list_del_leaf_cfs_rq(cfs_rq);
 }
 
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
-				long weight_delta)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
 	long load_weight, load, shares;
 
-	load = cfs_rq->load.weight + weight_delta;
+	load = cfs_rq->load.weight;
 
 	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
 	load_weight += load;
+	load_weight -= cfs_rq->load_contribution;
 
 	shares = (tg->shares * load);
 	if (load_weight)
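After this hunk, calc_cfs_shares() works purely from current runqueue state: shares = tg->shares * load / load_weight, where load_weight is the group-wide tg->load_weight with this cfs_rq's stale load_contribution swapped out for its current load (the division sits in the if (load_weight) body just past the hunk). A standalone sketch of that arithmetic in plain userspace C, not kernel code; every value below is made up for illustration:

#include <stdio.h>

int main(void)
{
	long tg_shares = 1024;		/* group's configured shares */
	long tg_load = 3072;		/* atomic_read(&tg->load_weight) */
	long contrib = 512;		/* this cfs_rq's last published load */
	long load = 1024;		/* cfs_rq->load.weight right now */

	/* Swap the stale contribution for the current load. */
	long load_weight = tg_load + load - contrib;

	/* This runqueue's proportional slice of the group's shares. */
	long shares = tg_shares * load;
	if (load_weight)
		shares /= load_weight;

	printf("shares = %ld\n", shares);	/* 1024 * 1024 / 3584 = 292 */
	return 0;
}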
@@ -790,7 +789,7 @@ static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
 {
 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
+		update_cfs_shares(cfs_rq);
 	}
 }
 #else /* CONFIG_SMP */
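Apart from the dropped argument, update_entity_shares_tick() keeps its behavior: accumulate unaccounted exec time and reweight once a full shares window has elapsed. A toy model of that window logic in userspace C, not kernel code; the 10 ms window is an assumed default for sysctl_sched_shares_window:

#include <stdio.h>

static long long window_ns = 10000000LL;	/* assumed 10 ms window */
static long long unacc_ns;			/* ~cfs_rq->load_unacc_exec_time */

/* Called per tick with the exec time accrued since the last call. */
static int shares_tick(long long delta_exec)
{
	unacc_ns += delta_exec;
	if (unacc_ns > window_ns) {
		unacc_ns = 0;		/* update_cfs_load() resets the counter */
		return 1;		/* time for update_cfs_shares() */
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 12; i++)
		if (shares_tick(1000000LL))	/* 1 ms per tick */
			printf("reweight at tick %d\n", i);	/* tick 10 */
	return 0;
}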
@@ -798,8 +797,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
-static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
-				long weight_delta)
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
 	return tg->shares;
 }
@@ -824,7 +822,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		account_entity_enqueue(cfs_rq, se);
 }
 
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
@@ -838,7 +836,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	if (likely(se->load.weight == tg->shares))
 		return;
 #endif
-	shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
+	shares = calc_cfs_shares(cfs_rq, tg);
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
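For context, reweight_entity() consumes the freshly computed shares by replacing the group entity's weight inside its parent runqueue's aggregate load. A simplified model of that swap, with toy types rather than the kernel's; the real function performs the two accounting steps only while the entity is queued:

#include <stdio.h>

struct toy_load { long weight; };
struct toy_rq	{ struct toy_load load; };
struct toy_se	{ struct toy_load load; };

static void toy_reweight(struct toy_rq *parent, struct toy_se *se, long shares)
{
	parent->load.weight -= se->load.weight;	/* ~account_entity_dequeue() */
	se->load.weight = shares;		/* ~update_load_set() */
	parent->load.weight += se->load.weight;	/* ~account_entity_enqueue() */
}

int main(void)
{
	struct toy_rq rq = { .load = { .weight = 3072 } };
	struct toy_se se = { .load = { .weight = 1024 } };

	toy_reweight(&rq, &se, 292);	/* shares value from the sketch above */
	printf("parent %ld, se %ld\n", rq.load.weight, se.load.weight);
	return 0;
}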
@@ -847,7 +845,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
 }
 
@@ -978,8 +976,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 */
 	update_curr(cfs_rq);
 	update_cfs_load(cfs_rq, 0);
-	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
+	update_cfs_shares(cfs_rq);
 
 	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
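This reordering is what lets the parameter disappear: update_cfs_shares() now runs after account_entity_enqueue(), so the entity's weight is already folded into cfs_rq->load.weight rather than being passed in as se->load.weight. A toy check of that equivalence in userspace C, not kernel code:

#include <assert.h>

struct toy_cfs_rq { long load_weight; };

/* Old path: shares computed before accounting, delta passed explicitly. */
static long old_view(struct toy_cfs_rq *rq, long weight_delta)
{
	return rq->load_weight + weight_delta;
}

int main(void)
{
	struct toy_cfs_rq rq = { .load_weight = 2048 };
	long se_weight = 1024;

	long before = old_view(&rq, se_weight);

	rq.load_weight += se_weight;	/* ~account_entity_enqueue() */
	long after = rq.load_weight;	/* what update_cfs_shares() now sees */

	assert(before == after);	/* both paths see 3072 */
	return 0;
}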
@@ -1041,7 +1039,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
-	update_cfs_shares(cfs_rq, 0);
+	update_cfs_shares(cfs_rq);
 
 	/*
 	 * Normalize the entity after updating the min_vruntime because the
@@ -1282,7 +1280,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
+		update_cfs_shares(cfs_rq);
 	}
 
 	hrtick_update(rq);
@@ -1312,7 +1310,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
+		update_cfs_shares(cfs_rq);
 	}
 
 	hrtick_update(rq);
@@ -2123,7 +2121,7 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
 	 * We need to update shares after updating tg->load_weight in
 	 * order to adjust the weight of groups with long running tasks.
 	 */
-	update_cfs_shares(cfs_rq, 0);
+	update_cfs_shares(cfs_rq);
 
 	raw_spin_unlock_irqrestore(&rq->lock, flags);