diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 171af59af20c..88bca7125b26 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8865,12 +8865,7 @@ again:
 		set_next_entity(cfs_rq, se);
 	}
 
-	if (hrtick_enabled(rq))
-		hrtick_start_fair(rq, p);
-
-	update_misfit_status(p, rq);
-
-	return p;
+	goto done;
 simple:
 #endif
 
@@ -8884,6 +8879,16 @@ simple:
 
 	p = task_of(se);
 
+done: __maybe_unused
+#ifdef CONFIG_SMP
+	/*
+	 * Move the next running task to the front of
+	 * the list, so our cfs_tasks list becomes MRU
+	 * one.
+	 */
+	list_move(&p->se.group_node, &rq->cfs_tasks);
+#endif
+
 	if (hrtick_enabled(rq))
 		hrtick_start_fair(rq, p);
 
@@ -9389,11 +9394,12 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
  */
 static struct task_struct *detach_one_task(struct lb_env *env)
 {
-	struct task_struct *p, *n;
 
 	lockdep_assert_held(&env->src_rq->lock);
 
-	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
+	struct task_struct *p;
+
+	list_for_each_entry_reverse(p,
+			&env->src_rq->cfs_tasks, se.group_node) {
 		if (!can_migrate_task(p, env))
 			continue;
 
@@ -9447,7 +9453,7 @@ redo:
 		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
 			break;
 
-		p = list_first_entry(tasks, struct task_struct, se.group_node);
+		p = list_last_entry(tasks, struct task_struct, se.group_node);
 
 		env->loop++;
 		/* We've more or less seen every task there is, call it quits */
@@ -9520,7 +9526,7 @@ next:
 			env->src_grp_type, p->pid, load, task_util(p),
 			cpumask_bits(&p->cpus_allowed)[0]);
 #endif
-		list_move_tail(&p->se.group_node, tasks);
+		list_move(&p->se.group_node, tasks);
 	}
 
 	if (env->flags & (LBF_IGNORE_BIG_TASKS |