@@ -1551,7 +1551,7 @@ static void task_numa_compare(struct task_numa_env *env,
 	 */
 	if (cur) {
 		/* Skip this swap candidate if cannot move to the source cpu */
-		if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
+		if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
 			goto unlock;
 
 		/*
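Every hunk in this patch repeats the same mechanical substitution: in the pre-patch tree, tsk_cpus_allowed() is a one-line wrapper around the ->cpus_allowed field, so replacing the call with a direct field reference does not change behavior. A minimal sketch of the old definition, assuming the pre-patch include/linux/sched.h:

	/* Pre-patch wrapper (deleted by this patch); 'tsk' is a struct
	 * task_struct pointer embedding a struct cpumask cpus_allowed. */
	#define tsk_cpus_allowed(tsk)	(&(tsk)->cpus_allowed)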
@@ -1661,7 +1661,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
 
 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
 		/* Skip this CPU if the source task cannot migrate */
-		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
+		if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
 			continue;
 
 		env->dst_cpu = cpu;
@@ -5458,7 +5458,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_cpus(group),
-					tsk_cpus_allowed(p)))
+					&p->cpus_allowed))
 			continue;
 
 		local_group = cpumask_test_cpu(this_cpu,
@@ -5578,7 +5578,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 		return cpumask_first(sched_group_cpus(group));
 
 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
+	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
 		if (idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
@@ -5717,7 +5717,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 	if (!test_idle_cores(target, false))
 		return -1;
 
-	cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
+	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 
 	for_each_cpu_wrap(core, cpus, target, wrap) {
 		bool idle = true;
@@ -5751,7 +5751,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
 		return -1;
 
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
-		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
 		if (idle_cpu(cpu))
 			return cpu;
@@ -5803,7 +5803,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
 	time = local_clock();
 
 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
-		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
 		if (idle_cpu(cpu))
 			break;
@@ -5958,7 +5958,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
 	if (sd_flag & SD_BALANCE_WAKE) {
 		record_wakee(p);
 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
-			      && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+			      && cpumask_test_cpu(cpu, &p->cpus_allowed);
 	}
 
 	rcu_read_lock();
@@ -6698,7 +6698,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
 		return 0;
 
-	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
+	if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
 		int cpu;
 
 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -6718,7 +6718,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 
 		/* Prevent to re-select dst_cpu via env's cpus */
 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
-			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
+			if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
 				env->flags |= LBF_DST_PINNED;
 				env->new_dst_cpu = cpu;
 				break;
@@ -7252,7 +7252,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to tsk_cpus_allowed() constraints.
+ * groups is inadequate due to ->cpus_allowed constraints.
  *
  * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
  * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
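The scenario in that comment can be made concrete with a sketch (illustration only, not part of the patch), assuming the first group spans CPUs 0-3 and the second CPUs 4-7:

	/* Hypothetical affinity mask for the comment's example: 1 CPU of the
	 * first group (CPU 3) plus 3 CPUs of the second group (CPUs 4-6). */
	struct cpumask mask;

	cpumask_clear(&mask);
	cpumask_set_cpu(3, &mask);
	cpumask_set_cpu(4, &mask);
	cpumask_set_cpu(5, &mask);
	cpumask_set_cpu(6, &mask);
	/* Balancing two of four such tasks into each group would force both
	 * first-group tasks to share CPU 3 while a CPU of the second group
	 * sits idle - the situation the group-imbalance flag captures. */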
@@ -8211,8 +8211,7 @@ more_balance:
			 * if the curr task on busiest cpu can't be
			 * moved to this_cpu
			 */
-			if (!cpumask_test_cpu(this_cpu,
-					tsk_cpus_allowed(busiest->curr))) {
+			if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
 							    flags);
 				env.flags |= LBF_ALL_PINNED;