@@ -100,16 +100,16 @@ void __init call_function_init(void)
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static void csd_lock_wait(struct call_single_data *data)
+static void csd_lock_wait(struct call_single_data *csd)
 {
-	while (data->flags & CSD_FLAG_LOCK)
+	while (csd->flags & CSD_FLAG_LOCK)
 		cpu_relax();
 }
 
-static void csd_lock(struct call_single_data *data)
+static void csd_lock(struct call_single_data *csd)
 {
-	csd_lock_wait(data);
-	data->flags |= CSD_FLAG_LOCK;
+	csd_lock_wait(csd);
+	csd->flags |= CSD_FLAG_LOCK;
 
 	/*
 	 * prevent CPU from reordering the above assignment
@@ -119,16 +119,16 @@ static void csd_lock(struct call_single_data *data)
 	smp_mb();
 }
 
-static void csd_unlock(struct call_single_data *data)
+static void csd_unlock(struct call_single_data *csd)
 {
-	WARN_ON(!(data->flags & CSD_FLAG_LOCK));
+	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
 
 	/*
 	 * ensure we're all done before releasing data:
 	 */
 	smp_mb();
 
-	data->flags &= ~CSD_FLAG_LOCK;
+	csd->flags &= ~CSD_FLAG_LOCK;
 }
 
 /*
@@ -137,7 +137,7 @@ static void csd_unlock(struct call_single_data *data)
  * ->func, ->info, and ->flags set.
  */
 static
-void generic_exec_single(int cpu, struct call_single_data *data, int wait)
+void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
 	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
 	unsigned long flags;
@@ -145,7 +145,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
-	list_add_tail(&data->list, &dst->list);
+	list_add_tail(&csd->list, &dst->list);
 	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
@@ -163,7 +163,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
-		csd_lock_wait(data);
+		csd_lock_wait(csd);
 }
 
 /*
@@ -173,7 +173,6 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 void generic_smp_call_function_single_interrupt(void)
 {
 	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	unsigned int data_flags;
 	LIST_HEAD(list);
 
 	/*
@@ -186,25 +185,26 @@ void generic_smp_call_function_single_interrupt(void)
 	raw_spin_unlock(&q->lock);
 
 	while (!list_empty(&list)) {
-		struct call_single_data *data;
+		struct call_single_data *csd;
+		unsigned int csd_flags;
 
-		data = list_entry(list.next, struct call_single_data, list);
-		list_del(&data->list);
+		csd = list_entry(list.next, struct call_single_data, list);
+		list_del(&csd->list);
 
 		/*
-		 * 'data' can be invalid after this call if flags == 0
+		 * 'csd' can be invalid after this call if flags == 0
 		 * (when called through generic_exec_single()),
 		 * so save them away before making the call:
 		 */
-		data_flags = data->flags;
+		csd_flags = csd->flags;
 
-		data->func(data->info);
+		csd->func(csd->info);
 
 		/*
 		 * Unlocked CSDs are valid through generic_exec_single():
 		 */
-		if (data_flags & CSD_FLAG_LOCK)
-			csd_unlock(data);
+		if (csd_flags & CSD_FLAG_LOCK)
+			csd_unlock(csd);
 	}
 }
@@ -249,16 +249,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 		local_irq_restore(flags);
 	} else {
 		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-			struct call_single_data *data = &d;
+			struct call_single_data *csd = &d;
 
 			if (!wait)
-				data = &__get_cpu_var(csd_data);
+				csd = &__get_cpu_var(csd_data);
 
-			csd_lock(data);
+			csd_lock(csd);
 
-			data->func = func;
-			data->info = info;
-			generic_exec_single(cpu, data, wait);
+			csd->func = func;
+			csd->info = info;
+			generic_exec_single(cpu, csd, wait);
 		} else {
 			err = -ENXIO;	/* CPU not online */
 		}
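
(As an aside, not part of the patch: the wait/!wait split above is why a caller that passes wait can keep everything on its own stack, since smp_call_function_single() only falls back to the per-cpu csd_data when it will not block until the callback finishes. A minimal, hypothetical caller is sketched below; remote_bump() and poke_cpu() are invented names, and the callback runs from the IPI handler on the target CPU.)

	/* Illustrative only; not part of the patch. */
	static void remote_bump(void *info)
	{
		atomic_t *counter = info;	/* runs on the target CPU, in IPI context */

		atomic_inc(counter);
	}

	static int poke_cpu(int cpu)
	{
		atomic_t counter = ATOMIC_INIT(0);

		/* wait == 1, so 'counter' on our stack stays valid until remote_bump() returns */
		return smp_call_function_single(cpu, remote_bump, &counter, 1);
	}
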
@@ -325,7 +325,7 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
  * pre-allocated data structure. Useful for embedding @data inside
  * other structures, for instance.
  */
-void __smp_call_function_single(int cpu, struct call_single_data *data,
+void __smp_call_function_single(int cpu, struct call_single_data *csd,
 				int wait)
 {
 	unsigned int this_cpu;
@@ -343,11 +343,11 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 	if (cpu == this_cpu) {
 		local_irq_save(flags);
-		data->func(data->info);
+		csd->func(csd->info);
 		local_irq_restore(flags);
 	} else {
-		csd_lock(data);
-		generic_exec_single(cpu, data, wait);
+		csd_lock(csd);
+		generic_exec_single(cpu, csd, wait);
 	}
 	put_cpu();
 }
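
(For illustration, not part of the patch: the "embedding @data inside other structures" case mentioned in the comment above might look like the sketch below; struct my_work and its helpers are invented names. Because the call is asynchronous (wait == 0), the embedded csd must outlive the IPI, which the containing structure guarantees here.)

	/* Illustrative only; not part of the patch. */
	struct my_work {
		struct call_single_data csd;	/* pre-allocated, as the comment suggests */
		int pending;
	};

	static void my_work_fn(void *info)
	{
		struct my_work *work = info;	/* runs on the target CPU */

		work->pending = 0;
	}

	static void my_work_init(struct my_work *work)
	{
		work->csd.func = my_work_fn;
		work->csd.info = work;
	}

	static void my_work_kick(struct my_work *work, int cpu)
	{
		work->pending = 1;
		/* async: csd_lock() inside waits if a previous kick is still in flight */
		__smp_call_function_single(cpu, &work->csd, 0);
	}
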
@@ -369,7 +369,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait)
 {
-	struct call_function_data *data;
+	struct call_function_data *cfd;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
 
 	/*
@@ -401,24 +401,24 @@ void smp_call_function_many(const struct cpumask *mask,
 		return;
 	}
 
-	data = &__get_cpu_var(cfd_data);
+	cfd = &__get_cpu_var(cfd_data);
 
-	cpumask_and(data->cpumask, mask, cpu_online_mask);
-	cpumask_clear_cpu(this_cpu, data->cpumask);
+	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
+	cpumask_clear_cpu(this_cpu, cfd->cpumask);
 
 	/* Some callers race with other cpus changing the passed mask */
-	if (unlikely(!cpumask_weight(data->cpumask)))
+	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
 	/*
-	 * After we put an entry into the list, data->cpumask
-	 * may be cleared again when another CPU sends another IPI for
-	 * a SMP function call, so data->cpumask will be zero.
+	 * After we put an entry into the list, cfd->cpumask may be cleared
+	 * again when another CPU sends another IPI for a SMP function call, so
+	 * cfd->cpumask will be zero.
 	 */
-	cpumask_copy(data->cpumask_ipi, data->cpumask);
+	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
 
-	for_each_cpu(cpu, data->cpumask) {
-		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
+	for_each_cpu(cpu, cfd->cpumask) {
+		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
 		struct call_single_queue *dst =
 					&per_cpu(call_single_queue, cpu);
 		unsigned long flags;
@@ -433,12 +433,13 @@ void smp_call_function_many(const struct cpumask *mask,
 	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(data->cpumask_ipi);
+	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
 	if (wait) {
-		for_each_cpu(cpu, data->cpumask) {
-			struct call_single_data *csd =
-					per_cpu_ptr(data->csd, cpu);
+		for_each_cpu(cpu, cfd->cpumask) {
+			struct call_single_data *csd;
+
+			csd = per_cpu_ptr(cfd->csd, cpu);
 			csd_lock_wait(csd);
 		}
 	}
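
(Finally, for illustration and not part of the patch: a hypothetical caller of smp_call_function_many(); drain_local() and drain_remote_caches() are invented names. Preemption is disabled around the call because the function relies on smp_processor_id() and excludes the calling CPU, as the cpumask_clear_cpu(this_cpu, ...) line above shows.)

	/* Illustrative only; not part of the patch. */
	static void drain_local(void *info)
	{
		/* runs on each targeted CPU from the IPI handler */
	}

	static void drain_remote_caches(void)
	{
		preempt_disable();	/* smp_call_function_many() uses smp_processor_id() */
		smp_call_function_many(cpu_online_mask, drain_local, NULL, true);
		preempt_enable();
	}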