@@ -1886,8 +1886,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
  * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
  * any flavor of RCU.
  */
-int rcu_needs_cpu(int cpu)
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
+	*delta_jiffies = ULONG_MAX;
 	return rcu_cpu_has_callbacks(cpu);
 }
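For orientation, a hedged sketch of the revised contract (the helper below is invented, not part of the patch): the return value still answers "does RCU need this CPU's tick right now?", while the new out-parameter reports how many jiffies the CPU may sleep before RCU needs it again, with ULONG_MAX meaning no RCU-imposed deadline.

	/* Illustrative caller only; report_rcu_deadline() is a made-up name. */
	static void report_rcu_deadline(int cpu)
	{
		unsigned long dj;

		if (rcu_needs_cpu(cpu, &dj))
			pr_info("rcu: CPU %d must keep its scheduling-clock tick\n", cpu);
		else
			pr_info("rcu: CPU %d may sleep up to %lu jiffies\n", cpu, dj);
	}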
@@ -1962,41 +1963,6 @@ static void rcu_idle_count_callbacks_posted(void)
 #define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
 
-/* Loop counter for rcu_prepare_for_idle(). */
-static DEFINE_PER_CPU(int, rcu_dyntick_drain);
-/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
-static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
-/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
-static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
-/* Enable special processing on first attempt to enter dyntick-idle mode. */
-static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
-/* Running count of non-lazy callbacks posted, never decremented. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
-/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
-static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
-
-/*
- * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
- * callbacks on this CPU, (2) this CPU has not yet attempted to enter
- * dyntick-idle mode, or (3) this CPU is in the process of attempting to
- * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
- * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
- * it is better to incur scheduling-clock interrupts than to spin
- * continuously for the same time duration!
- */
-int rcu_needs_cpu(int cpu)
-{
-	/* Flag a new idle sojourn to the idle-entry state machine. */
-	per_cpu(rcu_idle_first_pass, cpu) = 1;
-
-	/* If no callbacks, RCU doesn't need the CPU. */
-	if (!rcu_cpu_has_callbacks(cpu))
-		return 0;
-
-	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
-	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
-}
-
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
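The rdtp-> accesses introduced below assume that the per-CPU variables removed above now live as fields of struct rcu_dynticks. The actual declaration belongs to rcutree.h and is outside the hunks shown here; the following is a plausible sketch matching the field names this patch uses.

	struct rcu_dynticks {
		/* ... preexisting dyntick-idle bookkeeping ... */
	#ifdef CONFIG_RCU_FAST_NO_HZ
		int dyntick_drain;		/* Prepare-for-idle state variable. */
		unsigned long dyntick_holdoff;	/* No retries for the jiffy of failure. */
		struct timer_list idle_gp_timer; /* Wake up CPU sleeping with callbacks. */
		unsigned long idle_gp_timer_expires; /* When to wake up CPU (for repost). */
		bool idle_first_pass;		/* First pass of attempt to go idle? */
		unsigned long nonlazy_posted;	/* # times non-lazy CBs posted to CPU. */
		unsigned long nonlazy_posted_snap; /* Idle-period snapshot of above. */
	#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
	};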
@@ -2039,6 +2005,47 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
 	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
 }
 
+/*
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ *
+ * The delta_jiffies argument is used to store the time when RCU is
+ * going to need the CPU again if it still has callbacks.  The reason
+ * for this is that rcu_prepare_for_idle() might need to post a timer,
+ * but if so, it will do so after tick_nohz_stop_sched_tick() has set
+ * the wakeup time for this CPU.  This means that RCU's timer can be
+ * delayed until the wakeup time, which defeats the purpose of posting
+ * a timer.
+ */
+int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	/* Flag a new idle sojourn to the idle-entry state machine. */
+	rdtp->idle_first_pass = 1;
+	/* If no callbacks, RCU doesn't need the CPU. */
+	if (!rcu_cpu_has_callbacks(cpu)) {
+		*delta_jiffies = ULONG_MAX;
+		return 0;
+	}
+	if (rdtp->dyntick_holdoff == jiffies) {
+		/* RCU recently tried and failed, so don't try again. */
+		*delta_jiffies = 1;
+		return 1;
+	}
+	/* Set up for the possibility that RCU will post a timer. */
+	if (rcu_cpu_has_nonlazy_callbacks(cpu))
+		*delta_jiffies = RCU_IDLE_GP_DELAY;
+	else
+		*delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+	return 0;
+}
+
 /*
  * Handler for smp_call_function_single().  The only point of this
  * handler is to wake the CPU up, so the handler does only tracing.
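To make the delta_jiffies rationale above concrete, here is a hedged sketch of the intended caller-side pattern, loosely modeled on tick_nohz_stop_sched_tick() (simplified; nohz_next_wakeup() is an invented name): the idle path queries RCU before choosing its wakeup time, then clamps the timer-wheel deadline with RCU's, so any timer RCU later posts is not itself deferred past its expiry.

	static unsigned long nohz_next_wakeup(int cpu, unsigned long last_jiffies)
	{
		unsigned long rcu_delta, next;

		if (rcu_needs_cpu(cpu, &rcu_delta))
			return last_jiffies + 1;	/* Keep the tick; retry soon. */
		next = get_next_timer_interrupt(last_jiffies);
		if (rcu_delta < next - last_jiffies)	/* RCU's deadline is sooner. */
			next = last_jiffies + rcu_delta;
		return next;
	}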
@@ -2075,21 +2082,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in)
  */
 static void rcu_prepare_for_idle_init(int cpu)
 {
-	per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-	setup_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-		    rcu_idle_gp_timer_func, cpu);
-	per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1;
-	per_cpu(rcu_idle_first_pass, cpu) = 1;
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	rdtp->dyntick_holdoff = jiffies - 1;
+	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
+	rdtp->idle_gp_timer_expires = jiffies - 1;
+	rdtp->idle_first_pass = 1;
 }
 
 /*
  * Clean up for exit from idle.  Because we are exiting from idle, there
- * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * is no longer any point to ->idle_gp_timer, so cancel it.  This will
  * do nothing if this timer is not active, so just cancel it unconditionally.
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-	del_timer(&per_cpu(rcu_idle_gp_timer, cpu));
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+	del_timer(&rdtp->idle_gp_timer);
 	trace_rcu_prep_idle("Cleanup after idle");
 }
@@ -2108,42 +2118,41 @@ static void rcu_cleanup_after_idle(int cpu)
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do an
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
- * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ * later.  The ->dyntick_drain field controls the sequencing.
  *
  * The caller must have disabled interrupts.
  */
 static void rcu_prepare_for_idle(int cpu)
 {
 	struct timer_list *tp;
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
 	/*
 	 * If this is an idle re-entry, for example, due to use of
 	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
 	 * loop, then don't take any state-machine actions, unless the
 	 * momentary exit from idle queued additional non-lazy callbacks.
-	 * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+	 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
 	 * pending.
 	 */
-	if (!per_cpu(rcu_idle_first_pass, cpu) &&
-	    (per_cpu(rcu_nonlazy_posted, cpu) ==
-	     per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+	if (!rdtp->idle_first_pass &&
+	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
 		if (rcu_cpu_has_callbacks(cpu)) {
-			tp = &per_cpu(rcu_idle_gp_timer, cpu);
-			mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
+			tp = &rdtp->idle_gp_timer;
+			mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
 		}
 		return;
 	}
-	per_cpu(rcu_idle_first_pass, cpu) = 0;
-	per_cpu(rcu_nonlazy_posted_snap, cpu) =
-		per_cpu(rcu_nonlazy_posted, cpu) - 1;
+	rdtp->idle_first_pass = 0;
+	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
 
 	/*
 	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
 	 * Also reset state to avoid prejudicing later attempts.
 	 */
 	if (!rcu_cpu_has_callbacks(cpu)) {
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-		per_cpu(rcu_dyntick_drain, cpu) = 0;
+		rdtp->dyntick_holdoff = jiffies - 1;
+		rdtp->dyntick_drain = 0;
 		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
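One subtlety above deserves a note: nonlazy_posted_snap is deliberately set to nonlazy_posted - 1 rather than to the current count. A hedged illustration of the invariant this buys (the helper below is invented, not in the patch):

	/*
	 * Because the snapshot trails the running count by one, the re-entry
	 * fast path's equality test cannot succeed until rcu_prepare_for_idle()
	 * completes a pass and takes a true snapshot (nonlazy_posted_snap =
	 * nonlazy_posted, in the timer-posting branch below).  An interrupted
	 * pass is therefore always rerun rather than short-circuited.
	 */
	static bool idle_reentry_may_short_circuit(struct rcu_dynticks *rdtp)
	{
		return !rdtp->idle_first_pass &&
		       rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap;
	}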
@@ -2152,36 +2161,37 @@ static void rcu_prepare_for_idle(int cpu)
 	 * If in holdoff mode, just return.  We will presumably have
 	 * refrained from disabling the scheduling-clock tick.
 	 */
-	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+	if (rdtp->dyntick_holdoff == jiffies) {
 		trace_rcu_prep_idle("In holdoff");
 		return;
 	}
 
-	/* Check and update the rcu_dyntick_drain sequencing. */
-	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+	/* Check and update the ->dyntick_drain sequencing. */
+	if (rdtp->dyntick_drain <= 0) {
 		/* First time through, initialize the counter. */
-		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
-	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
+	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
 		   !rcu_pending(cpu) &&
 		   !local_softirq_pending()) {
 		/* Can we go dyntick-idle despite still having callbacks? */
-		trace_rcu_prep_idle("Dyntick with callbacks");
-		per_cpu(rcu_dyntick_drain, cpu) = 0;
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-		if (rcu_cpu_has_nonlazy_callbacks(cpu))
-			per_cpu(rcu_idle_gp_timer_expires, cpu) =
+		rdtp->dyntick_drain = 0;
+		rdtp->dyntick_holdoff = jiffies;
+		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+			trace_rcu_prep_idle("Dyntick with callbacks");
+			rdtp->idle_gp_timer_expires =
 					   jiffies + RCU_IDLE_GP_DELAY;
-		else
-			per_cpu(rcu_idle_gp_timer_expires, cpu) =
+		} else {
+			rdtp->idle_gp_timer_expires =
 					   jiffies + RCU_IDLE_LAZY_GP_DELAY;
-		tp = &per_cpu(rcu_idle_gp_timer, cpu);
-		mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu));
-		per_cpu(rcu_nonlazy_posted_snap, cpu) =
-			per_cpu(rcu_nonlazy_posted, cpu);
+			trace_rcu_prep_idle("Dyntick with lazy callbacks");
+		}
+		tp = &rdtp->idle_gp_timer;
+		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 		return; /* Nothing more to do immediately. */
-	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+	} else if (--(rdtp->dyntick_drain) <= 0) {
 		/* We have hit the limit, so time to give up. */
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+		rdtp->dyntick_holdoff = jiffies;
 		trace_rcu_prep_idle("Begin holdoff");
 		invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
 		return;
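The three-way branch above implements a bounded flush loop. A simplified standalone model of the ->dyntick_drain sequencing may help (illustrative only; drain_step() and the verdict enum are invented, and "quiet" stands in for !rcu_pending() && !local_softirq_pending()):

	enum drain_verdict { DRAIN_INIT, DRAIN_GO_IDLE, DRAIN_HOLDOFF, DRAIN_RETRY };

	static enum drain_verdict drain_step(int *tries, bool quiet)
	{
		if (*tries <= 0) {
			*tries = RCU_IDLE_FLUSHES;	/* First pass: arm the counter. */
			return DRAIN_INIT;
		}
		if (*tries <= RCU_IDLE_OPT_FLUSHES && quiet)
			return DRAIN_GO_IDLE;		/* Idle despite callbacks; post timer. */
		if (--(*tries) <= 0)
			return DRAIN_HOLDOFF;		/* Give up for the current jiffy. */
		return DRAIN_RETRY;			/* Keep flushing callbacks. */
	}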
@@ -2227,7 +2237,7 @@ static void rcu_prepare_for_idle(int cpu)
  */
 static void rcu_idle_count_callbacks_posted(void)
 {
-	__this_cpu_add(rcu_nonlazy_posted, 1);
+	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
@@ -2238,11 +2248,12 @@ static void rcu_idle_count_callbacks_posted(void)
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
-	struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu);
+	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct timer_list *tltp = &rdtp->idle_gp_timer;
 
 	sprintf(cp, "drain=%d %c timer=%lu",
-		per_cpu(rcu_dyntick_drain, cpu),
-		per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
+		rdtp->dyntick_drain,
+		rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
 		timer_pending(tltp) ? tltp->expires - jiffies : -1);
 }
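For reference, the stall-warning fragment this produces looks like the following (values invented): with three flushes remaining, not in holdoff, and a wakeup timer due in five jiffies:

	drain=3 . timer=5

Note that when no timer is pending, the -1 passed through the %lu conversion prints as ULONG_MAX (for example, 18446744073709551615 on 64-bit), which is worth knowing when reading these warnings.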