@@ -886,6 +886,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 	return 0;
 }
 
+#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
@@ -908,6 +909,7 @@ static int init_cache_node_node(int node)
 	return 0;
 }
+#endif
 
 static int setup_kmem_cache_node(struct kmem_cache *cachep,
 				int node, gfp_t gfp, bool force_change)
@@ -975,6 +977,8 @@ fail:
 	return ret;
 }
 
+#ifdef CONFIG_SMP
+
 static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
@@ -1075,65 +1079,54 @@ bad:
 	return -ENOMEM;
 }
 
-static int cpuup_callback(struct notifier_block *nfb,
-				    unsigned long action, void *hcpu)
+int slab_prepare_cpu(unsigned int cpu)
 {
-	long cpu = (long)hcpu;
-	int err = 0;
+	int err;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&slab_mutex);
-		err = cpuup_prepare(cpu);
-		mutex_unlock(&slab_mutex);
-		break;
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		start_cpu_timer(cpu);
-		break;
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		/*
-		 * Shutdown cache reaper. Note that the slab_mutex is
-		 * held so that if cache_reap() is invoked it cannot do
-		 * anything expensive but will only modify reap_work
-		 * and reschedule the timer.
-		 */
-		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
-		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(slab_reap_work, cpu).work.func = NULL;
-		break;
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		start_cpu_timer(cpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/*
-		 * Even if all the cpus of a node are down, we don't free the
-		 * kmem_cache_node of any cache. This to avoid a race between
-		 * cpu_down, and a kmalloc allocation from another cpu for
-		 * memory from the node of the cpu going down. The node
-		 * structure is usually allocated from kmem_cache_create() and
-		 * gets destroyed at kmem_cache_destroy().
-		 */
-		/* fall through */
+	mutex_lock(&slab_mutex);
+	err = cpuup_prepare(cpu);
+	mutex_unlock(&slab_mutex);
+	return err;
+}
+
+/*
+ * This is called for a failed online attempt and for a successful
+ * offline.
+ *
+ * Even if all the cpus of a node are down, we don't free the
+ * kmem_cache_node of any cache. This is to avoid a race between cpu_down and
+ * a kmalloc allocation from another cpu for memory from the node of
+ * the cpu going down. The node structure is usually allocated from
+ * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
+ */
+int slab_dead_cpu(unsigned int cpu)
+{
+	mutex_lock(&slab_mutex);
+	cpuup_canceled(cpu);
+	mutex_unlock(&slab_mutex);
+	return 0;
+}
 #endif
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		mutex_lock(&slab_mutex);
-		cpuup_canceled(cpu);
-		mutex_unlock(&slab_mutex);
-		break;
-	}
-	return notifier_from_errno(err);
+
+static int slab_online_cpu(unsigned int cpu)
+{
+	start_cpu_timer(cpu);
+	return 0;
 }
 
-static struct notifier_block cpucache_notifier = {
-	&cpuup_callback, NULL, 0
-};
+static int slab_offline_cpu(unsigned int cpu)
+{
+	/*
+	 * Shutdown cache reaper. Note that the slab_mutex is held so
+	 * that if cache_reap() is invoked it cannot do anything
+	 * expensive but will only modify reap_work and reschedule the
+	 * timer.
+	 */
+	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
+	/* Now the cache_reaper is guaranteed to be not running. */
+	per_cpu(slab_reap_work, cpu).work.func = NULL;
+	return 0;
+}
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
 /*
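An aside on the hunk above: unlike slab_online_cpu()/slab_offline_cpu(), the prepare/dead pair is deliberately non-static, because it is not registered through cpuhp_setup_state() but compiled into the hotplug core as a statically allocated state. What follows is a minimal sketch of that companion wiring, not part of this hunk, assuming a CPUHP_SLAB_PREPARE enum entry in include/linux/cpuhotplug.h and a cpuhp_step layout with plain .startup/.teardown function pointers (later kernels spell these .startup.single/.teardown.single):

	/* include/linux/cpu.h: expose the entry points to the hotplug core */
	int slab_prepare_cpu(unsigned int cpu);
	int slab_dead_cpu(unsigned int cpu);

	/*
	 * kernel/cpu.c: assumed table entry for a PREPARE-stage state.
	 * PREPARE-stage callbacks run on a control CPU, not on the CPU
	 * that is coming up or going down, which is fine here since both
	 * callbacks only take slab_mutex and touch per-node structures.
	 */
	[CPUHP_SLAB_PREPARE] = {
		.name		= "slab:prepare",
		.startup	= slab_prepare_cpu,
		.teardown	= slab_dead_cpu,
	},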
@@ -1336,12 +1329,6 @@ void __init kmem_cache_init_late(void)
 	/* Done! */
 	slab_state = FULL;
 
-	/*
-	 * Register a cpu startup notifier callback that initializes
-	 * cpu_cache_get for all new cpus
-	 */
-	register_cpu_notifier(&cpucache_notifier);
-
 #ifdef CONFIG_NUMA
 	/*
 	 * Register a memory hotplug callback that initializes and frees
@@ -1358,13 +1345,14 @@ void __init kmem_cache_init_late(void)
 static int __init cpucache_init(void)
 {
-	int cpu;
+	int ret;
 
 	/*
 	 * Register the timers that return unneeded pages to the page allocator
 	 */
-	for_each_online_cpu(cpu)
-		start_cpu_timer(cpu);
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
+				slab_online_cpu, slab_offline_cpu);
+	WARN_ON(ret < 0);
 
 	/* Done! */
 	slab_state = FULL;
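
A note on the cpucache_init() hunk: CPUHP_AP_ONLINE_DYN asks the core to allocate a free dynamic state slot, and cpuhp_setup_state() invokes the online callback on every CPU that is already online before returning, which is what lets the old for_each_online_cpu() loop go away. For a dynamic state the successful return value is the allocated state number, i.e. >= 0, hence WARN_ON(ret < 0) rather than WARN_ON(ret). A generic usage sketch of the same API, with a hypothetical "foo" subsystem standing in (names assumed, not from this patch):

	#include <linux/cpu.h>
	#include <linux/cpuhotplug.h>
	#include <linux/module.h>

	static enum cpuhp_state foo_hp_state;	/* remember the dynamic slot */

	static int foo_online(unsigned int cpu)
	{
		/* per-CPU bring-up; also run once per already-online CPU at setup */
		return 0;
	}

	static int foo_offline(unsigned int cpu)
	{
		/* undo foo_online(); also run for online CPUs on state removal */
		return 0;
	}

	static int __init foo_init(void)
	{
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
					foo_online, foo_offline);
		if (ret < 0)
			return ret;
		foo_hp_state = ret;	/* the slot actually allocated */
		return 0;
	}

	static void __exit foo_exit(void)
	{
		/* runs foo_offline() on all online CPUs, then frees the slot */
		cpuhp_remove_state(foo_hp_state);
	}

	module_init(foo_init);
	module_exit(foo_exit);

SLAB never removes its state, so it only checks the return value; a module that can be unloaded must keep the returned state number for cpuhp_remove_state(), as sketched above.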