@@ -29,6 +29,7 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 #include <linux/stacktrace.h>
+#include <linux/prefetch.h>
 
 #include <trace/events/kmem.h>
@@ -269,6 +270,11 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
 	return *(void **)(object + s->offset);
 }
 
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+	prefetch(object + s->offset);
+}
+
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
 	void *p;
@@ -1560,6 +1566,7 @@ static void *get_partial_node(struct kmem_cache *s,
 		} else {
 			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
+			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
 			break;
@@ -1983,6 +1990,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 				local_irq_restore(flags);
 				pobjects = 0;
 				pages = 0;
+				stat(s, CPU_PARTIAL_DRAIN);
 			}
 		}
@ -1994,7 +2002,6 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page - > next = oldpage ;
} while ( this_cpu_cmpxchg ( s - > cpu_slab - > partial , oldpage , page ) ! = oldpage ) ;
stat ( s , CPU_PARTIAL_FREE ) ;
return pobjects ;
}
@@ -2319,6 +2326,8 @@ redo:
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
+		void *next_object = get_freepointer_safe(s, object);
+
 		/*
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
@@ -2334,11 +2343,12 @@ redo:
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				object, tid,
-				get_freepointer_safe(s, object), next_tid(tid)))) {
+				next_object, next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
 		}
+		prefetch_freepointer(s, next_object);
 		stat(s, ALLOC_FASTPATH);
 	}
@@ -2475,9 +2485,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 * If we just froze the page then put it onto the
 		 * per cpu partial list.
 		 */
-		if (new.frozen && !was_frozen)
+		if (new.frozen && !was_frozen) {
 			put_cpu_partial(s, page, 1);
+			stat(s, CPU_PARTIAL_FREE);
+		}
 		/*
 		 * The list lock was not taken therefore no list
 		 * activity can be necessary.
@@ -3939,13 +3950,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, n,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
+			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
+				down_write(&slub_lock);
 				list_del(&s->list);
 				kfree(n);
 				kfree(s);
 				goto err;
 			}
-			up_write(&slub_lock);
 			return s;
 		}
 		kfree(n);
@@ -5069,6 +5081,8 @@ STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
+STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -5134,6 +5148,8 @@ static struct attribute *slab_attrs[] = {
 	&cmpxchg_double_cpu_fail_attr.attr,
 	&cpu_partial_alloc_attr.attr,
 	&cpu_partial_free_attr.attr,
+	&cpu_partial_node_attr.attr,
+	&cpu_partial_drain_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
 	&failslab_attr.attr,