@@ -37,6 +37,7 @@
 #include <linux/rculist_bl.h>
 #include <linux/prefetch.h>
 #include <linux/ratelimit.h>
+#include <linux/list_lru.h>
 #include "internal.h"
 #include "mount.h"
 
@@ -356,28 +357,17 @@ static void dentry_unlink_inode(struct dentry * dentry)
 }
 
 /*
- * dentry_lru_(add|del|move_list) must be called with d_lock held.
+ * dentry_lru_(add|del) must be called with d_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
-		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
+		if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
+			this_cpu_inc(nr_dentry_unused);
		dentry->d_flags |= DCACHE_LRU_LIST;
-		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-		dentry->d_sb->s_nr_dentry_unused++;
-		this_cpu_inc(nr_dentry_unused);
-		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
	}
 }
 
-static void __dentry_lru_del(struct dentry *dentry)
-{
-	list_del_init(&dentry->d_lru);
-	dentry->d_flags &= ~DCACHE_LRU_LIST;
-	dentry->d_sb->s_nr_dentry_unused--;
-	this_cpu_dec(nr_dentry_unused);
-}
-
 /*
  * Remove a dentry with references from the LRU.
  *
@@ -393,27 +383,9 @@ static void dentry_lru_del(struct dentry *dentry)
		return;
	}
 
-	if (!list_empty(&dentry->d_lru)) {
-		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
-		__dentry_lru_del(dentry);
-		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
-	}
-}
-
-static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
-{
-	BUG_ON(dentry->d_flags & DCACHE_SHRINK_LIST);
-
-	spin_lock(&dentry->d_sb->s_dentry_lru_lock);
-	if (list_empty(&dentry->d_lru)) {
-		dentry->d_flags |= DCACHE_LRU_LIST;
-		list_add_tail(&dentry->d_lru, list);
-	} else {
-		list_move_tail(&dentry->d_lru, list);
-		dentry->d_sb->s_nr_dentry_unused--;
+	if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
		this_cpu_dec(nr_dentry_unused);
-	}
-	spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
+	dentry->d_flags &= ~DCACHE_LRU_LIST;
 }
 
 /**
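
Both helpers now hinge on the boolean result of list_lru_add()/list_lru_del(): the list_lru calls take the internal lru lock themselves and report whether list membership actually changed, which is what lets the per-superblock s_dentry_lru_lock and s_nr_dentry_unused counter drop out of this file. Below is a minimal sketch of the same caller pattern for a hypothetical cached object; struct foo, FOO_ON_LRU, foo_cache and nr_foo_unused are illustrative names made up for this sketch, not identifiers from the patch.

#include <linux/list_lru.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

#define FOO_ON_LRU	0x1		/* plays the role of DCACHE_LRU_LIST */

struct foo_cache {
	struct list_lru		lru;	/* plays the role of sb->s_dentry_lru */
};

struct foo {
	spinlock_t		lock;	/* plays the role of dentry->d_lock */
	unsigned int		flags;
	struct list_head	lru;	/* plays the role of dentry->d_lru */
	struct foo_cache	*cache;
};

static DEFINE_PER_CPU(long, nr_foo_unused);

/* Called with foo->lock held, as dentry_lru_add() is called with d_lock. */
static void foo_lru_add(struct foo *foo)
{
	if (!(foo->flags & FOO_ON_LRU)) {
		/* true only if the item was actually inserted */
		if (list_lru_add(&foo->cache->lru, &foo->lru))
			this_cpu_inc(nr_foo_unused);
		foo->flags |= FOO_ON_LRU;
	}
}

/* Called with foo->lock held, mirroring the tail of the new dentry_lru_del(). */
static void foo_lru_del(struct foo *foo)
{
	if (list_lru_del(&foo->cache->lru, &foo->lru))
		this_cpu_dec(nr_foo_unused);
	foo->flags &= ~FOO_ON_LRU;
}
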
@@ -901,12 +873,72 @@ static void shrink_dentry_list(struct list_head *list)
	rcu_read_unlock();
 }
 
+static enum lru_status
+dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+{
+	struct list_head *freeable = arg;
+	struct dentry *dentry = container_of(item, struct dentry, d_lru);
+
+
+	/*
+	 * We are inverting the lru lock/dentry->d_lock order here,
+	 * so use a trylock. If we fail to get the lock, just skip
+	 * it.
+	 */
+	if (!spin_trylock(&dentry->d_lock))
+		return LRU_SKIP;
+
+	/*
+	 * Referenced dentries are still in use. If they have active
+	 * counts, just remove them from the LRU. Otherwise give them
+	 * another pass through the LRU.
+	 */
+	if (dentry->d_lockref.count) {
+		list_del_init(&dentry->d_lru);
+		spin_unlock(&dentry->d_lock);
+		return LRU_REMOVED;
+	}
+
+	if (dentry->d_flags & DCACHE_REFERENCED) {
+		dentry->d_flags &= ~DCACHE_REFERENCED;
+		spin_unlock(&dentry->d_lock);
+
+		/*
+		 * The list move itself will be made by the common LRU code. At
+		 * this point, we've dropped the dentry->d_lock but still hold
+		 * the lru lock. This is safe to do, since every list movement
+		 * is protected by the lru lock even if both locks are held.
+		 *
+		 * This is guaranteed by the fact that all LRU management
+		 * functions are intermediated by the LRU API calls like
+		 * list_lru_add and list_lru_del. List movement in this file
+		 * only ever occurs through these functions or through
+		 * callbacks like this one, which are called from the LRU API.
+		 *
+		 * The only exceptions to this are functions like
+		 * shrink_dentry_list, and code that first checks for the
+		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
+		 * operating only with stack-provided lists after they are
+		 * properly isolated from the main list. It is thus always a
+		 * local access.
+		 */
+		return LRU_ROTATE;
+	}
+
+	dentry->d_flags |= DCACHE_SHRINK_LIST;
+	list_move_tail(&dentry->d_lru, freeable);
+	this_cpu_dec(nr_dentry_unused);
+	spin_unlock(&dentry->d_lock);
+
+	return LRU_REMOVED;
+}
+
 /**
  * prune_dcache_sb - shrink the dcache
  * @sb: superblock
- * @count: number of entries to try to free
+ * @nr_to_scan: number of entries to try to free
  *
- * Attempt to shrink the superblock dcache LRU by @count entries. This is
+ * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
  * done when we need more memory and is called from the superblock shrinker
  * function.
  *
@@ -915,45 +947,12 @@ static void shrink_dentry_list(struct list_head *list)
  */
 long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan)
 {
-	struct dentry *dentry;
-	LIST_HEAD(referenced);
-	LIST_HEAD(tmp);
-	long freed = 0;
+	LIST_HEAD(dispose);
+	long freed;
 
-relock:
-	spin_lock(&sb->s_dentry_lru_lock);
-	while (!list_empty(&sb->s_dentry_lru)) {
-		dentry = list_entry(sb->s_dentry_lru.prev,
-				struct dentry, d_lru);
-		BUG_ON(dentry->d_sb != sb);
-
-		if (!spin_trylock(&dentry->d_lock)) {
-			spin_unlock(&sb->s_dentry_lru_lock);
-			cpu_relax();
-			goto relock;
-		}
-
-		if (dentry->d_flags & DCACHE_REFERENCED) {
-			dentry->d_flags &= ~DCACHE_REFERENCED;
-			list_move(&dentry->d_lru, &referenced);
-			spin_unlock(&dentry->d_lock);
-		} else {
-			list_move(&dentry->d_lru, &tmp);
-			dentry->d_flags |= DCACHE_SHRINK_LIST;
-			this_cpu_dec(nr_dentry_unused);
-			sb->s_nr_dentry_unused--;
-			spin_unlock(&dentry->d_lock);
-			freed++;
-			if (!--nr_to_scan)
-				break;
-		}
-		cond_resched_lock(&sb->s_dentry_lru_lock);
-	}
-	if (!list_empty(&referenced))
-		list_splice(&referenced, &sb->s_dentry_lru);
-	spin_unlock(&sb->s_dentry_lru_lock);
-
-	shrink_dentry_list(&tmp);
+	freed = list_lru_walk(&sb->s_dentry_lru, dentry_lru_isolate,
+			      &dispose, nr_to_scan);
+	shrink_dentry_list(&dispose);
	return freed;
 }
 
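
For readers new to the walker that prune_dcache_sb() now uses: list_lru_walk() holds the internal lru lock across the scan, hands each item to the isolate callback (dentry_lru_isolate() above) and acts on the returned enum lru_status. The sketch below illustrates that contract under those assumptions; it is not the actual mm/list_lru.c code, and it elides the LRU_RETRY handling a real implementation needs.

/* Illustrative sketch of the list_lru_walk() contract. */
static unsigned long
example_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
		 void *cb_arg, unsigned long nr_to_walk)
{
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&lru->lock);
	list_for_each_safe(item, n, &lru->list) {
		if (!nr_to_walk--)
			break;

		switch (isolate(item, &lru->lock, cb_arg)) {
		case LRU_REMOVED:	/* callback took the item off the list */
			lru->nr_items--;
			isolated++;
			break;
		case LRU_ROTATE:	/* give the item another pass */
			list_move_tail(item, &lru->list);
			break;
		case LRU_SKIP:		/* e.g. a failed d_lock trylock */
		default:		/* LRU_RETRY etc. elided */
			break;
		}
	}
	spin_unlock(&lru->lock);
	return isolated;
}

This is why dentry_lru_isolate() can return LRU_ROTATE after dropping d_lock: the rotation is done by the walker itself, under the lru lock it never released, exactly as the long comment in the callback argues.
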
@@ -987,24 +986,10 @@ shrink_dcache_list(
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-	LIST_HEAD(tmp);
-
-	spin_lock(&sb->s_dentry_lru_lock);
-	while (!list_empty(&sb->s_dentry_lru)) {
-		/*
-		 * account for removal here so we don't need to handle it later
-		 * even though the dentry is no longer on the lru list.
-		 */
-		list_splice_init(&sb->s_dentry_lru, &tmp);
-		this_cpu_sub(nr_dentry_unused, sb->s_nr_dentry_unused);
-		sb->s_nr_dentry_unused = 0;
-		spin_unlock(&sb->s_dentry_lru_lock);
+	long disposed;
 
-		shrink_dcache_list(&tmp);
-
-		spin_lock(&sb->s_dentry_lru_lock);
-	}
-	spin_unlock(&sb->s_dentry_lru_lock);
+	disposed = list_lru_dispose_all(&sb->s_dentry_lru, shrink_dcache_list);
+	this_cpu_sub(nr_dentry_unused, disposed);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
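
The loop removed above is, in effect, what the new helper now does generically. Judging from this call site alone, list_lru_dispose_all() splices the whole LRU onto a private list under the lru lock, hands each batch to the dispose callback (here shrink_dcache_list) with the lock dropped, and returns how many entries it disposed of; that count is what lets shrink_dcache_sb() fix up nr_dentry_unused with a single this_cpu_sub(). A rough sketch of that contract, modelled on the removed shrink_dcache_sb() body rather than on mm/list_lru.c itself:

typedef void (*list_lru_dispose_cb)(struct list_head *dispose);

static unsigned long
example_dispose_all(struct list_lru *lru, list_lru_dispose_cb dispose)
{
	unsigned long disposed = 0;
	LIST_HEAD(batch);

	spin_lock(&lru->lock);
	while (!list_empty(&lru->list)) {
		/* take everything currently on the LRU in one splice */
		list_splice_init(&lru->list, &batch);
		disposed += lru->nr_items;
		lru->nr_items = 0;
		spin_unlock(&lru->lock);

		/* process the batch without holding the lru lock */
		dispose(&batch);

		spin_lock(&lru->lock);
	}
	spin_unlock(&lru->lock);
	return disposed;
}
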
@@ -1366,7 +1351,8 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
	if (dentry->d_lockref.count) {
		dentry_lru_del(dentry);
	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		dentry_lru_move_list(dentry, &data->dispose);
+		dentry_lru_del(dentry);
+		list_add_tail(&dentry->d_lru, &data->dispose);
		dentry->d_flags |= DCACHE_SHRINK_LIST;
		data->found++;
		ret = D_WALK_NORETRY;