@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
 	bch_moving_gc(c);
 }
 
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
 {
-	struct cache_set *c = arg;
 	struct cache *ca;
 	unsigned i;
 
-	while (1) {
-again:
-		bch_btree_gc(c);
+	for_each_cache(ca, c, i)
+		if (ca->invalidate_needs_gc)
+			return true;
 
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (kthread_should_stop())
-			break;
+	if (atomic_read(&c->sectors_to_gc) < 0)
+		return true;
 
-		mutex_lock(&c->bucket_lock);
+	return false;
+}
 
-		for_each_cache(ca, c, i)
-			if (ca->invalidate_needs_gc) {
-				mutex_unlock(&c->bucket_lock);
-				set_current_state(TASK_RUNNING);
-				goto again;
-			}
+static int bch_gc_thread(void *arg)
+{
+	struct cache_set *c = arg;
 
-		mutex_unlock(&c->bucket_lock);
+	while (1) {
+		wait_event_interruptible(c->gc_wait,
+			   kthread_should_stop() || gc_should_run(c));
 
-		schedule();
+		if (kthread_should_stop())
+			break;
+
+		set_gc_sectors(c);
+		bch_btree_gc(c);
 	}
 
 	return 0;
@@ -1790,11 +1792,10 @@ again:
 }
 
 int bch_gc_thread_start(struct cache_set *c)
{
-	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
 	if (IS_ERR(c->gc_thread))
 		return PTR_ERR(c->gc_thread);
 
-	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
 	return 0;
 }
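
Not part of the patch itself: a minimal sketch of the waker side that this rework implies. The GC thread now sleeps on c->gc_wait until gc_should_run() returns true, so any path that sets invalidate_needs_gc or drains sectors_to_gc is expected to issue a wake_up() on that waitqueue. The helper name wake_up_gc() and the function note_data_written() below are assumed for illustration; only wait_event_interruptible(), c->gc_wait, and the gc_should_run() conditions are visible in the hunks above.

/* Hypothetical helper, mirroring the wait side shown in the hunk above. */
static inline void wake_up_gc(struct cache_set *c)
{
	wake_up(&c->gc_wait);
}

/*
 * Illustrative producer path: after accounting newly written sectors,
 * kick GC once the budget established by set_gc_sectors() is exhausted.
 */
static void note_data_written(struct cache_set *c, unsigned sectors)
{
	if (atomic_sub_return(sectors, &c->sectors_to_gc) < 0)
		wake_up_gc(c);
}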