@@ -396,11 +396,11 @@ static unsigned long wp_next_time(unsigned long cur_time)
  * Increment the BDI's writeout completion count and the global writeout
  * completion count. Called from test_clear_page_writeback().
  */
-static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
+static inline void __wb_writeout_inc(struct bdi_writeback *wb)
 {
-	__inc_bdi_stat(bdi, BDI_WRITTEN);
-	__fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
-			       bdi->max_prop_frac);
+	__inc_wb_stat(wb, WB_WRITTEN);
+	__fprop_inc_percpu_max(&writeout_completions, &wb->bdi->completions,
+			       wb->bdi->max_prop_frac);
 	/* First event after period switching was turned off? */
 	if (!unlikely(writeout_period_time)) {
 		/*
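For readers following the conversion: the new __inc_wb_stat() is assumed to be the per-wb twin of the old __inc_bdi_stat(), i.e. a batched percpu_counter update. A minimal sketch of that assumption (helper names and WB_STAT_BATCH taken on faith from the rest of this series):

	static inline void __add_wb_stat(struct bdi_writeback *wb,
					 enum wb_stat_item item, s64 amount)
	{
		/* batched percpu update keeps the fast path lock-light */
		__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
	}

	static inline void __inc_wb_stat(struct bdi_writeback *wb,
					 enum wb_stat_item item)
	{
		__add_wb_stat(wb, item, 1);
	}

As with the old bdi_stat code, the double-underscore prefix signals that the caller must already have interrupts disabled.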
@@ -414,15 +414,15 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 	}
 }
 
-void bdi_writeout_inc(struct backing_dev_info *bdi)
+void wb_writeout_inc(struct bdi_writeback *wb)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__bdi_writeout_inc(bdi);
+	__wb_writeout_inc(wb);
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(bdi_writeout_inc);
+EXPORT_SYMBOL_GPL(wb_writeout_inc);
 
 /*
  * Obtain an accurate fraction of the BDI's portion.
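The trailing comment introduces the fraction helper that consumes the completion events recorded above. For orientation, a sketch of what it plausibly looks like at this point in the series (the exact name and parameter types are an assumption; fprop_fraction_percpu() itself is the stock lib/flex_proportions.c API):

	/* Assumed shape: per-bdi completions over global completions. */
	static void bdi_writeout_fraction(struct backing_dev_info *bdi,
					  long *numerator, long *denominator)
	{
		fprop_fraction_percpu(&writeout_completions, &bdi->completions,
				      numerator, denominator);
	}

The resulting numerator/denominator pair is what lets the dirty throttling code hand each device a share of the global dirty limit proportional to its recent writeout rate.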
@@ -1130,8 +1130,8 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 	if (elapsed < BANDWIDTH_INTERVAL)
 		return;
 
-	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
-	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
+	dirtied = percpu_counter_read(&bdi->wb.stat[WB_DIRTIED]);
+	written = percpu_counter_read(&bdi->wb.stat[WB_WRITTEN]);
 
 	/*
 	 * Skip quiet periods when disk bandwidth is under-utilized.
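Note that percpu_counter_read() here is the cheap, approximate accessor: it returns the shared count without folding in per-CPU deltas, so it can be off by up to one batch per CPU. That is fine for bandwidth estimation, which only looks at deltas over BANDWIDTH_INTERVAL. An illustration of the trade-off (both accessors are the stock percpu_counter API; this snippet is for exposition only):

	/* cheap and fuzzy: shared count only, no cross-CPU summing */
	s64 fuzzy = percpu_counter_read(&bdi->wb.stat[WB_WRITTEN]);

	/* slow and precise: folds every CPU's delta under the counter lock */
	s64 exact = percpu_counter_sum(&bdi->wb.stat[WB_WRITTEN]);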
@@ -1288,7 +1288,8 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
 				    unsigned long *bdi_thresh,
 				    unsigned long *bdi_bg_thresh)
 {
-	unsigned long bdi_reclaimable;
+	struct bdi_writeback *wb = &bdi->wb;
+	unsigned long wb_reclaimable;
 
 	/*
 	 * bdi_thresh is not treated as some limiting factor as
@@ -1320,14 +1321,12 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
 	 * actually dirty; with m+n sitting in the percpu
 	 * deltas.
 	 */
-	if (*bdi_thresh < 2 * bdi_stat_error(bdi)) {
-		bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-		*bdi_dirty = bdi_reclaimable +
-			     bdi_stat_sum(bdi, BDI_WRITEBACK);
+	if (*bdi_thresh < 2 * wb_stat_error(wb)) {
+		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
+		*bdi_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
 	} else {
-		bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-		*bdi_dirty = bdi_reclaimable +
-			     bdi_stat(bdi, BDI_WRITEBACK);
+		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
+		*bdi_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
 	}
 }
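The accurate-vs-cheap split above leans on three helpers that this patch is expected to introduce alongside the stat array. A sketch of their assumed shape, mirroring the old bdi_stat()/bdi_stat_sum()/bdi_stat_error() trio (treat as illustrative, not the authoritative definitions):

	static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
	{
		return percpu_counter_read_positive(&wb->stat[item]);
	}

	static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
	{
		s64 sum;
		unsigned long flags;

		local_irq_save(flags);
		sum = percpu_counter_sum_positive(&wb->stat[item]);
		local_irq_restore(flags);
		return sum;
	}

	/* maximal error of a stat counter: one batch per possible CPU */
	static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
	{
	#ifdef CONFIG_SMP
		return nr_cpu_ids * WB_STAT_BATCH;
	#else
		return 1;
	#endif
	}

The expensive summing path is only taken when the threshold is small enough that the per-CPU error could actually flip the throttling decision.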
@@ -1514,9 +1513,9 @@ pause:
 		 * In theory 1 page is enough to keep the consumer-producer
 		 * pipe going: the flusher cleans 1 page => the task dirties 1
 		 * more page. However bdi_dirty has accounting errors. So use
-		 * the larger and more IO friendly bdi_stat_error.
+		 * the larger and more IO friendly wb_stat_error.
 		 */
-		if (bdi_dirty <= bdi_stat_error(bdi))
+		if (bdi_dirty <= wb_stat_error(&bdi->wb))
 			break;
 
 		if (fatal_signal_pending(current))
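To put a number on that break condition: assuming the batch formula carries over unchanged from the bdi code (WB_STAT_BATCH = 8 * (1 + ilog2(nr_cpu_ids))), a 4-CPU machine gets a batch of 8 * (1 + 2) = 24, so wb_stat_error() comes to 4 * 24 = 96 pages, roughly 384KB with 4K pages. The dirtying task therefore stops pausing once bdi_dirty falls into that noise floor instead of spinning down to the literal last page.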
@@ -2106,8 +2105,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping,
 		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
 		__inc_zone_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_DIRTIED);
-		__inc_bdi_stat(bdi, BDI_RECLAIMABLE);
-		__inc_bdi_stat(bdi, BDI_DIRTIED);
+		__inc_wb_stat(&bdi->wb, WB_RECLAIMABLE);
+		__inc_wb_stat(&bdi->wb, WB_DIRTIED);
 		task_io_account_write(PAGE_CACHE_SIZE);
 		current->nr_dirtied++;
 		this_cpu_inc(bdp_ratelimits);
@@ -2126,7 +2125,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 	if (mapping_cap_account_dirty(mapping)) {
 		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
 		dec_zone_page_state(page, NR_FILE_DIRTY);
-		dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE);
+		dec_wb_stat(&inode_to_bdi(mapping->host)->wb, WB_RECLAIMABLE);
 		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
 	}
 }
@@ -2190,7 +2189,7 @@ void account_page_redirty(struct page *page)
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		current->nr_dirtied--;
 		dec_zone_page_state(page, NR_DIRTIED);
-		dec_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED);
+		dec_wb_stat(&inode_to_bdi(mapping->host)->wb, WB_DIRTIED);
 	}
 }
 EXPORT_SYMBOL(account_page_redirty);
@@ -2369,8 +2368,8 @@ int clear_page_dirty_for_io(struct page *page)
 		if (TestClearPageDirty(page)) {
 			mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
 			dec_zone_page_state(page, NR_FILE_DIRTY);
-			dec_bdi_stat(inode_to_bdi(mapping->host),
-					BDI_RECLAIMABLE);
+			dec_wb_stat(&inode_to_bdi(mapping->host)->wb,
+				    WB_RECLAIMABLE);
 			ret = 1;
 		}
 		mem_cgroup_end_page_stat(memcg);
@@ -2398,8 +2397,8 @@ int test_clear_page_writeback(struct page *page)
 						page_index(page),
 						PAGECACHE_TAG_WRITEBACK);
 			if (bdi_cap_account_writeback(bdi)) {
-				__dec_bdi_stat(bdi, BDI_WRITEBACK);
-				__bdi_writeout_inc(bdi);
+				__dec_wb_stat(&bdi->wb, WB_WRITEBACK);
+				__wb_writeout_inc(&bdi->wb);
 			}
 		}
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
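This hunk and the next can use the raw double-underscore variants because mapping->tree_lock is taken with spin_lock_irqsave(), so interrupts are already off. The dec side is assumed to pair up the same way the inc side does; a sketch under that assumption:

	static inline void __dec_wb_stat(struct bdi_writeback *wb,
					 enum wb_stat_item item)
	{
		__add_wb_stat(wb, item, -1);	/* caller has irqs disabled */
	}

	static inline void dec_wb_stat(struct bdi_writeback *wb,
				       enum wb_stat_item item)
	{
		unsigned long flags;

		local_irq_save(flags);
		__dec_wb_stat(wb, item);
		local_irq_restore(flags);
	}

By contrast, account_page_cleaned() and account_page_redirty() above run without the tree_lock held and so go through the irq-saving dec_wb_stat() wrapper.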
@@ -2433,7 +2432,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 						page_index(page),
 						PAGECACHE_TAG_WRITEBACK);
 			if (bdi_cap_account_writeback(bdi))
-				__inc_bdi_stat(bdi, BDI_WRITEBACK);
+				__inc_wb_stat(&bdi->wb, WB_WRITEBACK);
 		}
 		if (!PageDirty(page))
 			radix_tree_tag_clear(&mapping->page_tree,