@@ -191,26 +191,42 @@ static bool sane_reclaim(struct scan_control *sc)
}
#endif
/*
 * This misses isolated pages which are not accounted for to save counters.
 * As the data only determines if reclaim or compaction continues, it is
 * not expected that isolated pages will be a dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
    unsigned long nr;
    nr = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) +
         zone_page_state_snapshot(zone, NR_INACTIVE_FILE) +
         zone_page_state_snapshot(zone, NR_ISOLATED_FILE);
    nr = zone_page_state_snapshot(zone, NR_ZONE_LRU_FILE);
    if (get_nr_swap_pages() > 0)
        nr += zone_page_state_snapshot(zone, NR_ZONE_LRU_ANON);
    return nr;
}
unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
{
    unsigned long nr;
    nr = node_page_state_snapshot(pgdat, NR_ACTIVE_FILE) +
         node_page_state_snapshot(pgdat, NR_INACTIVE_FILE) +
         node_page_state_snapshot(pgdat, NR_ISOLATED_FILE);
    if (get_nr_swap_pages() > 0)
        nr += zone_page_state_snapshot(zone, NR_ACTIVE_ANON) +
              zone_page_state_snapshot(zone, NR_INACTIVE_ANON) +
              zone_page_state_snapshot(zone, NR_ISOLATED_ANON);
        nr += node_page_state_snapshot(pgdat, NR_ACTIVE_ANON) +
              node_page_state_snapshot(pgdat, NR_INACTIVE_ANON) +
              node_page_state_snapshot(pgdat, NR_ISOLATED_ANON);
    return nr;
}
bool zone_reclaimable(struct zone *zone)
bool pgdat_reclaimable(struct pglist_data *pgdat)
{
    return zone_page_state_snapshot(zone, NR_PAGES_SCANNED) <
        zone_reclaimable_pages(zone) * 6;
    return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
        pgdat_reclaimable_pages(pgdat) * 6;
}
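The heuristic behind the old zone_reclaimable() and the new pgdat_reclaimable() is unchanged: a zone or node is still considered reclaimable while fewer than six times its reclaimable pages have been scanned. A minimal user-space sketch of that check follows; pgdat_reclaimable_model and its plain-integer arguments are illustrative stand-ins, not kernel symbols (the real code reads the counters via node_page_state_snapshot()).

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the "scanned < reclaimable * 6" heuristic above. */
static bool pgdat_reclaimable_model(unsigned long pages_scanned,
                                    unsigned long reclaimable_pages)
{
    return pages_scanned < reclaimable_pages * 6;
}

int main(void)
{
    printf("%d\n", pgdat_reclaimable_model(100, 50));  /* 1: keep reclaiming */
    printf("%d\n", pgdat_reclaimable_model(400, 50));  /* 0: node looks unreclaimable */
    return 0;
}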
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
@@ -218,7 +234,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
    if (!mem_cgroup_disabled())
        return mem_cgroup_get_lru_size(lruvec, lru);
    return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
    return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
}
/*
@@ -877,7 +893,7 @@ static void page_check_dirty_writeback(struct page *page,
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
                                      struct zone *zone,
                                      struct pglist_data *pgdat,
                                      struct scan_control *sc,
                                      enum ttu_flags ttu_flags,
                                      unsigned long *ret_nr_dirty,
@@ -917,7 +933,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
            goto keep;
        VM_BUG_ON_PAGE(PageActive(page), page);
        VM_BUG_ON_PAGE(page_zone(page) != zone, page);
        sc->nr_scanned++;
@@ -996,7 +1011,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
            /* Case 1 above */
            if (current_is_kswapd() &&
                PageReclaim(page) &&
                test_bit(ZONE_WRITEBACK, &zone->flags)) {
                test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
                nr_immediate++;
                goto keep_locked;
@@ -1092,7 +1107,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
             */
            if (page_is_file_cache(page) &&
                (!current_is_kswapd() ||
                 !test_bit(ZONE_DIRTY, &zone->flags))) {
                 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
                /*
                 * Immediately reclaim when written back.
                 * Similar in principle to deactivate_page()
@@ -1266,11 +1281,11 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
        }
    }
    ret = shrink_page_list(&clean_pages, zone, &sc,
    ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
                           TTU_UNMAP|TTU_IGNORE_ACCESS,
                           &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
    list_splice(&clean_pages, page_list);
    mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
    mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
    return ret;
}
@@ -1375,7 +1390,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
{
    struct list_head *src = &lruvec->lists[lru];
    unsigned long nr_taken = 0;
    unsigned long scan;
    unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
    unsigned long scan, nr_pages;
    for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
         !list_empty(src); scan++) {
@@ -1388,7 +1404,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        switch (__isolate_lru_page(page, mode)) {
        case 0:
            nr_taken += hpage_nr_pages(page);
            nr_pages = hpage_nr_pages(page);
            nr_taken += nr_pages;
            nr_zone_taken[page_zonenum(page)] += nr_pages;
            list_move(&page->lru, dst);
            break;
@@ -1405,6 +1423,13 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
    *nr_scanned = scan;
    trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
                                nr_taken, mode, is_file_lru(lru));
    for (scan = 0; scan < MAX_NR_ZONES; scan++) {
        nr_pages = nr_zone_taken[scan];
        if (!nr_pages)
            continue;
        update_lru_size(lruvec, lru, scan, -nr_pages);
    }
    return nr_taken;
}
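With the LRU lists held per node, isolate_lru_pages() above still has to fix up per-zone LRU counters, so it now records how many pages were taken from each zone and adjusts each zone once at the end. A self-contained sketch of that bookkeeping pattern; struct fake_page, MAX_ZONES and the printout are illustrative stand-ins, not kernel structures.

#include <stdio.h>

#define MAX_ZONES 4    /* stand-in for MAX_NR_ZONES */

struct fake_page {
    int zonenum;            /* which zone the page belongs to */
    unsigned long nr_pages; /* 1 for base pages, >1 for huge pages */
};

int main(void)
{
    struct fake_page batch[] = { {0, 1}, {2, 512}, {2, 1}, {3, 1} };
    unsigned long nr_taken = 0, nr_zone_taken[MAX_ZONES] = { 0 };
    unsigned long i;

    /* Mirror of the isolation loop: total taken plus a per-zone tally. */
    for (i = 0; i < sizeof(batch) / sizeof(batch[0]); i++) {
        nr_taken += batch[i].nr_pages;
        nr_zone_taken[batch[i].zonenum] += batch[i].nr_pages;
    }

    /* Mirror of the fix-up loop: adjust each zone's LRU size once. */
    for (i = 0; i < MAX_ZONES; i++) {
        if (!nr_zone_taken[i])
            continue;
        printf("zone %lu: LRU size -= %lu\n", i, nr_zone_taken[i]);
    }
    printf("nr_taken = %lu\n", nr_taken);
    return 0;
}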
@@ -1445,7 +1470,7 @@ int isolate_lru_page(struct page *page)
    struct lruvec *lruvec;
    spin_lock_irq(zone_lru_lock(zone));
    lruvec = mem_cgroup_page_lruvec(page, zone);
    lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
    if (PageLRU(page)) {
        int lru = page_lru(page);
        get_page(page);
@@ -1465,7 +1490,7 @@ int isolate_lru_page(struct page *page)
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
static int too_many_isolated(struct zone *zone, int file,
static int too_many_isolated(struct pglist_data *pgdat, int file,
                             struct scan_control *sc)
{
    unsigned long inactive, isolated;
@@ -1477,11 +1502,11 @@ static int too_many_isolated(struct zone *zone, int file,
        return 0;
    if (file) {
        inactive = zone_page_state(zone, NR_INACTIVE_FILE);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE);
        inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
        isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
    } else {
        inactive = zone_page_state(zone, NR_INACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_ANON);
        inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
        isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
    }
    /*
@@ -1499,7 +1524,7 @@ static noinline_for_stack void
putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
    struct zone *zone = lruvec_zone(lruvec);
    struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    LIST_HEAD(pages_to_free);
    /*
@@ -1512,13 +1537,13 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
        VM_BUG_ON_PAGE(PageLRU(page), page);
        list_del(&page->lru);
        if (unlikely(!page_evictable(page))) {
            spin_unlock_irq(zone_lru_lock(zone));
            spin_unlock_irq(&pgdat->lru_lock);
            putback_lru_page(page);
            spin_lock_irq(zone_lru_lock(zone));
            spin_lock_irq(&pgdat->lru_lock);
            continue;
        }
        lruvec = mem_cgroup_page_lruvec(page, zone);
        lruvec = mem_cgroup_page_lruvec(page, pgdat);
        SetPageLRU(page);
        lru = page_lru(page);
@@ -1535,10 +1560,10 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
            del_page_from_lru_list(page, lruvec, lru);
            if (unlikely(PageCompound(page))) {
                spin_unlock_irq(zone_lru_lock(zone));
                spin_unlock_irq(&pgdat->lru_lock);
                mem_cgroup_uncharge(page);
                (*get_compound_page_dtor(page))(page);
                spin_lock_irq(zone_lru_lock(zone));
                spin_lock_irq(&pgdat->lru_lock);
            } else
                list_add(&page->lru, &pages_to_free);
        }
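putback_inactive_pages() keeps its pattern of dropping the LRU lock around the slow paths (putback_lru_page() and the compound-page destructor); only the lock itself changes from the zone's to the node's pgdat->lru_lock. A user-space analogy of that drop/reacquire pattern, using a pthread mutex as a stand-in for spin_lock_irq; process_batch and slow_operation are hypothetical names, not kernel functions.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for work that must not run under the list lock. */
static void slow_operation(int item)
{
    printf("slow work on item %d outside the lock\n", item);
}

static void process_batch(int nr_items)
{
    int i;

    pthread_mutex_lock(&lru_lock);
    for (i = 0; i < nr_items; i++) {
        if (i % 2) {    /* the "unevictable" or compound-page case */
            /* Drop the lock, do the slow work, then take it back
             * before touching the shared list again. */
            pthread_mutex_unlock(&lru_lock);
            slow_operation(i);
            pthread_mutex_lock(&lru_lock);
            continue;
        }
        /* Fast path: item stays on the list, lock still held. */
    }
    pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
    process_batch(4);
    return 0;
}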
@@ -1582,10 +1607,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
    unsigned long nr_immediate = 0;
    isolate_mode_t isolate_mode = 0;
    int file = is_file_lru(lru);
    struct zone *zone = lruvec_zone(lruvec);
    struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
    while (unlikely(too_many_isolated(zone, file, sc))) {
    while (unlikely(too_many_isolated(pgdat, file, sc))) {
        congestion_wait(BLK_RW_ASYNC, HZ/10);
        /* We are about to die and free our memory. Return now. */
@@ -1600,48 +1625,45 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
    if (!sc->may_writepage)
        isolate_mode |= ISOLATE_CLEAN;
    spin_lock_irq(zone_lru_lock(zone));
    spin_lock_irq(&pgdat->lru_lock);
    nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
                                 &nr_scanned, sc, isolate_mode, lru);
    update_lru_size(lruvec, lru, -nr_taken);
    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
    __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
    reclaim_stat->recent_scanned[file] += nr_taken;
    if (global_reclaim(sc)) {
        __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
        __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
        if (current_is_kswapd())
            __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
            __count_vm_events(PGSCAN_KSWAPD, nr_scanned);
        else
            __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
            __count_vm_events(PGSCAN_DIRECT, nr_scanned);
    }
    spin_unlock_irq(zone_lru_lock(zone));
    spin_unlock_irq(&pgdat->lru_lock);
    if (nr_taken == 0)
        return 0;
    nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
    nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, TTU_UNMAP,
                                    &nr_dirty, &nr_unqueued_dirty, &nr_congested,
                                    &nr_writeback, &nr_immediate,
                                    false);
    spin_lock_irq(zone_lru_lock(zone));
    spin_lock_irq(&pgdat->lru_lock);
    if (global_reclaim(sc)) {
        if (current_is_kswapd())
            __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
                                   nr_reclaimed);
            __count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed);
        else
            __count_zone_vm_events(PGSTEAL_DIRECT, zone,
                                   nr_reclaimed);
            __count_vm_events(PGSTEAL_DIRECT, nr_reclaimed);
    }
    putback_inactive_pages(lruvec, &page_list);
    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
    __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
    spin_unlock_irq(zone_lru_lock(zone));
    spin_unlock_irq(&pgdat->lru_lock);
    mem_cgroup_uncharge_list(&page_list);
    free_hot_cold_page_list(&page_list, true);
@@ -1661,7 +1683,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
     * are encountered in the nr_immediate check below.
     */
    if (nr_writeback && nr_writeback == nr_taken)
        set_bit(ZONE_WRITEBACK, &zone->flags);
        set_bit(PGDAT_WRITEBACK, &pgdat->flags);
    /*
     * Legacy memcg will stall in page writeback so avoid forcibly
@@ -1673,16 +1695,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
     * backed by a congested BDI and wait_iff_congested will stall.
     */
    if (nr_dirty && nr_dirty == nr_congested)
        set_bit(ZONE_CONGESTED, &zone->flags);
        set_bit(PGDAT_CONGESTED, &pgdat->flags);
    /*
     * If dirty pages are scanned that are not queued for IO, it
     * implies that flushers are not keeping up. In this case, flag
     * the zone ZONE_DIRTY and kswapd will start writing pages from
     * the pgdat PGDAT_DIRTY and kswapd will start writing pages from
     * reclaim context.
     */
    if (nr_unqueued_dirty == nr_taken)
        set_bit(ZONE_DIRTY, &zone->flags);
        set_bit(PGDAT_DIRTY, &pgdat->flags);
    /*
     * If kswapd scans pages marked for immediate
@@ -1701,9 +1723,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
     */
    if (!sc->hibernation_mode && !current_is_kswapd() &&
        current_may_throttle())
        wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
        wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10);
    trace_mm_vmscan_lru_shrink_inactive(zone, nr_scanned, nr_reclaimed,
    trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
                                        nr_scanned, nr_reclaimed,
                                        sc->priority, file);
    return nr_reclaimed;
}
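The tail of shrink_inactive_list() turns the counters returned by shrink_page_list() into node-wide hints: all-writeback marks the node for throttling, dirty equal to congested marks a congested backing device, and unqueued-dirty equal to taken tells kswapd to start writing pages from reclaim context. A compact model of those three decisions; the reclaim_hints struct and classify_reclaim name are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct reclaim_hints {
    bool writeback;   /* analogue of PGDAT_WRITEBACK */
    bool congested;   /* analogue of PGDAT_CONGESTED */
    bool dirty;       /* analogue of PGDAT_DIRTY */
};

static struct reclaim_hints classify_reclaim(unsigned long nr_taken,
                                             unsigned long nr_writeback,
                                             unsigned long nr_dirty,
                                             unsigned long nr_congested,
                                             unsigned long nr_unqueued_dirty)
{
    struct reclaim_hints h = { false, false, false };

    /* Every isolated page was under writeback. */
    if (nr_writeback && nr_writeback == nr_taken)
        h.writeback = true;
    /* Every dirty page was also backed by a congested device. */
    if (nr_dirty && nr_dirty == nr_congested)
        h.congested = true;
    /* Dirty pages were found that the flushers have not queued for IO. */
    if (nr_unqueued_dirty == nr_taken)
        h.dirty = true;
    return h;
}

int main(void)
{
    struct reclaim_hints h = classify_reclaim(32, 32, 10, 10, 0);

    printf("writeback=%d congested=%d dirty=%d\n",
           h.writeback, h.congested, h.dirty);
    return 0;
}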
@@ -1731,20 +1754,20 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
                                     struct list_head *pages_to_free,
                                     enum lru_list lru)
{
    struct zone *zone = lruvec_zone(lruvec);
    struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    unsigned long pgmoved = 0;
    struct page *page;
    int nr_pages;
    while (!list_empty(list)) {
        page = lru_to_page(list);
        lruvec = mem_cgroup_page_lruvec(page, zone);
        lruvec = mem_cgroup_page_lruvec(page, pgdat);
        VM_BUG_ON_PAGE(PageLRU(page), page);
        SetPageLRU(page);
        nr_pages = hpage_nr_pages(page);
        update_lru_size(lruvec, lru, nr_pages);
        update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
        list_move(&page->lru, &lruvec->lists[lru]);
        pgmoved += nr_pages;
@@ -1754,10 +1777,10 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
            del_page_from_lru_list(page, lruvec, lru);
            if (unlikely(PageCompound(page))) {
                spin_unlock_irq(zone_lru_lock(zone));
                spin_unlock_irq(&pgdat->lru_lock);
                mem_cgroup_uncharge(page);
                (*get_compound_page_dtor(page))(page);
                spin_lock_irq(zone_lru_lock(zone));
                spin_lock_irq(&pgdat->lru_lock);
            } else
                list_add(&page->lru, pages_to_free);
        }
@@ -1783,7 +1806,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
    unsigned long nr_rotated = 0;
    isolate_mode_t isolate_mode = 0;
    int file = is_file_lru(lru);
    struct zone *zone = lruvec_zone(lruvec);
    struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    lru_add_drain();
@@ -1792,20 +1815,19 @@ static void shrink_active_list(unsigned long nr_to_scan,
    if (!sc->may_writepage)
        isolate_mode |= ISOLATE_CLEAN;
    spin_lock_irq(zone_lru_lock(zone));
    spin_lock_irq(&pgdat->lru_lock);
    nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
                                 &nr_scanned, sc, isolate_mode, lru);
    update_lru_size(lruvec, lru, -nr_taken);
    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
    __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
    reclaim_stat->recent_scanned[file] += nr_taken;
    if (global_reclaim(sc))
        __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
    __count_zone_vm_events(PGREFILL, zone, nr_scanned);
        __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
    __count_vm_events(PGREFILL, nr_scanned);
    spin_unlock_irq(zone_lru_lock(zone));
    spin_unlock_irq(&pgdat->lru_lock);
    while (!list_empty(&l_hold)) {
        cond_resched();
@@ -1850,7 +1872,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
    /*
     * Move pages back to the lru list.
     */
    spin_lock_irq(zone_lru_lock(zone));
    spin_lock_irq(&pgdat->lru_lock);
    /*
     * Count referenced pages from currently used mappings as rotated,
     * even though only some of them are actually re-activated. This
@@ -1861,8 +1883,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
    move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
    move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
    __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
    spin_unlock_irq(zone_lru_lock(zone));
    __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
    spin_unlock_irq(&pgdat->lru_lock);
    mem_cgroup_uncharge_list(&l_hold);
    free_hot_cold_page_list(&l_hold, true);
@@ -1956,7 +1978,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
    u64 fraction[2];
    u64 denominator = 0;    /* gcc */
    struct zone *zone = lruvec_zone(lruvec);
    struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    unsigned long anon_prio, file_prio;
    enum scan_balance scan_balance;
    unsigned long anon, file;
@@ -1977,7 +1999,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
     * well.
     */
    if (current_is_kswapd()) {
        if (!zone_reclaimable(zone))
        if (!pgdat_reclaimable(pgdat))
            force_scan = true;
        if (!mem_cgroup_online(memcg))
            force_scan = true;
@@ -2023,14 +2045,24 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
     * anon pages. Try to detect this based on file LRU size.
     */
    if (global_reclaim(sc)) {
        unsigned long zonefile;
        unsigned long zonefree;
        unsigned long pgdatfile;
        unsigned long pgdatfree;
        int z;
        unsigned long total_high_wmark = 0;
        zonefree = zone_page_state(zone, NR_FREE_PAGES);
        zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
                   zone_page_state(zone, NR_INACTIVE_FILE);
        pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
        pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
                    node_page_state(pgdat, NR_INACTIVE_FILE);
        for (z = 0; z < MAX_NR_ZONES; z++) {
            struct zone *zone = &pgdat->node_zones[z];
            if (!populated_zone(zone))
                continue;
            total_high_wmark += high_wmark_pages(zone);
        }
        if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
        if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
            scan_balance = SCAN_ANON;
            goto out;
        }
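The hunk above replaces the single-zone check (zonefile + zonefree against one zone's high watermark) with a node-wide one: free pages and the high watermarks are summed over every populated zone of the node before forcing SCAN_ANON. A standalone sketch of the new decision; the struct zone_info array and should_force_scan_anon name are illustrative, not kernel types.

#include <stdbool.h>
#include <stdio.h>

struct zone_info {
    bool populated;
    unsigned long high_wmark;   /* high watermark, in pages */
};

/* Force anon scanning when the node's file cache plus free memory would not
 * even cover the sum of its populated zones' high watermarks. */
static bool should_force_scan_anon(const struct zone_info *zones, int nr_zones,
                                   unsigned long pgdatfile,
                                   unsigned long pgdatfree)
{
    unsigned long total_high_wmark = 0;
    int z;

    for (z = 0; z < nr_zones; z++) {
        if (!zones[z].populated)
            continue;
        total_high_wmark += zones[z].high_wmark;
    }
    return pgdatfile + pgdatfree <= total_high_wmark;
}

int main(void)
{
    struct zone_info zones[] = { {true, 1024}, {false, 0}, {true, 4096} };

    printf("%d\n", should_force_scan_anon(zones, 3, 2000, 1000));  /* 1: scan anon */
    printf("%d\n", should_force_scan_anon(zones, 3, 8000, 4000));  /* 0: balance normally */
    return 0;
}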
@@ -2077,7 +2109,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
    file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
           lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
    spin_lock_irq(zone_lru_lock(zone));
    spin_lock_irq(&pgdat->lru_lock);
    if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
        reclaim_stat->recent_scanned[0] /= 2;
        reclaim_stat->recent_rotated[0] /= 2;
    }
    fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
    fp /= reclaim_stat->recent_rotated[1] + 1;
    spin_unlock_irq(zone_lru_lock(zone));
    spin_unlock_irq(&pgdat->lru_lock);
    fraction[0] = ap;
    fraction[1] = fp;
@@ -2352,9 +2384,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
     * inactive lists are large enough, continue reclaiming
     */
    pages_for_compaction = (2UL << sc->order);
    inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
    inactive_lru_pages = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE);
    if (get_nr_swap_pages() > 0)
        inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
        inactive_lru_pages += node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
    if (sc->nr_reclaimed < pages_for_compaction &&
        inactive_lru_pages > pages_for_compaction)
        return true;
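In the branch shown above, should_continue_reclaim() now sizes the remaining work against node-wide inactive counts: reclaim continues while fewer than 2^(order+1) pages have been reclaimed and the inactive lists still hold more than that. A small model of just this branch of the check; continue_reclaim_model is an illustrative name and the full kernel function has further conditions not shown in the excerpt.

#include <stdbool.h>
#include <stdio.h>

static bool continue_reclaim_model(int order, unsigned long nr_reclaimed,
                                   unsigned long inactive_lru_pages)
{
    /* Enough pages for compaction to assemble a page of this order. */
    unsigned long pages_for_compaction = 2UL << order;

    return nr_reclaimed < pages_for_compaction &&
           inactive_lru_pages > pages_for_compaction;
}

int main(void)
{
    /* order-9 request: 1024 pages are considered enough for compaction. */
    printf("%d\n", continue_reclaim_model(9, 100, 5000));  /* 1: keep reclaiming */
    printf("%d\n", continue_reclaim_model(9, 2000, 5000)); /* 0: reclaimed enough */
    return 0;
}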
@@ -2554,7 +2586,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
            continue;
        if (sc->priority != DEF_PRIORITY &&
            !zone_reclaimable(zone))
            !pgdat_reclaimable(zone->zone_pgdat))
            continue;    /* Let kswapd poll it */
        /*
@@ -2692,7 +2724,7 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
    for (i = 0; i <= ZONE_NORMAL; i++) {
        zone = &pgdat->node_zones[i];
        if (!populated_zone(zone) ||
            zone_reclaimable_pages(zone) == 0)
            pgdat_reclaimable_pages(pgdat) == 0)
            continue;
        pfmemalloc_reserve += min_wmark_pages(zone);
@@ -3000,7 +3032,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
         * DEF_PRIORITY. Effectively, it considers them balanced so
         * they must be considered balanced here as well!
         */
        if (!zone_reclaimable(zone)) {
        if (!pgdat_reclaimable(zone->zone_pgdat)) {
            balanced_pages += zone->managed_pages;
            continue;
        }
@@ -3063,6 +3095,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
{
    unsigned long balance_gap;
    bool lowmem_pressure;
    struct pglist_data *pgdat = zone->zone_pgdat;
    /* Reclaim above the high watermark. */
    sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
@@ -3087,7 +3120,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
    shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
    clear_bit(ZONE_WRITEBACK, &zone->flags);
    /* TODO: ANOMALY */
    clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
    /*
     * If a zone reaches its high watermark, consider it to be no longer
@@ -3095,10 +3129,10 @@ static bool kswapd_shrink_zone(struct zone *zone,
     * BDIs but as pressure is relieved, speculatively avoid congestion
     * waits.
     */
    if (zone_reclaimable(zone) &&
    if (pgdat_reclaimable(zone->zone_pgdat) &&
        zone_balanced(zone, sc->order, false, 0, classzone_idx)) {
        clear_bit(ZONE_CONGESTED, &zone->flags);
        clear_bit(ZONE_DIRTY, &zone->flags);
        clear_bit(PGDAT_CONGESTED, &pgdat->flags);
        clear_bit(PGDAT_DIRTY, &pgdat->flags);
    }
    return sc->nr_scanned >= sc->nr_to_reclaim;
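kswapd_shrink_zone() keeps its per-zone reclaim target (at least SWAP_CLUSTER_MAX, otherwise the zone's high watermark) and still reports whether the scan met that target; only the dirty/congested/writeback hints move into pgdat->flags. A sketch of the target-and-return logic with plain parameters standing in for struct scan_control; kswapd_shrink_zone_model is an illustrative name.

#include <stdbool.h>
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

/* Returns true when the scan has met the per-zone target, mirroring the
 * value handed back to the caller in the hunk above. */
static bool kswapd_shrink_zone_model(unsigned long high_wmark_pages,
                                     unsigned long nr_scanned)
{
    unsigned long nr_to_reclaim = SWAP_CLUSTER_MAX;

    /* Reclaim above the high watermark. */
    if (high_wmark_pages > nr_to_reclaim)
        nr_to_reclaim = high_wmark_pages;

    return nr_scanned >= nr_to_reclaim;
}

int main(void)
{
    printf("%d\n", kswapd_shrink_zone_model(1024, 100));   /* 0: target not met */
    printf("%d\n", kswapd_shrink_zone_model(1024, 4096));  /* 1: scanned enough */
    return 0;
}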
@@ -3157,7 +3191,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
            continue;
        if (sc.priority != DEF_PRIORITY &&
            !zone_reclaimable(zone))
            !pgdat_reclaimable(zone->zone_pgdat))
            continue;
        /*
@@ -3184,9 +3218,11 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
            /*
             * If balanced, clear the dirty and congested
             * flags
             *
             * TODO: ANOMALY
             */
            clear_bit(ZONE_CONGESTED, &zone->flags);
            clear_bit(ZONE_DIRTY, &zone->flags);
            clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags);
            clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags);
        }
    }
@@ -3216,7 +3252,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
            continue;
        if (sc.priority != DEF_PRIORITY &&
            !zone_reclaimable(zone))
            !pgdat_reclaimable(zone->zone_pgdat))
            continue;
        sc.nr_scanned = 0;
@@ -3612,8 +3648,8 @@ int sysctl_min_slab_ratio = 5;
static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
    unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
    unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
                             zone_page_state(zone, NR_ACTIVE_FILE);
    unsigned long file_lru = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
                             node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE);
    /*
     * It's possible for there to be more file mapped pages than
@@ -3716,7 +3752,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
        return ZONE_RECLAIM_FULL;
    if (!zone_reclaimable(zone))
    if (!pgdat_reclaimable(zone->zone_pgdat))
        return ZONE_RECLAIM_FULL;
    /*
@@ -3795,7 +3831,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
            zone = pagezone;
            spin_lock_irq(zone_lru_lock(zone));
        }
        lruvec = mem_cgroup_page_lruvec(page, zone);
        lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
        if (!PageLRU(page) || !PageUnevictable(page))
            continue;