@@ -1025,12 +1025,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		unsigned long *nr_scanned, struct scan_control *sc,
 		isolate_mode_t mode, enum lru_list lru)
 {
-	struct list_head *src;
+	struct list_head *src = &lruvec->lists[lru];
 	unsigned long nr_taken = 0;
 	unsigned long scan;
-	int file = is_file_lru(lru);
-
-	src = &lruvec->lists[lru];
 
 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
 		struct page *page;
@@ -1058,11 +1055,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	}
 
 	*nr_scanned = scan;
-	trace_mm_vmscan_lru_isolate(sc->order,
-			nr_to_scan, scan,
-			nr_taken,
-			mode, file);
+	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
+				    nr_taken, mode, is_file_lru(lru));
 	return nr_taken;
 }
@@ -1140,8 +1134,7 @@ static int too_many_isolated(struct zone *zone, int file,
 }
 
 static noinline_for_stack void
-putback_inactive_pages(struct lruvec *lruvec,
-		       struct list_head *page_list)
+putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	struct zone *zone = lruvec_zone(lruvec);
@@ -1235,11 +1228,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
-			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
-					       nr_scanned);
+			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
 		else
-			__count_zone_vm_events(PGSCAN_DIRECT, zone,
-					       nr_scanned);
+			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
 	}
 	spin_unlock_irq(&zone->lru_lock);
@@ -1534,9 +1525,9 @@ static int inactive_file_is_low(struct lruvec *lruvec)
 	return inactive_file_is_low_global(lruvec_zone(lruvec));
 }
 
-static int inactive_list_is_low(struct lruvec *lruvec, int file)
+static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
 {
-	if (file)
+	if (is_file_lru(lru))
 		return inactive_file_is_low(lruvec);
 	else
 		return inactive_anon_is_low(lruvec);
@@ -1545,10 +1536,8 @@ static int inactive_list_is_low(struct lruvec *lruvec, int file)
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 				 struct lruvec *lruvec, struct scan_control *sc)
 {
-	int file = is_file_lru(lru);
-
 	if (is_active_lru(lru)) {
-		if (inactive_list_is_low(lruvec, file))
+		if (inactive_list_is_low(lruvec, lru))
 			shrink_active_list(nr_to_scan, lruvec, sc, lru);
 		return 0;
 	}
@@ -1630,7 +1619,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	 * This scanning priority is essentially the inverse of IO cost.
 	 */
 	anon_prio = vmscan_swappiness(sc);
-	file_prio = 200 - vmscan_swappiness(sc);
+	file_prio = 200 - anon_prio;
 
 	/*
 	 * OK, so we have swap space and a fair amount of page cache