@@ -2808,7 +2808,7 @@ static unsigned int nr_free_zone_pages(int offset)
 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

 	for_each_zone_zonelist(zone, z, zonelist, offset) {
-		unsigned long size = zone->present_pages;
+		unsigned long size = zone->managed_pages;
 		unsigned long high = high_wmark_pages(zone);
 		if (size > high)
 			sum += size - high;
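This is the crux of the series: zone->present_pages counts every page that physically exists in the zone, while zone->managed_pages also subtracts pages permanently reserved by the bootmem/memblock allocator, leaving only what the buddy allocator can really hand out. A userspace sketch of the nr_free_zone_pages() arithmetic under the new field; the zone_sample struct and the zone sizes are invented for illustration:

/*
 * Userspace illustration, not kernel code: each zone contributes
 * whatever lies above its high watermark, now measured against
 * managed_pages instead of present_pages.
 */
#include <stdio.h>

struct zone_sample {
	unsigned long managed_pages;	/* pages under buddy control */
	unsigned long high_wmark;	/* high watermark, in pages */
};

int main(void)
{
	struct zone_sample zones[] = {
		{ .managed_pages = 4096,   .high_wmark = 128  },
		{ .managed_pages = 262144, .high_wmark = 2048 },
	};
	unsigned long sum = 0;

	for (unsigned int i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
		if (zones[i].managed_pages > zones[i].high_wmark)
			sum += zones[i].managed_pages - zones[i].high_wmark;

	printf("allocatable estimate: %lu pages\n", sum);
	return 0;
}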
@@ -2861,7 +2861,7 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 	val->totalram = pgdat->node_present_pages;
 	val->freeram = node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
-	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
+	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
 	val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
 			NR_FREE_PAGES);
 #else
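Note that val->totalram still reads pgdat->node_present_pages in the context above; only the highmem figure switches here, presumably because no per-node managed counter existed at this point. These numbers are directly visible to userspace: si_meminfo_node() backs the per-node meminfo files in sysfs, and the global si_meminfo() fills the same struct sysinfo for the sysinfo(2) syscall. A quick way to inspect the system-wide values (standard glibc API, nothing hypothetical):

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) != 0) {
		perror("sysinfo");
		return 1;
	}
	/* all values are in units of si.mem_unit bytes */
	printf("totalram:  %lu\n", si.totalram);
	printf("freeram:   %lu\n", si.freeram);
	printf("totalhigh: %lu\n", si.totalhigh);
	printf("freehigh:  %lu\n", si.freehigh);
	printf("mem_unit:  %u\n", si.mem_unit);
	return 0;
}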
@@ -3939,7 +3939,7 @@ static int __meminit zone_batchsize(struct zone *zone)
 	 *
 	 * OK, so we don't know how big the cache is.  So guess.
 	 */
-	batch = zone->present_pages / 1024;
+	batch = zone->managed_pages / 1024;
 	if (batch * PAGE_SIZE > 512 * 1024)
 		batch = (512 * 1024) / PAGE_SIZE;
 	batch /= 4;		/* We effectively *= 4 below */
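The heuristic aims the per-cpu batch at roughly a thousandth of the zone, capped at 512KB worth of pages and then quartered (the allocator effectively multiplies by four again). A userspace sketch of the arithmetic with an invented zone size; the final 2^n - 1 rounding mirrors, as far as I can tell, the step zone_batchsize() performs just after the lines shown:

/*
 * Userspace sketch of the zone_batchsize() heuristic: about 1/1024
 * of the zone, capped at 512KB worth of pages, then quartered and
 * rounded to a 2^n - 1 value.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static int zone_batchsize(unsigned long managed_pages)
{
	unsigned long batch = managed_pages / 1024;

	if (batch * PAGE_SIZE > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* effectively *= 4 again later */
	if (batch < 1)
		batch = 1;
	return (int)(rounddown_pow_of_two(batch + batch / 2) - 1);
}

int main(void)
{
	/* a 1GB zone of 4KB pages gives the familiar batch of 31 */
	printf("batch = %d\n", zone_batchsize(262144));
	return 0;
}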
@@ -4023,7 +4023,7 @@ static void __meminit setup_zone_pageset(struct zone *zone)

 		if (percpu_pagelist_fraction)
 			setup_pagelist_highmark(pcp,
-				(zone->present_pages /
+				(zone->managed_pages /
 					percpu_pagelist_fraction));
 	}
 }
@@ -5435,8 +5435,8 @@ static void calculate_totalreserve_pages(void)
 			/* we treat the high watermark as reserved pages. */
 			max += high_wmark_pages(zone);

-			if (max > zone->present_pages)
-				max = zone->present_pages;
+			if (max > zone->managed_pages)
+				max = zone->managed_pages;
 			reserve_pages += max;
 			/*
 			 * Lowmem reserves are not available to
@@ -5468,7 +5468,7 @@ static void setup_per_zone_lowmem_reserve(void)
 	for_each_online_pgdat(pgdat) {
 		for (j = 0; j < MAX_NR_ZONES; j++) {
 			struct zone *zone = pgdat->node_zones + j;
-			unsigned long present_pages = zone->present_pages;
+			unsigned long managed_pages = zone->managed_pages;

 			zone->lowmem_reserve[j] = 0;
@@ -5482,9 +5482,9 @@ static void setup_per_zone_lowmem_reserve(void)
 					sysctl_lowmem_reserve_ratio[idx] = 1;

 				lower_zone = pgdat->node_zones + idx;
-				lower_zone->lowmem_reserve[j] = present_pages /
-					sysctl_lowmem_reserve_ratio[idx];
-				present_pages += lower_zone->present_pages;
+				lower_zone->lowmem_reserve[j] = managed_pages /
+					sysctl_lowmem_reserve_ratio[idx];
+				managed_pages += lower_zone->managed_pages;
 			}
 		}
 	}
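The cumulative walk is easiest to see in isolation: for each zone j, every lower zone idx reserves cumulative-managed-pages / ratio[idx] pages against allocations that are allowed to fall back into it. A userspace sketch with invented zone sizes (the default ratios are 256 for DMA/DMA32 and 32 for Normal):

/*
 * Userspace sketch of setup_per_zone_lowmem_reserve(): reserves are
 * computed from the running sum of managed pages in higher zones.
 */
#include <stdio.h>

#define MAX_NR_ZONES 3	/* DMA, Normal, HighMem for this example */

int main(void)
{
	unsigned long managed[MAX_NR_ZONES] = { 4000, 200000, 50000 };
	unsigned long ratio[MAX_NR_ZONES] = { 256, 32, 0 };
	unsigned long reserve[MAX_NR_ZONES][MAX_NR_ZONES] = { { 0 } };

	for (int j = 0; j < MAX_NR_ZONES; j++) {
		unsigned long managed_pages = managed[j];

		for (int idx = j - 1; idx >= 0; idx--) {
			reserve[idx][j] = managed_pages / ratio[idx];
			managed_pages += managed[idx];
		}
	}

	for (int idx = 0; idx < MAX_NR_ZONES; idx++)
		for (int j = idx + 1; j < MAX_NR_ZONES; j++)
			printf("zone %d reserves %lu pages vs zone %d\n",
			       idx, reserve[idx][j], j);
	return 0;
}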
@@ -5503,14 +5503,14 @@ static void __setup_per_zone_wmarks(void)
 	/* Calculate total number of !ZONE_HIGHMEM pages */
 	for_each_zone(zone) {
 		if (!is_highmem(zone))
-			lowmem_pages += zone->present_pages;
+			lowmem_pages += zone->managed_pages;
 	}

 	for_each_zone(zone) {
 		u64 tmp;

 		spin_lock_irqsave(&zone->lock, flags);
-		tmp = (u64)pages_min * zone->present_pages;
+		tmp = (u64)pages_min * zone->managed_pages;
 		do_div(tmp, lowmem_pages);
 		if (is_highmem(zone)) {
 			/*
@@ -5524,7 +5524,7 @@ static void __setup_per_zone_wmarks(void)
 			 */
 			unsigned long min_pages;

-			min_pages = zone->present_pages / 1024;
+			min_pages = zone->managed_pages / 1024;
 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
 			zone->watermark[WMARK_MIN] = min_pages;
 		} else {
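Taken together, these two hunks make the min watermark each zone's proportional share of pages_min, measured in managed pages, while highmem zones get only a small clamped floor because pages reserved there are useless for most kernel allocations. A userspace sketch of the split, with all sizes invented for illustration:

/*
 * Userspace sketch of the __setup_per_zone_wmarks() split between
 * lowmem (proportional share) and highmem (small clamped floor).
 */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
			      unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long pages_min = 4096;		/* from min_free_kbytes */
	unsigned long lowmem_pages = 204000;	/* sum over !highmem zones */
	struct { const char *name; unsigned long managed; int highmem; } z[] = {
		{ "DMA",     4000,   0 },
		{ "Normal",  200000, 0 },
		{ "HighMem", 50000,  1 },
	};

	for (unsigned int i = 0; i < sizeof(z) / sizeof(z[0]); i++) {
		unsigned long wmark_min;

		if (z[i].highmem)
			/* don't hoard highmem the kernel can't use anyway */
			wmark_min = clamp_ul(z[i].managed / 1024,
					     SWAP_CLUSTER_MAX, 128UL);
		else
			wmark_min = (unsigned long)((unsigned long long)pages_min *
					z[i].managed / lowmem_pages);

		printf("%-8s WMARK_MIN = %lu pages\n", z[i].name, wmark_min);
	}
	return 0;
}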
@@ -5586,7 +5586,7 @@ static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
 	unsigned int gb, ratio;

 	/* Zone size in gigabytes */
-	gb = zone->present_pages >> (30 - PAGE_SHIFT);
+	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
 	if (gb)
 		ratio = int_sqrt(10 * gb);
 	else
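The inactive ratio scales with the square root of the zone size (falling back to 1 for sub-gigabyte zones), so basing it on managed_pages keeps bootmem-reserved pages from inflating it. A userspace sketch of the resulting values, assuming 4KB pages:

/*
 * Userspace sketch of calculate_zone_inactive_ratio(): the target
 * active:inactive anon ratio grows as sqrt(10 * size_in_GB), e.g.
 * 1GB -> 3, 10GB -> 10, 100GB -> 31.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4KB pages assumed for the illustration */

static unsigned int int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return (unsigned int)r;
}

int main(void)
{
	unsigned long managed_gb[] = { 1, 10, 100 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned long pages = managed_gb[i] << (30 - PAGE_SHIFT);
		unsigned int gb = pages >> (30 - PAGE_SHIFT);
		unsigned int ratio = gb ? int_sqrt(10 * gb) : 1;

		printf("%3lu GB -> inactive_ratio %u\n", managed_gb[i], ratio);
	}
	return 0;
}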
@@ -5672,7 +5672,7 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
 		return rc;

 	for_each_zone(zone)
-		zone->min_unmapped_pages = (zone->present_pages *
+		zone->min_unmapped_pages = (zone->managed_pages *
 				sysctl_min_unmapped_ratio) / 100;
 	return 0;
 }
@@ -5688,7 +5688,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
 		return rc;

 	for_each_zone(zone)
-		zone->min_slab_pages = (zone->present_pages *
+		zone->min_slab_pages = (zone->managed_pages *
 				sysctl_min_slab_ratio) / 100;
 	return 0;
 }
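Both zone_reclaim thresholds are plain percentages of the zone, so the change is mechanical; what matters is that the percentage is now taken of memory the allocator can actually reclaim from. A userspace sketch using the default ratios (1 for min_unmapped_ratio, 5 for min_slab_ratio) and an invented zone size:

/*
 * Userspace sketch of the two zone_reclaim sysctl thresholds:
 * straight percentages of managed_pages.
 */
#include <stdio.h>

int main(void)
{
	unsigned long managed_pages = 262144;	/* 1GB of 4KB pages */
	unsigned int sysctl_min_unmapped_ratio = 1;
	unsigned int sysctl_min_slab_ratio = 5;

	printf("min_unmapped_pages = %lu\n",
	       managed_pages * sysctl_min_unmapped_ratio / 100);
	printf("min_slab_pages     = %lu\n",
	       managed_pages * sysctl_min_slab_ratio / 100);
	return 0;
}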
@@ -5730,7 +5730,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	for_each_populated_zone(zone) {
 		for_each_possible_cpu(cpu) {
 			unsigned long high;
-			high = zone->present_pages / percpu_pagelist_fraction;
+			high = zone->managed_pages / percpu_pagelist_fraction;
 			setup_pagelist_highmark(
 				per_cpu_ptr(zone->pageset, cpu), high);
 		}
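When the sysctl is nonzero, each CPU may cache up to 1/fraction of the zone on its per-cpu list, so computing the cap from present_pages could let the per-cpu lists claim pages the buddy allocator never owned. A userspace sketch of the resulting high/batch pair, with invented values; the batch derivation roughly mirrors what setup_pagelist_highmark() did in kernels of this vintage:

/*
 * Userspace sketch of the percpu_pagelist_fraction sysctl: the
 * per-cpu "high" mark becomes 1/fraction of the zone, and batch
 * is derived as high/4, capped at PAGE_SHIFT * 8.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long managed_pages = 262144;	/* 1GB zone, 4KB pages */
	int fraction = 8;			/* sysctl value */
	unsigned long high = managed_pages / fraction;
	unsigned long batch = high / 4;

	if (batch < 1)
		batch = 1;
	if (batch > PAGE_SHIFT * 8)
		batch = PAGE_SHIFT * 8;

	printf("high = %lu pages, batch = %lu pages per CPU\n", high, batch);
	return 0;
}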