@@ -1525,7 +1525,7 @@ static inline bool free_pages_prezeroed(bool poisoned)
 }
 
 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
-                                int alloc_flags)
+                                unsigned int alloc_flags)
 {
        int i;
        bool poisoned = true;
@@ -2391,7 +2391,8 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
 static inline
 struct page *buffered_rmqueue(struct zone *preferred_zone,
                        struct zone *zone, unsigned int order,
-                       gfp_t gfp_flags, int alloc_flags, int migratetype)
+                       gfp_t gfp_flags, unsigned int alloc_flags,
+                       int migratetype)
 {
        unsigned long flags;
        struct page *page;
@@ -2545,12 +2546,13 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  * to check in the allocation paths if no pages are free.
  */
 static bool __zone_watermark_ok(struct zone *z, unsigned int order,
-                       unsigned long mark, int classzone_idx, int alloc_flags,
+                       unsigned long mark, int classzone_idx,
+                       unsigned int alloc_flags,
                        long free_pages)
 {
        long min = mark;
        int o;
-       const int alloc_harder = (alloc_flags & ALLOC_HARDER);
+       const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
 
        /* free_pages may go negative - that's OK */
        free_pages -= (1 << order) - 1;
@@ -2613,7 +2615,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
 }
 
 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
-                     int classzone_idx, int alloc_flags)
+                     int classzone_idx, unsigned int alloc_flags)
 {
        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
                                        zone_page_state(z, NR_FREE_PAGES));
@@ -2957,7 +2959,7 @@ out:
 /* Try memory compaction for high-order allocations before reclaim */
 static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
-               int alloc_flags, const struct alloc_context *ac,
+               unsigned int alloc_flags, const struct alloc_context *ac,
                enum migrate_mode mode, int *contended_compaction,
                bool *deferred_compaction)
 {
@@ -3013,7 +3015,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 #else
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
-               int alloc_flags, const struct alloc_context *ac,
+               unsigned int alloc_flags, const struct alloc_context *ac,
                enum migrate_mode mode, int *contended_compaction,
                bool *deferred_compaction)
 {
@@ -3053,7 +3055,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
 /* The really slow allocator path where we enter direct reclaim */
 static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-               int alloc_flags, const struct alloc_context *ac,
+               unsigned int alloc_flags, const struct alloc_context *ac,
                unsigned long *did_some_progress)
 {
        struct page *page = NULL;
@@ -3092,10 +3094,10 @@ static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
                wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
 }
 
-static inline int
+static inline unsigned int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-       int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
+       unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 
        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -3156,7 +3158,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 {
        bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
        struct page *page = NULL;
-       int alloc_flags;
+       unsigned int alloc_flags;
        unsigned long pages_reclaimed = 0;
        unsigned long did_some_progress;
        enum migrate_mode migration_mode = MIGRATE_ASYNC;
@@ -3348,7 +3350,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        struct zoneref *preferred_zoneref;
        struct page *page = NULL;
        unsigned int cpuset_mems_cookie;
-       int alloc_flags = ALLOC_WMARK_LOW | ALLOC_FAIR;
+       unsigned int alloc_flags = ALLOC_WMARK_LOW | ALLOC_FAIR;
        gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
        struct alloc_context ac = {
                .high_zoneidx = gfp_zone(gfp_mask),
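
The conversion above is mechanical: alloc_flags is only ever built by OR-ing ALLOC_* bits together and queried by AND-ing against them, so an unsigned type models the bitmask more honestly and the ALLOC_HARDER test collapses naturally into a bool. A minimal standalone sketch of that usage pattern follows; the ALLOC_* values below are illustrative placeholders, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's ALLOC_* flags; real values differ. */
#define ALLOC_WMARK_MIN  0x00u
#define ALLOC_WMARK_LOW  0x01u
#define ALLOC_HARDER     0x10u
#define ALLOC_HIGH       0x20u
#define ALLOC_CPUSET     0x40u
#define ALLOC_FAIR       0x100u

int main(void)
{
        /* Build the mask the way gfp_to_alloc_flags() does: OR flags together. */
        unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

        alloc_flags |= ALLOC_HIGH;

        /* Test a bit the way __zone_watermark_ok() does; assigning the AND
         * result to a bool collapses any set bit to true, mirroring the
         * const int -> const bool change for alloc_harder. */
        const bool alloc_harder = (alloc_flags & ALLOC_HARDER);

        printf("flags=%#x harder=%d\n", alloc_flags, alloc_harder);
        return 0;
}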