From 8375786e2bddb047138263e374a690d614720610 Mon Sep 17 00:00:00 2001 From: Sultan Alsawaf Date: Wed, 20 May 2020 09:55:17 -0700 Subject: [PATCH] mm: Don't stop kswapd on a per-node basis when there are no waiters The page allocator wakes all kswapds in an allocation context's allowed nodemask in the slow path, so it doesn't make sense to have the kswapd-waiter count per NUMA node. Instead, it should be a global counter to stop all kswapds when there are no failed allocation requests. Signed-off-by: Sultan Alsawaf --- include/linux/mmzone.h | 1 - mm/page_alloc.c | 2 -- 2 files changed, 3 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c4dae0bcc43b..a6f1148085c2 100755 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -664,7 +664,6 @@ typedef struct pglist_data { unsigned long node_spanned_pages; /* total size of physical page range, including holes */ int node_id; - atomic_t kswapd_waiters; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; struct task_struct *kswapd; /* Protected by diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3d8cbb397f63..51de840d394d 100755 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4089,7 +4089,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, u64 utime, stime_s, stime_e, stime_d; task_cputime(current, &utime, &stime_s); - pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat; bool woke_kswapd = false; /* @@ -6337,7 +6336,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) pgdat_page_ext_init(pgdat); spin_lock_init(&pgdat->lru_lock); lruvec_init(node_lruvec(pgdat)); - pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0); pgdat->per_cpu_nodestats = &boot_nodestats;