mm: Don't stop kswapd on a per-node basis when there are no waiters

The page allocator wakes all kswapds in an allocation context's allowed
nodemask in the slow path, so it doesn't make sense to keep the
kswapd-waiter count per NUMA node. Instead, it should be a global
counter so that all kswapds can be stopped when there are no failed
allocation requests.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
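
For context, the slow-path behavior the message refers to is wake_all_kswapds()
in mm/page_alloc.c, which wakes kswapd on every node the allocation context
allows, not only the preferred node. A condensed sketch of that helper for
kernels of this vintage (the exact body varies by version):

static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;

	/* Walk every zone permitted by the context's nodemask... */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
					ac->high_zoneidx, ac->nodemask)
		/* ...and kick that zone's node-local kswapd. */
		wakeup_kswapd(zone, order, ac->high_zoneidx);
}

Because every allowed node's kswapd is woken together, a waiter count split
per node cannot meaningfully differ between nodes, which is what makes a
single global counter sufficient.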
Sultan Alsawaf authored 5 years ago, committed by Jenna
parent f8ea27c696, commit 8375786e2b

 include/linux/mmzone.h | 1
 mm/page_alloc.c        | 2

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -664,7 +664,6 @@ typedef struct pglist_data {
 	unsigned long node_spanned_pages; /* total size of physical page
 					     range, including holes */
 	int node_id;
-	atomic_t kswapd_waiters;
 	wait_queue_head_t kswapd_wait;
 	wait_queue_head_t pfmemalloc_wait;
 	struct task_struct *kswapd;	/* Protected by
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4089,7 +4089,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	u64 utime, stime_s, stime_e, stime_d;
 	task_cputime(current, &utime, &stime_s);
-	pg_data_t *pgdat = ac->preferred_zoneref->zone->zone_pgdat;
 	bool woke_kswapd = false;
 	/*
@@ -6337,7 +6336,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 	pgdat_page_ext_init(pgdat);
 	spin_lock_init(&pgdat->lru_lock);
 	lruvec_init(node_lruvec(pgdat));
-	pgdat->kswapd_waiters = (atomic_t)ATOMIC_INIT(0);
 	pgdat->per_cpu_nodestats = &boot_nodestats;
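
Note that the hunks shown above are all removals; the counterpart additions in
mm/page_alloc.c did not load on this page. Going by the commit message, the
replacement is presumably a single file-scope counter along these lines (a
hedged sketch, not the literal missing hunk; kswapd_waiters and woke_kswapd
are the names visible in this patch, everything else is condensed):

/* One counter shared by every node's kswapd, replacing the per-pgdat field. */
static atomic_t kswapd_waiters = ATOMIC_INIT(0);

/*
 * In __alloc_pages_slowpath() (condensed): register the failing request
 * as a waiter before waking the kswapds...
 */
	if (!woke_kswapd) {
		atomic_inc(&kswapd_waiters);
		woke_kswapd = true;
	}
	wake_all_kswapds(order, ac);

	/*
	 * ...and drop the waiter on the way out, so that once no allocation
	 * is stuck in the slow path, every kswapd is free to stop.
	 */
	if (woke_kswapd)
		atomic_dec(&kswapd_waiters);

kswapd's sleep check would then test the global atomic_read(&kswapd_waiters)
in place of the removed pgdat->kswapd_waiters field.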
