From d454e4a8e2d6700f851e34a31ef7b00d582d81a8 Mon Sep 17 00:00:00 2001
From: "Se Wang (Patrick) Oh"
Date: Thu, 25 Jun 2015 15:55:04 -0700
Subject: [PATCH] mm: slub: call kasan_alloc_pages before freeing pages in slub

KASan marks SLUB objects as redzone or freed, and the shadow bitmasks
for those regions are not cleared until the backing pages are freed.
When CONFIG_PAGE_POISONING is enabled, the pages still carry these
special bitmasks while they are being poisoned, so KASan reports a
false use-after-free from the poisoning memset. Mark the pages as
allocated before poisoning them, so the shadow is clear by the time
kernel_map_pages() writes the poison pattern.

==================================================================
BUG: KASan: use after free in memset+0x24/0x44 at addr ffffffc0bb628000
Write of size 4096 by task kworker/u8:0/6
page:ffffffbacc51d900 count:0 mapcount:0 mapping: (null) index:0x0
flags: 0x4000000000000000()
page dumped because: kasan: bad access detected
Call trace:
[] dump_backtrace+0x0/0x250
[] show_stack+0x10/0x1c
[] dump_stack+0x74/0xfc
[] kasan_report_error+0x2b0/0x408
[] kasan_report+0x34/0x40
[] __asan_storeN+0x15c/0x168
[] memset+0x20/0x44
[] kernel_map_pages+0x2e8/0x384
[] free_pages_prepare+0x340/0x3a0
[] __free_pages_ok+0x20/0x12c
[] __free_pages+0x34/0x44
[] __free_kmem_pages+0x8/0x14
[] kfree+0x114/0x254
[] devres_free+0x48/0x5c
[] devres_destroy+0x10/0x28
[] devm_kfree+0x1c/0x3c
Memory state around the buggy address:
 ffffffc0bb627f00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 ffffffc0bb627f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
>ffffffc0bb628000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
                   ^
 ffffffc0bb628080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
 ffffffc0bb628100: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
==================================================================
BUG: KASan: use after free in memset+0x24/0x44 at addr ffffffc0bb2fe000
Write of size 4096 by task swapper/0/1
page:ffffffbacc4fdec0 count:0 mapcount:0 mapping: (null) index:0xffffffc0bb2fe6a0
flags: 0x4000000000000000()
page dumped because: kasan: bad access detected
Call trace:
[] dump_backtrace+0x0/0x250
[] show_stack+0x10/0x1c
[] dump_stack+0x74/0xfc
[] kasan_report_error+0x2b0/0x408
[] kasan_report+0x34/0x40
[] __asan_storeN+0x15c/0x168
[] memset+0x20/0x44
[] kernel_map_pages+0x2e8/0x384
[] free_pages_prepare+0x340/0x3a0
[] __free_pages_ok+0x20/0x12c
[] __free_pages+0x34/0x44
[] __free_slab+0x15c/0x178
[] discard_slab+0x60/0x6c
[] __slab_free+0x320/0x340
[] kmem_cache_free+0x1d0/0x25c
[] kernfs_put+0x2a0/0x3d8
Memory state around the buggy address:
 ffffffc0bb2fdf00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
 ffffffc0bb2fdf80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
>ffffffc0bb2fe000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fc
                   ^
 ffffffc0bb2fe080: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
 ffffffc0bb2fe100: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
==================================================================

Change-Id: Id963b9439685f94a022dcdd60b59aaf126610387
Signed-off-by: Se Wang (Patrick) Oh
Signed-off-by: Patrick Daly
Signed-off-by: Sudarshan Rajagopalan
---
 mm/slub.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/mm/slub.c b/mm/slub.c
index 59c33eec7728..18f5352f5e5c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1690,6 +1690,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	memcg_uncharge_slab(page, order, s);
+	kasan_alloc_pages(page, order);
 	__free_pages(page, order);
 }
 
@@ -3903,6 +3904,7 @@ void kfree(const void *x)
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(object);
+		kasan_alloc_pages(page, compound_order(page));
 		__free_pages(page, compound_order(page));
 		return;
 	}
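
For reference, below is a minimal, self-contained C model of the
interaction this patch addresses. It is not kernel code: shadow[],
mark_freed(), mark_allocated() and checked_memset() are hypothetical
stand-ins for KASan's shadow memory, the poisoning done when slab
objects are freed, kasan_alloc_pages(), and the instrumented memset()
reached via kernel_map_pages(). It only illustrates why clearing the
shadow before page poisoning avoids the false report.

/*
 * Illustrative model only; names below are hypothetical stand-ins for
 * the real KASan machinery in mm/kasan/ and the callers in mm/slub.c.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096
#define GRANULE		8	/* one shadow byte covers 8 bytes */
#define NPAGES		4

static unsigned char memory[NPAGES * PAGE_SIZE];
static unsigned char shadow[sizeof(memory) / GRANULE]; /* 0x00 = accessible */

/* Analogue of poisoning the shadow when a slab page's objects are freed. */
static void mark_freed(size_t off, size_t len, unsigned char pattern)
{
	memset(shadow + off / GRANULE, pattern, len / GRANULE);
}

/* Analogue of kasan_alloc_pages(): make the whole range accessible again. */
static void mark_allocated(size_t off, size_t len)
{
	memset(shadow + off / GRANULE, 0x00, len / GRANULE);
}

/* Analogue of an instrumented memset, as used by page poisoning. */
static void checked_memset(size_t off, int c, size_t len)
{
	for (size_t i = 0; i < len; i += GRANULE) {
		if (shadow[(off + i) / GRANULE] != 0x00) {
			printf("BUG: use after free in memset at offset %zu (shadow 0x%02x)\n",
			       off + i, shadow[(off + i) / GRANULE]);
			return;
		}
	}
	memset(memory + off, c, len);
	printf("memset of %zu bytes at offset %zu OK\n", len, off);
}

int main(void)
{
	size_t page = 1 * PAGE_SIZE;

	/* Slab frees its objects: shadow now holds 0xfb ("freed") bytes. */
	mark_freed(page, PAGE_SIZE, 0xfb);

	/* Without the fix: page poisoning writes into a "freed" region. */
	checked_memset(page, 0xaa, PAGE_SIZE);

	/* With the fix: clear the shadow first, then poison the page. */
	mark_allocated(page, PAGE_SIZE);
	checked_memset(page, 0xaa, PAGE_SIZE);
	return 0;
}

Built with any C99 compiler, the first checked_memset() reports a hit
because the shadow still says the region is freed, while the second one,
run after mark_allocated(), succeeds. That mirrors the before/after
behaviour of calling kasan_alloc_pages() ahead of __free_pages().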