@@ -19,6 +19,8 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -2149,6 +2151,19 @@ nomem:
 	return -ENOMEM;
 }
 
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+		return 1;
+	} else
+		return 0;
+}
+
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end, struct page *ref_page)
 {
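For context, the hwpoison swap entry that is_hugetlb_entry_hwpoisoned() looks for is the one the memory-failure code installs when it unmaps a poisoned page, as the comment in the hugetlb_no_page() hunk below also notes. A simplified sketch of that step (based on the try_to_unmap_one() path in mm/rmap.c; it is not part of this patch and the surrounding bookkeeping is omitted):

	/* On unmap of a poisoned page, record the poison in the pte itself */
	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON))
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));

Because the pte is replaced rather than cleared, a later page table walk can still tell that the mapping was torn down because of a memory error, which is exactly what the new helper checks.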
@@ -2207,6 +2222,12 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		if (huge_pte_none(pte))
 			continue;
 
+		/*
+		 * An HWPoisoned hugepage is already unmapped and its reference dropped
+		 */
+		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+			continue;
+
 		page = pte_page(pte);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
@@ -2490,6 +2511,18 @@ retry:
 		page_dup_rmap(page);
 	}
 
+	/*
+	 * Since the memory error handler replaces the pte with a hwpoison swap
+	 * entry at the time of error handling, a process which has reserved but
+	 * does not yet have a mapping to the error hugepage will not have a
+	 * hwpoison swap entry.  So we need to block accesses from such a
+	 * process by checking the PG_hwpoison bit here.
+	 */
+	if (unlikely(PageHWPoison(page))) {
+		ret = VM_FAULT_HWPOISON;
+		goto backout_unlocked;
+	}
+
 	/*
 	 * If we are going to COW a private mapping later, we examine the
 	 * pending reservations for this page now.  This will ensure that
@@ -2544,6 +2577,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
+	ptep = huge_pte_offset(mm, address);
+	if (ptep) {
+		entry = huge_ptep_get(ptep);
+		if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+			return VM_FAULT_HWPOISON;
+	}
+
 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
 	if (!ptep)
 		return VM_FAULT_OOM;
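With these checks in place, an access to a hwpoisoned hugepage is answered with VM_FAULT_HWPOISON, which the architecture fault handler typically turns into SIGBUS, instead of silently handing out data from the bad page. A minimal, hypothetical user-space test sketch (not part of the patch): it assumes 2MB huge pages with a pool reserved via /proc/sys/vm/nr_hugepages, CONFIG_MEMORY_FAILURE, and CAP_SYS_ADMIN for MADV_HWPOISON; the fallback constants are only for old libc headers.

/*
 * Hypothetical test sketch: poison one base page of a hugetlb mapping with
 * MADV_HWPOISON, then touch the mapping again and expect SIGBUS rather than
 * data from the bad hugepage.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB	0x40000		/* x86 value, fallback for old headers */
#endif
#ifndef MADV_HWPOISON
#define MADV_HWPOISON	100		/* fallback for old headers */
#endif

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumes 2MB huge pages */

int main(void)
{
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");		/* no huge pages reserved? */
		return 1;
	}

	memset(p, 0xaa, HPAGE_SIZE);	/* fault the hugepage in */

	/* Poisoning one base page is enough; the error handling is applied
	 * to the hugepage backing it. */
	if (madvise(p, getpagesize(), MADV_HWPOISON) < 0) {
		perror("madvise(MADV_HWPOISON)");
		return 1;
	}

	printf("touching poisoned hugepage, expecting SIGBUS...\n");
	fflush(stdout);
	printf("read back 0x%x (unexpected)\n", p[0] & 0xff);
	return 0;
}

Expected behaviour is that madvise() succeeds and the final read is killed with SIGBUS instead of printing a value.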