@@ -1560,7 +1560,8 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct mm_struct *mm = tlb->mm;
 	int ret = 0;
 
-	if (!pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		goto out_unlocked;
 
 	orig_pmd = *pmd;
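
The hunk above establishes the pattern the rest of the patch repeats at every call site: pmd_trans_huge_lock() no longer returns a bool while handing the page-table lock back through a spinlock_t ** out-parameter; it returns the ptl itself, with NULL meaning "not a huge pmd". A compilable userspace sketch of the resulting caller shape (hypothetical names; grab_entry_lock() models pmd_trans_huge_lock() and pthread_mutex_t models spinlock_t):

#include <stdio.h>
#include <pthread.h>

/* Hedged sketch, not kernel code. */
static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t *grab_entry_lock(int is_huge)
{
	pthread_mutex_lock(&lk);
	if (is_huge)
		return &lk;		/* success: returned still locked */
	pthread_mutex_unlock(&lk);
	return NULL;
}

/* Caller shaped like madvise_free_huge_pmd() after the patch. */
static int do_free(int is_huge)
{
	pthread_mutex_t *ptl;
	int ret = 0;

	ptl = grab_entry_lock(is_huge);
	if (!ptl)
		goto out_unlocked;	/* failure path owns no lock */

	ret = 1;			/* ... operate on the entry ... */
	pthread_mutex_unlock(ptl);
out_unlocked:
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_free(1), do_free(0));
	return 0;
}

The goto out_unlocked mirrors the kernel function: the failure path never owns the lock, so it must skip the unlock.
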
@@ -1627,7 +1628,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	pmd_t orig_pmd;
 	spinlock_t *ptl;
 
-	if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		return 0;
 	/*
 	 * For architectures like ppc64 we look at deposited pgtable
@@ -1690,7 +1692,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_sem prevents deadlock.
 	 */
-	if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
+	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
+	if (old_ptl) {
 		new_ptl = pmd_lockptr(mm, new_pmd);
 		if (new_ptl != old_ptl)
 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
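
move_huge_pmd() gets the same conversion, and the surrounding context shows why the order of operations matters: the source and destination pmds may map to the same ptl, so the second lock is taken only when it is genuinely a different lock, and spin_lock_nested(..., SINGLE_DEPTH_NESTING) tells lockdep that the double acquisition within one lock class is deliberate. A compilable sketch of that aliasing-aware pattern, with pthread primitives standing in for the kernel's:

#include <pthread.h>
#include <stdio.h>

/* Take the first lock, then the second only if it is a different lock. */
static void lock_pair(pthread_mutex_t *old_l, pthread_mutex_t *new_l)
{
	pthread_mutex_lock(old_l);
	if (new_l != old_l)
		pthread_mutex_lock(new_l);
}

/* Unlock in the reverse shape, matching the kernel code's exit path. */
static void unlock_pair(pthread_mutex_t *old_l, pthread_mutex_t *new_l)
{
	if (new_l != old_l)
		pthread_mutex_unlock(new_l);
	pthread_mutex_unlock(old_l);
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	lock_pair(&a, &b);	/* distinct locks: both taken */
	unlock_pair(&a, &b);
	lock_pair(&a, &a);	/* aliased locks: taken once, no deadlock */
	unlock_pair(&a, &a);
	puts("ok");
	return 0;
}
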
@@ -1724,7 +1727,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pmd_t entry;
 		bool preserve_write = prot_numa && pmd_write(*pmd);
 		ret = 1;
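
The preserve_write flag visible in this hunk records whether the pmd was writable before the protection change, so that a prot_numa update does not spuriously strip write permission. A toy model of that bit-preserving update (ENTRY_WRITE and change_prot() are hypothetical, standing in for pmd_write()/pmd_mkwrite() and the surrounding logic):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_WRITE	0x2u	/* stand-in for the hardware write bit */

/* Apply new protection bits, optionally re-adding the write bit the
 * entry had before the change (the preserve_write idea). */
static uint32_t change_prot(uint32_t entry, uint32_t newprot, bool prot_numa)
{
	bool preserve_write = prot_numa && (entry & ENTRY_WRITE);
	uint32_t e = newprot;

	if (preserve_write)
		e |= ENTRY_WRITE;
	return e;
}

int main(void)
{
	/* A writable entry keeps its write bit across a NUMA-style
	 * protection change; a read-only one gains nothing: */
	printf("0x%x 0x%x\n",
	       change_prot(ENTRY_WRITE | 0x1u, 0x1u, true),
	       change_prot(0x1u, 0x1u, true));
	return 0;
}
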
@@ -1760,14 +1764,14 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  * Note that if it returns true, this routine returns without unlocking page
  * table lock. So callers must unlock it.
  */
-bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
-	*ptl = pmd_lock(vma->vm_mm, pmd);
+	spinlock_t *ptl;
+
+	ptl = pmd_lock(vma->vm_mm, pmd);
 	if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
-		return true;
-	spin_unlock(*ptl);
-	return false;
+		return ptl;
+	spin_unlock(ptl);
+	return NULL;
 }
 
 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
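
Seen side by side, the rewrite also removes a subtle wart of the old interface: *ptl was assigned before the huge-pmd check, so a false return still left the caller holding a pointer to a lock it did not own. A hedged sketch of both forms (pthread stand-ins, not kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

/* The old form wrote *ptl before checking the predicate, so a 'false'
 * return still left a pointer to an unowned lock behind. */
static bool old_form(pthread_mutex_t *l, bool huge, pthread_mutex_t **ptl)
{
	*ptl = l;			/* assigned even on failure */
	pthread_mutex_lock(l);
	if (huge)
		return true;
	pthread_mutex_unlock(l);
	return false;
}

/* The new form cannot leak such a pointer: a failed call hands back
 * only NULL, and a non-NULL result is held by contract. */
static pthread_mutex_t *new_form(pthread_mutex_t *l, bool huge)
{
	pthread_mutex_lock(l);
	if (huge)
		return l;
	pthread_mutex_unlock(l);
	return NULL;
}

int main(void)
{
	pthread_mutex_t l = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t *stale;
	pthread_mutex_t *ptl;

	if (!old_form(&l, false, &stale))
		printf("old: failed, yet stale = %p\n", (void *)stale);

	ptl = new_form(&l, true);
	if (ptl) {
		printf("new: locked\n");
		pthread_mutex_unlock(ptl);	/* caller must unlock */
	}
	return 0;
}

Note that the block comment above the function, left as context here, still says "returns true"; with the pointer interface it should be read as "returns non-NULL".
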
@@ -2068,7 +2072,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 	if (likely(writable)) {
 		if (likely(referenced)) {
 			result = SCAN_SUCCEED;
-			trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
+			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 							    referenced, writable, result);
 			return 1;
 		}
@@ -2078,7 +2082,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
 out:
 	release_pte_pages(pte, _pte);
-	trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
+	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 					    referenced, writable, result);
 	return 0;
 }
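
These last hunks are independent of the locking change: the khugepaged trace events now receive the struct page itself instead of a pre-computed pfn. A plausible motive (an assumption here, not stated in the hunks) is that some exit paths reach the tracepoint without a valid page, where calling page_to_pfn() at the call site would be bogus; moving the conversion behind the trace helper allows a single NULL check. A userspace analogue of that refactor, with sketch types (struct page and page_to_pfn_sketch() are stand-ins, not the kernel definitions):

#include <stdio.h>

struct page { unsigned long pfn; };

static unsigned long page_to_pfn_sketch(const struct page *p)
{
	return p->pfn;
}

/* Moving the conversion into the trace helper means one NULL check
 * here instead of one at every call site. */
static void trace_isolate(const struct page *page, int result)
{
	long pfn = page ? (long)page_to_pfn_sketch(page) : -1;

	printf("isolate: pfn=%ld result=%d\n", pfn, result);
}

int main(void)
{
	struct page p = { .pfn = 42 };

	trace_isolate(&p, 0);
	trace_isolate(NULL, 1);	/* error path with no page isolated */
	return 0;
}
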
@@ -2576,7 +2580,7 @@ out_unmap:
 		collapse_huge_page(mm, address, hpage, vma, node);
 	}
 out:
-	trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced,
+	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
 				     none_or_zero, result);
 	return ret;
 }