@@ -30,55 +30,28 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 							unsigned long pgoff,
 							unsigned long flags)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 
 	if (test_thread_flag(TIF_32BIT))
 		task_size = STACK_TOP32;
-	if (unlikely(len >= VA_EXCLUDE_START))
-		return -ENOMEM;
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
 	}
 
-	task_size -= len;
-
-full_search:
-	addr = ALIGN(addr, HPAGE_SIZE);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (addr < VA_EXCLUDE_START &&
-		    (addr + len) >= VA_EXCLUDE_START) {
-			addr = VA_EXCLUDE_END;
-			vma = find_vma(mm, VA_EXCLUDE_END);
-		}
-		if (unlikely(task_size < addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-	}
+	return addr;
 }
 
 static unsigned long
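A note on the hunk above: the old open-coded first-fit loop hopped over the sparc64 VA hole in-line (the VA_EXCLUDE_START / VA_EXCLUDE_END checks inside the for loop). The replacement gets the same coverage with two vm_unmapped_area() calls: one below the hole, and a second above it that runs only when the first fails and task_size extends past VA_EXCLUDE_END. The less obvious part is info.align_mask: vm_unmapped_area() aligns its result so that the address bits selected by align_mask match align_offset, so PAGE_MASK & ~HPAGE_MASK (the bits between base-page and huge-page granularity, with align_offset 0) forces huge-page alignment. The standalone user-space sketch below illustrates that arithmetic; the 8 KB base page and 4 MB huge page sizes are assumptions matching a common sparc64 configuration, and the candidate address is made up for illustration.

#include <assert.h>
#include <stdio.h>

/* Assumed geometry (illustrative): 8 KB base pages, 4 MB huge pages. */
#define PAGE_SHIFT	13UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define HPAGE_SHIFT	22UL
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

int main(void)
{
	/* The mask the patch stores in info.align_mask: every address bit
	 * between the base-page boundary and the huge-page boundary. */
	unsigned long align_mask = PAGE_MASK & ~HPAGE_MASK;	/* 0x3fe000 */

	/* vm_unmapped_area() rounds a candidate until
	 * (candidate & align_mask) == align_offset (zero here), which is
	 * equivalent to aligning up to HPAGE_SIZE. */
	unsigned long candidate = 0x70001a000UL;	/* page-aligned only */
	unsigned long aligned = (candidate + ~HPAGE_MASK) & HPAGE_MASK;

	assert((aligned & align_mask) == 0);	/* now huge-page aligned */
	printf("align_mask=%#lx: %#lx -> %#lx\n",
	       align_mask, candidate, aligned);
	return 0;
}

The same mask/offset pair is passed unchanged on the retry above the VA hole, so both passes return huge-page-aligned addresses.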
@@ -87,71 +60,34 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 				  const unsigned long pgoff,
 				  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes.  */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache & HPAGE_MASK;
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr - len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr - len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = (mm->mmap_base - len) & HPAGE_MASK;
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & HPAGE_MASK;
-	} while (likely(len < vma->vm_start));
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
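A note on the topdown hunk: the old goto bottomup path had to save and restore free_area_cache around a call to arch_get_unmapped_area(); the fallback is now just a second vm_unmapped_area() pass over [TASK_UNMAPPED_BASE, STACK_TOP32) with flags cleared, i.e. a bottom-up search. Both hunks detect failure with addr & ~PAGE_MASK: vm_unmapped_area() returns either a page-aligned address or a negative errno cast to unsigned long, and a small negative value always has low bits set. A minimal user-space sketch of that check follows; the 8 KB page size is the same assumption as before, and the success address is made up for illustration.

#include <assert.h>
#include <errno.h>

#define PAGE_SIZE	8192UL			/* assumed 8 KB base pages */
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long ok  = 0x700400000UL;		/* page-aligned result */
	unsigned long err = (unsigned long)-ENOMEM;	/* 0xffff...fff4 */

	assert((ok  & ~PAGE_MASK) == 0);	/* success: low bits clear */
	assert((err & ~PAGE_MASK) != 0);	/* failure: low bits set */
	return 0;
}

The VM_BUG_ON(addr != -ENOMEM) in both hunks encodes the matching assumption: -ENOMEM is the only error vm_unmapped_area() can return here.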