@@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 		     unsigned long pgoff, unsigned long flags)
 {
 	long map_shared = (flags & MAP_SHARED);
-	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+	unsigned long align_mask = 0;
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 		addr = 0;
 #endif
 	if (!addr)
-		addr = mm->free_area_cache;
+		addr = TASK_UNMAPPED_BASE;
 
 	if (map_shared && (TASK_SIZE > 0xfffffffful))
 		/*
@@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 		 * tasks, we prefer to avoid exhausting the address space too quickly by
 		 * limiting alignment to a single page.
 		 */
-		align_mask = SHMLBA - 1;
-
-  full_search:
-	start_addr = addr = (addr + align_mask) & ~align_mask;
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point: (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				/* Start a new search --- just in case we missed some holes. */
-				addr = TASK_UNMAPPED_BASE;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/* Remember the address where we stopped this search: */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		addr = (vma->vm_end + align_mask) & ~align_mask;
-	}
+		align_mask = PAGE_MASK & (SHMLBA - 1);
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = align_mask;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 asmlinkage long
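
For reference, a minimal sketch of the vm_unmapped_area() calling convention this patch switches to. The info fields and the PAGE_MASK & (SHMLBA - 1) colouring are taken from the hunks above; the wrapper function example_get_area() and its parameters are illustrative only and not part of the patch.

#include <linux/mm.h>
#include <linux/shm.h>

/*
 * Illustrative sketch: ask the core VM for a free range of 'len' bytes
 * between 'low' and 'high', searching bottom-up (info.flags == 0).
 */
static unsigned long example_get_area(unsigned long low, unsigned long high,
				      unsigned long len, int map_shared)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;			/* bottom-up search, as in this patch */
	info.length = len;
	info.low_limit = low;
	info.high_limit = high;
	/*
	 * align_mask only expresses alignment beyond PAGE_SIZE, so the
	 * page-offset bits of SHMLBA - 1 are masked off; the mask collapses
	 * to 0 when SHMLBA == PAGE_SIZE.
	 */
	info.align_mask = map_shared ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = 0;
	return vm_unmapped_area(&info);	/* free address, or an error value on failure */
}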