@@ -1227,7 +1227,12 @@ out:
return retval ;
}
/*
/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
@@ -1319,7 +1324,16 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
return 0 ;
}
/*  Note: this is only safe if the mm semaphore is held when called. */
/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range ( struct vm_area_struct * vma , unsigned long addr ,
unsigned long pfn , unsigned long size , pgprot_t prot )
{
@@ -1801,9 +1815,10 @@ void unmap_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL ( unmap_mapping_range ) ;
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
@@ -1872,11 +1887,16 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
}
EXPORT_UNUSED_SYMBOL ( vmtruncate_range ) ; /* June 2006 */
/*
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @addr: address to start
 * @vma: user vma this addresses belong to
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.