@@ -14,21 +14,8 @@
  * case the overhead consists of a few additional pages that are
  * allocated to create a view of memory for vmemmap.
  *
- * Special Kconfig settings:
- *
- * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
- *
- *	The architecture has its own functions to populate the memory
- *	map and provides a vmemmap_populate function.
- *
- * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
- *
- *	The architecture provides functions to populate the pmd level
- *	of the vmemmap mappings.  Allowing mappings using large pages
- *	where available.
- *
- * If neither are set then PAGE_SIZE mappings are generated which
- * require one PTE/TLB per PAGE_SIZE chunk of the virtual memory map.
+ * The architecture is expected to provide a vmemmap_populate() function
+ * to instantiate the mapping.
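+ *
+ * For example, an architecture that wants only base-page-sized
+ * mappings could implement it as a thin wrapper over the helpers
+ * below (a minimal sketch, not mandated by this interface):
+ *
+ *	int __meminit vmemmap_populate(struct page *start_page,
+ *					unsigned long size, int node)
+ *	{
+ *		return vmemmap_populate_basepages(start_page, size, node);
+ *	}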
  */
 #include <linux/mm.h>
 #include <linux/mmzone.h>
@@ -60,7 +47,6 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 				__pa(MAX_DMA_ADDRESS));
 }
 
-#ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
 void __meminit vmemmap_verify(pte_t *pte, int node,
 				unsigned long start, unsigned long end)
 {
@@ -72,103 +58,84 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
" page_structs \n " , start , end - 1 ) ;
}
# ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
static int __meminit vmemmap_populate_pte ( pmd_t * pmd , unsigned long addr ,
unsigned long end , int node )
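+/*
+ * Instantiate the pte for one vmemmap page at 'addr': if the pte is
+ * still empty, allocate a backing page on 'node' and map it with
+ * PAGE_KERNEL.  Returns the pte, or 0 if the allocation failed.
+ */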
+pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 {
-	pte_t *pte;
-
-	for (pte = pte_offset_kernel(pmd, addr); addr < end;
-						pte++, addr += PAGE_SIZE)
-		if (pte_none(*pte)) {
-			pte_t entry;
-			void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-			if (!p)
-				return -ENOMEM;
-
-			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
-			set_pte(pte, entry);
-		} else
-			vmemmap_verify(pte, node, addr + PAGE_SIZE, end);
-
-	return 0;
+	pte_t *pte = pte_offset_kernel(pmd, addr);
+	if (pte_none(*pte)) {
+		pte_t entry;
+		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!p)
+			return 0;
+		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+		set_pte_at(&init_mm, addr, pte, entry);
+	}
+	return pte;
 }
 
-int __meminit vmemmap_populate_pmd(pud_t *pud, unsigned long addr,
-						unsigned long end, int node)
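+/*
+ * Ensure the pmd covering 'addr' exists, allocating a page for the
+ * pte level on 'node' if necessary.  Returns the pmd, or 0 on
+ * allocation failure.
+ */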
+pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 {
-	pmd_t *pmd;
-	int error = 0;
-	unsigned long next;
-
-	for (pmd = pmd_offset(pud, addr); addr < end && !error;
-						pmd++, addr = next) {
-		if (pmd_none(*pmd)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-			if (!p)
-				return -ENOMEM;
-
-			pmd_populate_kernel(&init_mm, pmd, p);
-		} else
-			vmemmap_verify((pte_t *)pmd, node,
-					pmd_addr_end(addr, end), end);
-		next = pmd_addr_end(addr, end);
-		error = vmemmap_populate_pte(pmd, addr, next, node);
+	pmd_t *pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd)) {
+		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!p)
+			return 0;
+		pmd_populate_kernel(&init_mm, pmd, p);
 	}
-	return error;
+	return pmd;
 }
-#endif /* CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD */
 
-static int __meminit vmemmap_populate_pud(pgd_t *pgd, unsigned long addr,
-						unsigned long end, int node)
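+/*
+ * Ensure the pud covering 'addr' exists, allocating a page for the
+ * pmd level on 'node' if necessary.  Returns the pud, or 0 on
+ * allocation failure.
+ */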
+pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
 {
-	pud_t *pud;
-	int error = 0;
-	unsigned long next;
-
-	for (pud = pud_offset(pgd, addr); addr < end && !error;
-						pud++, addr = next) {
-		if (pud_none(*pud)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-			if (!p)
-				return -ENOMEM;
+	pud_t *pud = pud_offset(pgd, addr);
+	if (pud_none(*pud)) {
+		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!p)
+			return 0;
+		pud_populate(&init_mm, pud, p);
+	}
+	return pud;
+}
 
-			pud_populate(&init_mm, pud, p);
-		}
-		next = pud_addr_end(addr, end);
-		error = vmemmap_populate_pmd(pud, addr, next, node);
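+/*
+ * Ensure the kernel pgd entry covering 'addr' exists, allocating a
+ * page for the pud level on 'node' if necessary.  Returns the pgd,
+ * or 0 on allocation failure.
+ */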
+pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	if (pgd_none(*pgd)) {
+		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!p)
+			return 0;
+		pgd_populate(&init_mm, pgd, p);
 	}
-	return error;
+	return pgd;
 }
 
-int __meminit vmemmap_populate(struct page *start_page,
-						unsigned long nr, int node)
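+/*
+ * Map the virtual memory map for 'size' struct pages starting at
+ * 'start_page' with base-page-sized mappings, instantiating every
+ * page table level and verifying any ranges already populated.
+ */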
+int __meminit vmemmap_populate_basepages(struct page *start_page,
+						unsigned long size, int node)
 {
-	pgd_t *pgd;
 	unsigned long addr = (unsigned long)start_page;
-	unsigned long end = (unsigned long)(start_page + nr);
-	unsigned long next;
-	int error = 0;
-
-	printk(KERN_DEBUG "[%lx-%lx] Virtual memory section"
-		" (%ld pages) node %d\n", addr, end - 1, nr, node);
-
-	for (pgd = pgd_offset_k(addr); addr < end && !error;
-					pgd++, addr = next) {
-		if (pgd_none(*pgd)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-			if (!p)
-				return -ENOMEM;
+	unsigned long end = (unsigned long)(start_page + size);
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
 
-			pgd_populate(&init_mm, pgd, p);
-		}
-		next = pgd_addr_end(addr, end);
-		error = vmemmap_populate_pud(pgd, addr, next, node);
+	for (; addr < end; addr += PAGE_SIZE) {
+		pgd = vmemmap_pgd_populate(addr, node);
+		if (!pgd)
+			return -ENOMEM;
+		pud = vmemmap_pud_populate(pgd, addr, node);
+		if (!pud)
+			return -ENOMEM;
+		pmd = vmemmap_pmd_populate(pud, addr, node);
+		if (!pmd)
+			return -ENOMEM;
+		pte = vmemmap_pte_populate(pmd, addr, node);
+		if (!pte)
+			return -ENOMEM;
+		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
 	}
-	return error;
+
+	return 0;
 }
-#endif /* !CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP */
 
 struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
 {