@@ -1487,10 +1487,12 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 	/*
 	 * If the new pfn is within the range of the newly allocated
 	 * kernel pagetable, and it isn't being mapped into an
-	 * early_ioremap fixmap slot, make sure it is RO.
+	 * early_ioremap fixmap slot as a freshly allocated page, make sure
+	 * it is RO.
 	 */
-	if (!is_early_ioremap_ptep(ptep) &&
-	    pfn >= pgt_buf_start && pfn < pgt_buf_end)
+	if (((!is_early_ioremap_ptep(ptep) &&
+			pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
+			(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
 		pte = pte_wrprotect(pte);
 
 	return pte;
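For clarity, here is a minimal standalone model of the condition introduced above: a mapping is forced read-only either when it is not an early_ioremap fixmap slot and falls inside the freshly allocated pagetable range [pgt_buf_start, pgt_buf_end), or when it is an early_ioremap fixmap slot that maps anything other than the most recently allocated page (pgt_buf_end - 1). The helper name must_be_readonly and the plain-integer interface are assumptions for illustration, not kernel code.

/* Userspace sketch of the write-protect decision in mask_rw_pte();
 * only the condition mirrors the hunk above, everything else is a
 * simplification. */
#include <stdbool.h>
#include <stdio.h>

static bool must_be_readonly(bool is_early_ioremap_slot, unsigned long pfn,
			     unsigned long pgt_buf_start,
			     unsigned long pgt_buf_end)
{
	/* Ordinary pte pointing into the new kernel pagetable: RO. */
	if (!is_early_ioremap_slot &&
	    pfn >= pgt_buf_start && pfn < pgt_buf_end)
		return true;

	/* early_ioremap fixmap slot: only the freshly allocated page
	 * (pgt_buf_end - 1) may stay writable; anything else is RO. */
	if (is_early_ioremap_slot && pfn != (pgt_buf_end - 1))
		return true;

	return false;
}

int main(void)
{
	unsigned long start = 0x1000, end = 0x1010;

	printf("%d\n", must_be_readonly(false, 0x1004, start, end)); /* 1 */
	printf("%d\n", must_be_readonly(true, end - 1, start, end)); /* 0 */
	printf("%d\n", must_be_readonly(true, 0x2000, start, end));  /* 1 */
	return 0;
}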
@@ -1700,9 +1702,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 			pte_t pte;
 
-			if (pfn > max_pfn_mapped)
-				max_pfn_mapped = pfn;
-
 			if (!pte_none(pte_page[pteidx]))
 				continue;
 
@@ -1760,6 +1759,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	pud_t *l3;
 	pmd_t *l2;
 
+	/* max_pfn_mapped is the last pfn mapped in the initial memory
+	 * mappings. Considering that on Xen after the kernel mappings we
+	 * have the mappings of some pages that don't exist in pfn space, we
+	 * set max_pfn_mapped to the last real pfn mapped. */
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+
 	/* Zap identity mapping */
 	init_level4_pgt[0] = __pgd(0);
 
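The added assignment computes max_pfn_mapped from the physical address of xen_start_info->mfn_list, which the comment treats as the boundary of the last real pfn covered by the initial mappings. Below is a rough userspace model of just that arithmetic, assuming __pa() can be approximated by subtracting an example direct-map base and PAGE_SHIFT is 12; the real x86 definitions are more involved.

/* Sketch of the PFN_DOWN(__pa(...)) arithmetic; PAGE_OFFSET and the
 * pointer below are made-up example values, and a 64-bit host is
 * assumed. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xffff880000000000UL	/* example direct-map base */

#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define __pa(x)		((unsigned long)(x) - PAGE_OFFSET)

int main(void)
{
	/* Pretend mfn_list sits here in the direct mapping. */
	void *mfn_list = (void *)(PAGE_OFFSET + 0x02345000UL);

	/* Last real pfn mapped, per the comment in the hunk above. */
	unsigned long max_pfn_mapped = PFN_DOWN(__pa(mfn_list));

	printf("max_pfn_mapped = %#lx\n", max_pfn_mapped);	/* 0x2345 */
	return 0;
}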
@@ -1864,9 +1869,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	initial_kernel_pmd =
 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
-				  xen_start_info->nr_pt_frames * PAGE_SIZE +
-				  512 * 1024);
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);