@@ -143,7 +143,7 @@ module_param(oos_shadow, bool, 0644);
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
 struct kvm_rmap_desc {
-	u64 *shadow_ptes[RMAP_EXT];
+	u64 *sptes[RMAP_EXT];
 	struct kvm_rmap_desc *more;
 };
@@ -262,7 +262,7 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
 }
 
-static void set_shadow_pte(u64 *sptep, u64 spte)
+static void __set_spte(u64 *sptep, u64 spte)
 {
 #ifdef CONFIG_X86_64
 	set_64bit((unsigned long *)sptep, spte);
@@ -514,23 +514,23 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 	} else if (!(*rmapp & 1)) {
 		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
 		desc = mmu_alloc_rmap_desc(vcpu);
-		desc->shadow_ptes[0] = (u64 *)*rmapp;
-		desc->shadow_ptes[1] = spte;
+		desc->sptes[0] = (u64 *)*rmapp;
+		desc->sptes[1] = spte;
 		*rmapp = (unsigned long)desc | 1;
 	} else {
 		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
 		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-		while (desc->shadow_ptes[RMAP_EXT - 1] && desc->more) {
+		while (desc->sptes[RMAP_EXT - 1] && desc->more) {
 			desc = desc->more;
 			count += RMAP_EXT;
 		}
-		if (desc->shadow_ptes[RMAP_EXT - 1]) {
+		if (desc->sptes[RMAP_EXT - 1]) {
 			desc->more = mmu_alloc_rmap_desc(vcpu);
 			desc = desc->more;
 		}
-		for (i = 0; desc->shadow_ptes[i]; ++i)
+		for (i = 0; desc->sptes[i]; ++i)
 			;
-		desc->shadow_ptes[i] = spte;
+		desc->sptes[i] = spte;
 	}
 	return count;
 }
@@ -542,14 +542,14 @@ static void rmap_desc_remove_entry(unsigned long *rmapp,
 {
 	int j;
 
-	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
+	for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
 		;
-	desc->shadow_ptes[i] = desc->shadow_ptes[j];
-	desc->shadow_ptes[j] = NULL;
+	desc->sptes[i] = desc->sptes[j];
+	desc->sptes[j] = NULL;
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		*rmapp = (unsigned long)desc->shadow_ptes[0];
+		*rmapp = (unsigned long)desc->sptes[0];
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
@@ -594,8 +594,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 		prev_desc = NULL;
 		while (desc) {
-			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
-				if (desc->shadow_ptes[i] == spte) {
+			for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
+				if (desc->sptes[i] == spte) {
 					rmap_desc_remove_entry(rmapp,
 							       desc, i,
 							       prev_desc);
@@ -626,10 +626,10 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 	prev_desc = NULL;
 	prev_spte = NULL;
 	while (desc) {
-		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
+		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
 			if (prev_spte == spte)
-				return desc->shadow_ptes[i];
-			prev_spte = desc->shadow_ptes[i];
+				return desc->sptes[i];
+			prev_spte = desc->sptes[i];
 		}
 		desc = desc->more;
 	}
@@ -651,7 +651,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
 		if (is_writeble_pte(*spte)) {
-			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+			__set_spte(spte, *spte & ~PT_WRITABLE_MASK);
 			write_protected = 1;
 		}
 		spte = rmap_next(kvm, rmapp, spte);
@@ -675,7 +675,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		if (is_writeble_pte(*spte)) {
 			rmap_remove(kvm, spte);
 			--kvm->stat.lpages;
-			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+			__set_spte(spte, shadow_trap_nonpresent_pte);
 			spte = NULL;
 			write_protected = 1;
 		}
@@ -694,7 +694,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
 		rmap_remove(kvm, spte);
-		set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+		__set_spte(spte, shadow_trap_nonpresent_pte);
 		need_tlb_flush = 1;
 	}
 	return need_tlb_flush;
@@ -1369,7 +1369,7 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 		}
 		BUG_ON(!parent_pte);
 		kvm_mmu_put_page(sp, parent_pte);
-		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
+		__set_spte(parent_pte, shadow_trap_nonpresent_pte);
 	}
 }
@@ -1517,7 +1517,7 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 		if (pt[i] == shadow_notrap_nonpresent_pte)
-			set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
+			__set_spte(&pt[i], shadow_trap_nonpresent_pte);
 	}
 }
@@ -1683,7 +1683,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return 0;
 }
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int largepage,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
@@ -1733,7 +1733,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		 * is responsibility of mmu_get_page / kvm_sync_page.
 		 * Same reasoning can be applied to dirty page accounting.
 		 */
-		if (!can_unsync && is_writeble_pte(*shadow_pte))
+		if (!can_unsync && is_writeble_pte(*sptep))
 			goto set_pte;
 
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1750,62 +1750,62 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	set_shadow_pte(shadow_pte, spte);
+	__set_spte(sptep, spte);
 	return ret;
 }
 
-static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int largepage, gfn_t gfn,
 			 pfn_t pfn, bool speculative)
 {
 	int was_rmapped = 0;
-	int was_writeble = is_writeble_pte(*shadow_pte);
+	int was_writeble = is_writeble_pte(*sptep);
 	int rmap_count;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
-		 __func__, *shadow_pte, pt_access,
+		 __func__, *sptep, pt_access,
 		 write_fault, user_fault, gfn);
 
-	if (is_rmap_spte(*shadow_pte)) {
+	if (is_rmap_spte(*sptep)) {
 		/*
 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
 		 * the parent of the now unreachable PTE.
 		 */
-		if (largepage && !is_large_pte(*shadow_pte)) {
+		if (largepage && !is_large_pte(*sptep)) {
 			struct kvm_mmu_page *child;
-			u64 pte = *shadow_pte;
+			u64 pte = *sptep;
 
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
-			mmu_page_remove_parent_pte(child, shadow_pte);
-		} else if (pfn != spte_to_pfn(*shadow_pte)) {
+			mmu_page_remove_parent_pte(child, sptep);
+		} else if (pfn != spte_to_pfn(*sptep)) {
 			pgprintk("hfn old %lx new %lx\n",
-				 spte_to_pfn(*shadow_pte), pfn);
-			rmap_remove(vcpu->kvm, shadow_pte);
+				 spte_to_pfn(*sptep), pfn);
+			rmap_remove(vcpu->kvm, sptep);
 		} else
 			was_rmapped = 1;
 	}
-	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
+	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
 		      dirty, largepage, gfn, pfn, speculative, true)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
 	}
 
-	pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
+	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
 	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
-		 is_large_pte(*shadow_pte) ? "2MB" : "4kB",
-		 is_present_pte(*shadow_pte) ? "RW" : "R", gfn,
-		 *shadow_pte, shadow_pte);
-	if (!was_rmapped && is_large_pte(*shadow_pte))
+		 is_large_pte(*sptep) ? "2MB" : "4kB",
+		 is_present_pte(*sptep) ? "RW" : "R", gfn,
+		 *shadow_pte, sptep);
+	if (!was_rmapped && is_large_pte(*sptep))
 		++vcpu->kvm->stat.lpages;
 
-	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+	page_header_update_slot(vcpu->kvm, sptep, gfn);
 	if (!was_rmapped) {
-		rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
-		if (!is_rmap_spte(*shadow_pte))
+		rmap_count = rmap_add(vcpu, sptep, gfn, largepage);
+		if (!is_rmap_spte(*sptep))
 			kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, gfn, largepage);
@@ -1816,7 +1816,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			kvm_release_pfn_clean(pfn);
 	}
 	if (speculative) {
-		vcpu->arch.last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_updated = sptep;
 		vcpu->arch.last_pte_gfn = gfn;
 	}
 }
@@ -1854,10 +1854,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 				return -ENOMEM;
 			}
 
-			set_shadow_pte(iterator.sptep,
-				       __pa(sp->spt)
-				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
-				       | shadow_user_mask | shadow_x_mask);
+			__set_spte(iterator.sptep,
+				   __pa(sp->spt)
+				   | PT_PRESENT_MASK | PT_WRITABLE_MASK
+				   | shadow_user_mask | shadow_x_mask);
 		}
 	}
 	return pt_write;
@@ -2389,7 +2389,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 			mmu_page_remove_parent_pte(child, spte);
 		}
 	}
-	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+	__set_spte(spte, shadow_trap_nonpresent_pte);
 	if (is_large_pte(pte))
 		--vcpu->kvm->stat.lpages;
 }
@@ -3125,7 +3125,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
 			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 			while (d) {
 				for (k = 0; k < RMAP_EXT; ++k)
-					if (d->shadow_ptes[k])
+					if (d->sptes[k])
 						++nmaps;
 					else
 						break;