@ -1449,6 +1449,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
pr_unimpl ( vcpu , " unimplemented perfctr wrmsr: "
pr_unimpl ( vcpu , " unimplemented perfctr wrmsr: "
" 0x%x data 0x%llx \n " , msr , data ) ;
" 0x%x data 0x%llx \n " , msr , data ) ;
break ;
break ;
case MSR_K7_CLK_CTL :
/*
* Ignore all writes to this no longer documented MSR .
* Writes are only relevant for old K7 processors ,
* all pre - dating SVM , but a recommended workaround from
* AMD for these chips . It is possible to specify the
* affected processor models on the command line , hence
* the need to ignore the workaround .
*/
break ;
case HV_X64_MSR_GUEST_OS_ID . . . HV_X64_MSR_SINT15 :
case HV_X64_MSR_GUEST_OS_ID . . . HV_X64_MSR_SINT15 :
if ( kvm_hv_msr_partition_wide ( msr ) ) {
if ( kvm_hv_msr_partition_wide ( msr ) ) {
int r ;
int r ;
@ -1674,6 +1684,18 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MCG_STATUS :
case MSR_IA32_MCG_STATUS :
case MSR_IA32_MC0_CTL . . . MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1 :
case MSR_IA32_MC0_CTL . . . MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1 :
return get_msr_mce ( vcpu , msr , pdata ) ;
return get_msr_mce ( vcpu , msr , pdata ) ;
case MSR_K7_CLK_CTL :
/*
* Provide expected ramp - up count for K7 . All others
* are set to zero , indicating minimum divisors for
* every field .
*
* This prevents guest kernels on AMD host with CPU
* type 6 , model 8 and higher from exploding due to
* the rdmsr failing .
*/
data = 0x20000000 ;
break ;
case HV_X64_MSR_GUEST_OS_ID . . . HV_X64_MSR_SINT15 :
case HV_X64_MSR_GUEST_OS_ID . . . HV_X64_MSR_SINT15 :
if ( kvm_hv_msr_partition_wide ( msr ) ) {
if ( kvm_hv_msr_partition_wide ( msr ) ) {
int r ;
int r ;