@ -502,6 +502,106 @@ static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
return ret ;
}
static int kvm_s390_set_processor ( struct kvm * kvm , struct kvm_device_attr * attr )
{
struct kvm_s390_vm_cpu_processor * proc ;
int ret = 0 ;
mutex_lock ( & kvm - > lock ) ;
if ( atomic_read ( & kvm - > online_vcpus ) ) {
ret = - EBUSY ;
goto out ;
}
proc = kzalloc ( sizeof ( * proc ) , GFP_KERNEL ) ;
if ( ! proc ) {
ret = - ENOMEM ;
goto out ;
}
if ( ! copy_from_user ( proc , ( void __user * ) attr - > addr ,
sizeof ( * proc ) ) ) {
memcpy ( & kvm - > arch . model . cpu_id , & proc - > cpuid ,
sizeof ( struct cpuid ) ) ;
kvm - > arch . model . ibc = proc - > ibc ;
memcpy ( kvm - > arch . model . fac - > kvm , proc - > fac_list ,
S390_ARCH_FAC_LIST_SIZE_BYTE ) ;
} else
ret = - EFAULT ;
kfree ( proc ) ;
out :
mutex_unlock ( & kvm - > lock ) ;
return ret ;
}
static int kvm_s390_set_cpu_model ( struct kvm * kvm , struct kvm_device_attr * attr )
{
int ret = - ENXIO ;
switch ( attr - > attr ) {
case KVM_S390_VM_CPU_PROCESSOR :
ret = kvm_s390_set_processor ( kvm , attr ) ;
break ;
}
return ret ;
}
static int kvm_s390_get_processor ( struct kvm * kvm , struct kvm_device_attr * attr )
{
struct kvm_s390_vm_cpu_processor * proc ;
int ret = 0 ;
proc = kzalloc ( sizeof ( * proc ) , GFP_KERNEL ) ;
if ( ! proc ) {
ret = - ENOMEM ;
goto out ;
}
memcpy ( & proc - > cpuid , & kvm - > arch . model . cpu_id , sizeof ( struct cpuid ) ) ;
proc - > ibc = kvm - > arch . model . ibc ;
memcpy ( & proc - > fac_list , kvm - > arch . model . fac - > kvm , S390_ARCH_FAC_LIST_SIZE_BYTE ) ;
if ( copy_to_user ( ( void __user * ) attr - > addr , proc , sizeof ( * proc ) ) )
ret = - EFAULT ;
kfree ( proc ) ;
out :
return ret ;
}
static int kvm_s390_get_machine ( struct kvm * kvm , struct kvm_device_attr * attr )
{
struct kvm_s390_vm_cpu_machine * mach ;
int ret = 0 ;
mach = kzalloc ( sizeof ( * mach ) , GFP_KERNEL ) ;
if ( ! mach ) {
ret = - ENOMEM ;
goto out ;
}
get_cpu_id ( ( struct cpuid * ) & mach - > cpuid ) ;
mach - > ibc = sclp_get_ibc ( ) ;
memcpy ( & mach - > fac_mask , kvm_s390_fac_list_mask ,
kvm_s390_fac_list_mask_size ( ) * sizeof ( u64 ) ) ;
memcpy ( ( unsigned long * ) & mach - > fac_list , S390_lowcore . stfle_fac_list ,
S390_ARCH_FAC_LIST_SIZE_U64 ) ;
if ( copy_to_user ( ( void __user * ) attr - > addr , mach , sizeof ( * mach ) ) )
ret = - EFAULT ;
kfree ( mach ) ;
out :
return ret ;
}
static int kvm_s390_get_cpu_model ( struct kvm * kvm , struct kvm_device_attr * attr )
{
int ret = - ENXIO ;
switch ( attr - > attr ) {
case KVM_S390_VM_CPU_PROCESSOR :
ret = kvm_s390_get_processor ( kvm , attr ) ;
break ;
case KVM_S390_VM_CPU_MACHINE :
ret = kvm_s390_get_machine ( kvm , attr ) ;
break ;
}
return ret ;
}
static int kvm_s390_vm_set_attr ( struct kvm * kvm , struct kvm_device_attr * attr )
{
int ret ;
@ -513,6 +613,9 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_TOD :
ret = kvm_s390_set_tod ( kvm , attr ) ;
break ;
case KVM_S390_VM_CPU_MODEL :
ret = kvm_s390_set_cpu_model ( kvm , attr ) ;
break ;
case KVM_S390_VM_CRYPTO :
ret = kvm_s390_vm_set_crypto ( kvm , attr ) ;
break ;
@ -535,6 +638,9 @@ static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_TOD :
ret = kvm_s390_get_tod ( kvm , attr ) ;
break ;
case KVM_S390_VM_CPU_MODEL :
ret = kvm_s390_get_cpu_model ( kvm , attr ) ;
break ;
default :
ret = - ENXIO ;
break ;
@ -571,6 +677,17 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
break ;
}
break ;
case KVM_S390_VM_CPU_MODEL :
switch ( attr - > attr ) {
case KVM_S390_VM_CPU_PROCESSOR :
case KVM_S390_VM_CPU_MACHINE :
ret = 0 ;
break ;
default :
ret = - ENXIO ;
break ;
}
break ;
case KVM_S390_VM_CRYPTO :
switch ( attr - > attr ) {
case KVM_S390_VM_CRYPTO_ENABLE_AES_KW :
@ -781,6 +898,17 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
memcpy ( kvm - > arch . model . fac - > kvm , S390_lowcore . stfle_fac_list ,
S390_ARCH_FAC_LIST_SIZE_U64 ) ;
/*
 * If this KVM host does *not* run in an LPAR, relax the facility bits
 * of the kvm facility mask by all missing facilities. This allows
 * determining the right CPU model by means of the remaining facilities.
 * Live guest migration must prohibit migrating a KVM guest running in
 * an LPAR to a non-LPAR host.
 */
if ( ! MACHINE_IS_LPAR )
for ( i = 0 ; i < kvm_s390_fac_list_mask_size ( ) ; i + + )
kvm_s390_fac_list_mask [ i ] & = kvm - > arch . model . fac - > kvm [ i ] ;
/*
* Apply the kvm facility mask to limit the kvm supported / tolerated
* facility list .
@ -793,6 +921,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
}
kvm_s390_get_cpu_id ( & kvm - > arch . model . cpu_id ) ;
kvm - > arch . model . ibc = sclp_get_ibc ( ) & 0x0fff ;
if ( kvm_s390_crypto_init ( kvm ) < 0 )
goto out_crypto ;
@ -1034,9 +1163,12 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
hrtimer_init ( & vcpu - > arch . ckc_timer , CLOCK_MONOTONIC , HRTIMER_MODE_REL ) ;
vcpu - > arch . ckc_timer . function = kvm_s390_idle_wakeup ;
mutex_lock ( & vcpu - > kvm - > lock ) ;
vcpu - > arch . cpu_id = vcpu - > kvm - > arch . model . cpu_id ;
memcpy ( vcpu - > kvm - > arch . model . fac - > sie , vcpu - > kvm - > arch . model . fac - > kvm ,
S390_ARCH_FAC_LIST_SIZE_BYTE ) ;
vcpu - > arch . sie_block - > ibc = vcpu - > kvm - > arch . model . ibc ;
mutex_unlock ( & vcpu - > kvm - > lock ) ;
kvm_s390_vcpu_crypto_setup ( vcpu ) ;