@@ -589,6 +589,20 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
	return false;
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	return false;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	return false;
}

/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
@@ -663,6 +677,16 @@ static const struct mmio_range vgic_dist_ranges[] = {
		.len = 4,
		.handle_mmio = handle_mmio_sgi_reg,
	},
	{
		.base = GIC_DIST_SGI_PENDING_CLEAR,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_clear,
	},
	{
		.base = GIC_DIST_SGI_PENDING_SET,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_set,
	},
	{}
};
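
The empty {} entry terminates the table for find_matching_range(), which is called further down but lies outside the hunks shown here. As a sketch only, assuming the helper has roughly the usual shape of such a lookup, it walks the table until the sentinel and returns the range that fully contains the access:

/* Sketch (assumption, not part of the shown hunks): range table lookup. */
static const struct mmio_range *
find_matching_range(const struct mmio_range *ranges,
		    struct kvm_exit_mmio *mmio,
		    phys_addr_t offset)
{
	const struct mmio_range *r;

	for (r = ranges; r->len; r++) {
		/* the access must fall entirely inside one range */
		if (offset >= r->base &&
		    offset + mmio->len <= r->base + r->len)
			return r;
	}

	return NULL;	/* hit the {} sentinel: no handler for this offset */
}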
@@ -1557,6 +1581,114 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
	return r;
}

static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return true;
}

static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base = GIC_CPU_CTRL,
		.len = 12,
		.handle_mmio = handle_cpu_mmio_misc,
	},
	{
		.base = GIC_CPU_ALIAS_BINPOINT,
		.len = 4,
		.handle_mmio = handle_cpu_mmio_misc,
	},
	{
		.base = GIC_CPU_ACTIVEPRIO,
		.len = 16,
		.handle_mmio = handle_cpu_mmio_misc,
	},
	{
		.base = GIC_CPU_IDENT,
		.len = 4,
		.handle_mmio = handle_cpu_mmio_misc,
	},
};
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}
	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}
	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;
@@ -1573,6 +1705,18 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
@@ -1594,14 +1738,42 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}
	}

	return r;
}
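
These handlers back KVM_SET_DEVICE_ATTR and KVM_GET_DEVICE_ATTR on the VGIC device fd, with attr->attr carrying the CPU id in the upper bits and the register offset in the lower 32 bits, exactly the fields vgic_attr_regs_access() decodes above. As a rough illustration only (assuming a VGIC device fd already obtained through KVM_CREATE_DEVICE and all VCPUs stopped, otherwise the kernel returns -EBUSY), a userspace read of one 32-bit distributor register could look like this:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper, not part of the kernel patch: read the 32-bit
 * distributor register at 'offset', banked for VCPU 'cpuid'. */
static int vgic_dist_reg_read(int vgic_fd, uint32_t cpuid,
			      uint32_t offset, uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		/* cpuid in bits [63:32], register offset in bits [31:0] */
		.attr  = ((uint64_t)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			 (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
		.addr  = (uint64_t)(unsigned long)val,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}

A write goes through the same encoding with KVM_SET_DEVICE_ATTR and the new value stored at attr.addr.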
static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}
static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
@@ -1610,6 +1782,12 @@ static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	}
	return -ENXIO;
}
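
vgic_has_attr() backs the KVM_HAS_DEVICE_ATTR ioctl, so userspace can probe whether a register group and offset is exposed before attempting an access; note that only the offset bits are consulted here, not the CPU id. A minimal probe, illustrative only and using the same headers as the sketch above, might be:

/* Illustrative only: returns 0 if the distributor register at 'offset'
 * is reachable through the device attr interface, -1/errno otherwise. */
static int vgic_dist_reg_supported(int vgic_fd, uint32_t offset)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		.attr  = offset & KVM_DEV_ARM_VGIC_OFFSET_MASK,
	};

	return ioctl(vgic_fd, KVM_HAS_DEVICE_ATTR, &attr);
}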