@@ -34,6 +34,7 @@
 
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 #include "irq.h"
 #include "i8254.h"
@@ -244,11 +245,22 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						  irq_ack_notifier);
-	raw_spin_lock(&ps->inject_lock);
-	if (atomic_dec_return(&ps->pit_timer.pending) < 0)
+	int value;
+
+	spin_lock(&ps->inject_lock);
+	value = atomic_dec_return(&ps->pit_timer.pending);
+	if (value < 0)
+		/* spurious acks can be generated if, for example, the
+		 * PIC is being reset.  Handle it gracefully here
+		 */
 		atomic_inc(&ps->pit_timer.pending);
+	else if (value > 0)
+		/* in this case, we had multiple outstanding pit interrupts
+		 * that we needed to inject.  Reinject
+		 */
+		queue_work(ps->pit->wq, &ps->pit->expired);
 	ps->irq_ack = 1;
-	raw_spin_unlock(&ps->inject_lock);
+	spin_unlock(&ps->inject_lock);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
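The hunk above moves the ack-side bookkeeping onto a plain spinlock and adds the reinject path. As a rough, stand-alone illustration (not part of the patch), the following hypothetical user-space sketch mirrors that logic, with C11 atomics standing in for atomic_t and a flag standing in for queue_work(); the names and the omission of locking are assumptions made only for the example.

/* Hypothetical user-space sketch of the ack-side logic in kvm_pit_ack_irq().
 * C11 atomics stand in for atomic_t; a plain flag stands in for queue_work(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending;        /* timer ticks raised but not yet acked      */
static bool reinject_queued;      /* stands in for queue_work(pit->wq, ...)    */

static void ack_irq(void)
{
	/* atomic_fetch_sub() returns the old value, so subtract one more to
	 * get the new value, like the kernel's atomic_dec_return() */
	int value = atomic_fetch_sub(&pending, 1) - 1;

	if (value < 0)
		/* spurious ack (e.g. a PIC reset): undo the decrement */
		atomic_fetch_add(&pending, 1);
	else if (value > 0)
		/* more ticks outstanding: schedule another injection   */
		reinject_queued = true;
}

int main(void)
{
	atomic_store(&pending, 2);   /* two timer ticks accumulated     */
	ack_irq();                   /* acks one, queues a reinjection  */
	printf("pending=%d reinject=%d\n",
	       atomic_load(&pending), reinject_queued);

	atomic_store(&pending, 0);
	ack_irq();                   /* spurious ack, counter restored  */
	printf("pending=%d\n", atomic_load(&pending));
	return 0;
}

A negative result from the decrement means the ack had no matching pending tick (for example after a PIC reset), so the counter is restored; a positive result means further ticks are still owed and another injection pass is scheduled.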
@@ -264,10 +276,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
-static void destroy_pit_timer(struct kvm_timer *pt)
+static void destroy_pit_timer(struct kvm_pit *pit)
 {
-	pr_debug("execute del timer!\n");
-	hrtimer_cancel(&pt->timer);
+	hrtimer_cancel(&pit->pit_state.pit_timer.timer);
+	cancel_work_sync(&pit->expired);
 }
 
 static bool kpit_is_periodic(struct kvm_timer *ktimer)
@@ -281,6 +293,60 @@ static struct kvm_timer_ops kpit_ops = {
 	.is_periodic = kpit_is_periodic,
 };
 
+static void pit_do_work(struct work_struct *work)
+{
+	struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
+	struct kvm *kvm = pit->kvm;
+	struct kvm_vcpu *vcpu;
+	int i;
+	struct kvm_kpit_state *ps = &pit->pit_state;
+	int inject = 0;
+
+	/* Try to inject pending interrupts when
+	 * last one has been acked.
+	 */
+	spin_lock(&ps->inject_lock);
+	if (ps->irq_ack) {
+		ps->irq_ack = 0;
+		inject = 1;
+	}
+	spin_unlock(&ps->inject_lock);
+	if (inject) {
+		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
+		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
+
+		/*
+		 * Provides NMI watchdog support via Virtual Wire mode.
+		 * The route is: PIT -> PIC -> LVT0 in NMI mode.
+		 *
+		 * Note: Our Virtual Wire implementation is simplified, only
+		 * propagating PIT interrupts to all VCPUs when they have set
+		 * LVT0 to NMI delivery.  Other PIC interrupts are just sent to
+		 * VCPU0, and only if its LVT0 is in EXTINT mode.
+		 */
+		if (kvm->arch.vapics_in_nmi_mode > 0)
+			kvm_for_each_vcpu(i, vcpu, kvm)
+				kvm_apic_nmi_wd_deliver(vcpu);
+	}
+}
+
+static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
+{
+	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+	struct kvm_pit *pt = ktimer->kvm->arch.vpit;
+
+	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
+		atomic_inc(&ktimer->pending);
+		queue_work(pt->wq, &pt->expired);
+	}
+
+	if (ktimer->t_ops->is_periodic(ktimer)) {
+		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
+		return HRTIMER_RESTART;
+	} else
+		return HRTIMER_NORESTART;
+}
+
 static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
 {
 	struct kvm_timer *pt = &ps->pit_timer;
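pit_do_work() and pit_timer_fn() above split the job: the hrtimer callback, which runs in a context that must not sleep, only bumps the pending counter and queues the work item, while the workqueue handler performs the actual injection. A hypothetical user-space analogue of that handoff follows (POSIX threads in place of an hrtimer and a single-threaded workqueue; all names are invented, and the reinject/pending check from pit_timer_fn() is omitted).

/* Hypothetical sketch of the pit_timer_fn() -> pit_do_work() handoff:
 * the timer path only bumps a counter and wakes a worker; the worker
 * performs the (slow, possibly sleeping) injection work. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static bool work_queued, stop;

static void timer_tick(void)          /* stands in for pit_timer_fn() */
{
	atomic_fetch_add(&pending, 1);
	pthread_mutex_lock(&lock);
	work_queued = true;               /* stands in for queue_work()   */
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)        /* stands in for pit_do_work()  */
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!work_queued && !stop)
			pthread_cond_wait(&kick, &lock);
		if (stop && !work_queued) {
			pthread_mutex_unlock(&lock);
			break;
		}
		work_queued = false;
		pthread_mutex_unlock(&lock);

		/* "inject" the interrupt; the ack path would decrement pending */
		printf("inject, pending=%d\n", atomic_load(&pending));
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	timer_tick();
	timer_tick();

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}

As with queue_work(), a tick that arrives while a previous one is still queued is coalesced; the pending counter, not the queue, records how many injections are owed.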
@@ -292,13 +358,13 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
 
 	/* TODO The new value only affected after the retriggered */
 	hrtimer_cancel(&pt->timer);
+	cancel_work_sync(&ps->pit->expired);
 	pt->period = interval;
 	ps->is_periodic = is_period;
 
-	pt->timer.function = kvm_timer_fn;
+	pt->timer.function = pit_timer_fn;
 	pt->t_ops = &kpit_ops;
 	pt->kvm = ps->pit->kvm;
-	pt->vcpu = pt->kvm->bsp_vcpu;
 
 	atomic_set(&pt->pending, 0);
 	ps->irq_ack = 1;
@@ -347,7 +413,7 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 		}
 		break;
 	default:
-		destroy_pit_timer(&ps->pit_timer);
+		destroy_pit_timer(kvm->arch.vpit);
 	}
 }
 
@@ -626,7 +692,14 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 
 	mutex_init(&pit->pit_state.lock);
 	mutex_lock(&pit->pit_state.lock);
-	raw_spin_lock_init(&pit->pit_state.inject_lock);
+	spin_lock_init(&pit->pit_state.inject_lock);
+
+	pit->wq = create_singlethread_workqueue("kvm-pit-wq");
+	if (!pit->wq) {
+		kfree(pit);
+		return NULL;
+	}
+	INIT_WORK(&pit->expired, pit_do_work);
 
 	kvm->arch.vpit = pit;
 	pit->kvm = kvm;
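For reference, the workqueue calls introduced by this patch follow the standard lifecycle: the queue is created in kvm_create_pit() above (freeing the partially built pit if creation fails), work is queued from the timer path, and kvm_free_pit() below cancels outstanding work before destroying the queue. A minimal, self-contained module sketch of that same lifecycle, not taken from this patch and using invented names, might look like this.

/* Hypothetical module sketch of the workqueue lifecycle used by the PIT code:
 * create the queue, queue deferred work, then cancel and destroy on teardown. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	pr_info("deferred work ran\n");
}
static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example-wq");
	if (!example_wq)
		return -ENOMEM;               /* bail out, nothing else built yet */
	queue_work(example_wq, &example_work);    /* like pit_timer_fn()          */
	return 0;
}

static void __exit example_exit(void)
{
	cancel_work_sync(&example_work);          /* like kvm_free_pit()          */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

cancel_work_sync() comes before destroy_workqueue() so that no handler can still be running, or be re-queued, when the queue goes away; kvm_free_pit() below follows the same order.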
@@ -685,54 +758,10 @@ void kvm_free_pit(struct kvm *kvm)
 		mutex_lock(&kvm->arch.vpit->pit_state.lock);
 		timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
 		hrtimer_cancel(timer);
+		cancel_work_sync(&kvm->arch.vpit->expired);
 		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
 		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+		destroy_workqueue(kvm->arch.vpit->wq);
 		kfree(kvm->arch.vpit);
 	}
 }
-
-static void __inject_pit_timer_intr(struct kvm *kvm)
-{
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
-	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
-
-	/*
-	 * Provides NMI watchdog support via Virtual Wire mode.
-	 * The route is: PIT -> PIC -> LVT0 in NMI mode.
-	 *
-	 * Note: Our Virtual Wire implementation is simplified, only
-	 * propagating PIT interrupts to all VCPUs when they have set
-	 * LVT0 to NMI delivery.  Other PIC interrupts are just sent to
-	 * VCPU0, and only if its LVT0 is in EXTINT mode.
-	 */
-	if (kvm->arch.vapics_in_nmi_mode > 0)
-		kvm_for_each_vcpu(i, vcpu, kvm)
-			kvm_apic_nmi_wd_deliver(vcpu);
-}
-
-void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
-{
-	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
-	struct kvm *kvm = vcpu->kvm;
-	struct kvm_kpit_state *ps;
-
-	if (pit) {
-		int inject = 0;
-		ps = &pit->pit_state;
-
-		/* Try to inject pending interrupts when
-		 * last one has been acked.
-		 */
-		raw_spin_lock(&ps->inject_lock);
-		if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
-			ps->irq_ack = 0;
-			inject = 1;
-		}
-		raw_spin_unlock(&ps->inject_lock);
-		if (inject)
-			__inject_pit_timer_intr(kvm);
-	}
-}