/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
*/
#include <linux/kvm_host.h>

#include "irq.h"
#include "i8254.h"
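
/*
 * On 32-bit kernels a 64-bit '%' would expand to a libgcc helper that
 * the kernel does not link against, so mod_64() is expressed in terms
 * of div64_u64() instead; 64-bit builds can use the native operator.
 */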
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB	1
#define RW_STATE_MSB	2
#define RW_STATE_WORD0	3
#define RW_STATE_WORD1	4

/* Compute with 96 bit intermediate result: (a*b)/c */
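/*
 * Split a into 32-bit halves, a = (high << 32) + low.  Then
 * a*b = (high*b << 32) + low*b; each partial product fits in 64 bits,
 * so the quotient can be assembled 32 bits at a time, carrying the
 * remainder of the high half down into the low half.
 */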
static u64 muldiv64(u64 a, u32 b, u32 c)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} u, res;
	u64 rl, rh;

	u.ll = a;
	rl = (u64)u.l.low * (u64)b;
	rh = (u64)u.l.high * (u64)b;
	rh += (rl >> 32);
	res.l.high = div64_u64(rh, c);
	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
	return res.ll;
}
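
/*
 * muldiv64() lets the code below convert between nanoseconds and ticks
 * of the ~1.193 MHz PIT clock (KVM_PIT_FREQ) without intermediate
 * overflow: the elapsed time for channels 1 and 2 is unbounded, so a
 * plain t * KVM_PIT_FREQ can exceed 64 bits.
 */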

static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	switch (c->mode) {
	default:
	case 0:
	case 4:
		/* XXX: just disable/enable counting */
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm *kvm, int channel)
{
	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	return kvm->arch.vpit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm *kvm)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	/*
	 * The Counter does not stop when it reaches zero.  In Modes 0,
	 * 1, 4, and 5 the Counter "wraps around" to the highest count,
	 * either FFFF hex for binary counting or 9999 for BCD counting,
	 * and continues counting.  Modes 2 and 3 are periodic; the
	 * Counter reloads itself with the initial count and continues
	 * counting from there.
	 */
	remaining = hrtimer_expires_remaining(&ps->pit_timer.timer);
	elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
	elapsed = mod_64(elapsed, ps->pit_timer.period);

	return elapsed;
}
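
/*
 * Only channel 0 is backed by the injection hrtimer; channels 1 and 2
 * simply measure wall-clock time since their count was loaded.
 */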
static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(kvm);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

static int pit_get_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* XXX: may be incorrect for odd counts */
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}

	return counter;
}
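
/*
 * Derive the current OUT pin level from the elapsed tick count d:
 * mode 0 raises OUT once the programmed count expires, mode 1 holds it
 * low while the one-shot runs, mode 2 pulses once per period, mode 3
 * keeps OUT high for the first half of each period (a square wave),
 * and modes 4/5 flag only the exact tick at which the count expires.
 */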
static int pit_get_out(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int out;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}

static void pit_latch_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->count_latched) {
		c->latched_count = pit_get_count(kvm, channel);
		c->count_latched = c->rw_mode;
	}
}

static void pit_latch_status(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(kvm, channel) << 7) |
				(c->rw_mode << 4) |
				(c->mode << 1) |
				c->bcd);
		c->status_latched = 1;
	}
}

int pit_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;

	if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack)
		return atomic_read(&pit->pit_state.pit_timer.pending);

	return 0;
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);

	spin_lock(&ps->inject_lock);
	/* Consume one pending tick, clamping the counter at zero. */
	if (atomic_dec_return(&ps->pit_timer.pending) < 0)
		atomic_inc(&ps->pit_timer.pending);
	ps->irq_ack = 1;
	spin_unlock(&ps->inject_lock);
}

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (vcpu->vcpu_id != 0 || !pit)
		return;

	timer = &pit->pit_state.pit_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

static void destroy_pit_timer(struct kvm_timer *pt)
{
	pr_debug("pit: execute del timer!\n");
	hrtimer_cancel(&pt->timer);
}

static bool kpit_is_periodic(struct kvm_timer *ktimer)
{
	struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
						 pit_timer);

	return ps->is_periodic;
}

static struct kvm_timer_ops kpit_ops = {
	.is_periodic = kpit_is_periodic,
};

static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
{
	struct kvm_timer *pt = &ps->pit_timer;
	s64 interval;

	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);

	/* TODO: the new value only takes effect after the counter is
	 * retriggered. */
	hrtimer_cancel(&pt->timer);
	pt->period = interval;
	ps->is_periodic = is_period;

	pt->timer.function = kvm_timer_fn;
	pt->t_ops = &kpit_ops;
	pt->kvm = ps->pit->kvm;
	pt->vcpu_id = 0;

	atomic_set(&pt->pending, 0);
	ps->irq_ack = 1;

	hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}

static void pit_load_count(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	WARN_ON(!mutex_is_locked(&ps->lock));

	pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/* Channel 0 drives the injection timer: modes 0, 1 and 4 arm a
	 * one-shot, modes 2 and 3 a periodic timer; any other mode tears
	 * the timer down. */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
		/* FIXME: enhance mode 4 precision */
	case 4:
		create_pit_timer(ps, val, 0);
		break;
	case 2:
	case 3:
		create_pit_timer(ps, val, 1);
		break;
	default:
		destroy_pit_timer(&ps->pit_timer);
	}
}

void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
{
	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	pit_load_count(kvm, channel, val);
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
}
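
/*
 * A write to port 0x43 (addr == 3) carries the 8254 control word,
 * decoded below:
 *   bits 7-6  counter select (3 = read-back command)
 *   bits 5-4  access mode (0 = counter latch, 1 = LSB, 2 = MSB,
 *             3 = LSB then MSB)
 *   bits 3-1  operating mode
 *   bit  0    BCD flag
 */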
static void pit_ioport_write(struct kvm_io_device *this,
			     gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = (struct kvm_pit *)this->private;
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *)data;

	val &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				s = &pit_state->channels[channel];
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(kvm, channel);
					if (!(val & 0x10))
						pit_latch_status(kvm, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(kvm, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;

				s->mode = (val >> 1) & 7;
				/* Modes 6 and 7 are 8254 aliases for
				 * modes 2 and 3. */
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(kvm, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(kvm, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(kvm, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
}

static void pit_ioport_read(struct kvm_io_device *this,
			    gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = (struct kvm_pit *)this->private;
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int ret, count;
	struct kvm_kpit_channel_state *s;

	addr &= KVM_PIT_CHANNEL_MASK;
	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
}

static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
			int len, int is_write)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

static void speaker_ioport_write(struct kvm_io_device *this,
				 gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = (struct kvm_pit *)this->private;
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	u32 val = *(u32 *)data;

	mutex_lock(&pit_state->lock);
	/* Port 0x61: bit 0 gates PIT channel 2, bit 1 enables speaker data. */
	pit_state->speaker_data_on = (val >> 1) & 1;
	pit_set_gate(kvm, 2, val & 1);
	mutex_unlock(&pit_state->lock);
}

static void speaker_ioport_read(struct kvm_io_device *this,
				gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = (struct kvm_pit *)this->private;
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	unsigned int refresh_clock;
	int ret;

	/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
		(pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
}

static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
			    int len, int is_write)
{
	return (addr == KVM_SPEAKER_BASE_ADDRESS);
}

void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	mutex_lock(&pit->pit_state.lock);
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit->kvm, i, 0);
	}
	mutex_unlock(&pit->pit_state.lock);

	atomic_set(&pit->pit_state.pit_timer.pending, 0);
	pit->pit_state.irq_ack = 1;
}

static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask) {
		atomic_set(&pit->pit_state.pit_timer.pending, 0);
		pit->pit_state.irq_ack = 1;
	}
}

struct kvm_pit *kvm_create_pit(struct kvm *kvm)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0) {
		kfree(pit);
		return NULL;
	}

	mutex_init(&pit->pit_state.lock);
	mutex_lock(&pit->pit_state.lock);
	spin_lock_init(&pit->pit_state.inject_lock);

	/* Initialize PIO device */
	pit->dev.read = pit_ioport_read;
	pit->dev.write = pit_ioport_write;
	pit->dev.in_range = pit_in_range;
	pit->dev.private = pit;
	kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);

	pit->speaker_dev.read = speaker_ioport_read;
	pit->speaker_dev.write = speaker_ioport_write;
	pit->speaker_dev.in_range = speaker_in_range;
	pit->speaker_dev.private = pit;
	kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);

	kvm->arch.vpit = pit;
	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	pit_state->pit = pit;
	hrtimer_init(&pit_state->pit_timer.timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	pit_state->pit_timer.reinject = true;
	mutex_unlock(&pit->pit_state.lock);

	kvm_pit_reset(pit);

	pit->mask_notifier.func = pit_mask_notifier;
	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

	return pit;
}

void kvm_free_pit(struct kvm *kvm)
{
	struct hrtimer *timer;

	if (kvm->arch.vpit) {
		kvm_unregister_irq_mask_notifier(kvm, 0,
						 &kvm->arch.vpit->mask_notifier);
		mutex_lock(&kvm->arch.vpit->pit_state.lock);
		timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
		hrtimer_cancel(timer);
		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
		kfree(kvm->arch.vpit);
	}
}

static void __inject_pit_timer_intr(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	/* Pulse IRQ 0: raise then immediately lower the line to deliver
	 * one edge. */
	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
	mutex_unlock(&kvm->lock);

	/*
	 * Provides NMI watchdog support via Virtual Wire mode.
	 * The route is: PIT -> PIC -> LVT0 in NMI mode.
	 *
	 * Note: Our Virtual Wire implementation is simplified, only
	 * propagating PIT interrupts to all VCPUs when they have set
	 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
	 * VCPU0, and only if its LVT0 is in EXTINT mode.
	 */
	if (kvm->arch.vapics_in_nmi_mode > 0)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				kvm_apic_nmi_wd_deliver(vcpu);
		}
}

void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_kpit_state *ps;

	if (vcpu && pit) {
		int inject = 0;
		ps = &pit->pit_state;

		/* Try to inject pending interrupts once the last one
		 * has been acked. */
		spin_lock(&ps->inject_lock);
		if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
			ps->irq_ack = 0;
			inject = 1;
		}
		spin_unlock(&ps->inject_lock);
		if (inject)
			__inject_pit_timer_intr(kvm);
	}
}