#ifndef __I8254_H
#define __I8254_H

#include <linux/kthread.h>
#include <kvm/iodev.h>

struct kvm_kpit_channel_state {
	u32 count; /* can be 65536 */
	u16 latched_count;
	u8 count_latched;
	u8 status_latched;
	u8 status;
	u8 read_state;
	u8 write_state;
	u8 write_latch;
	u8 rw_mode;
	u8 mode;
	u8 bcd; /* not supported */
	u8 gate; /* timer start */
	ktime_t count_load_time;
};

struct kvm_kpit_state {
	/* All members before "struct mutex lock" are protected by the lock. */
	struct kvm_kpit_channel_state channels[3];
	u32 flags;
	bool is_periodic;
	s64 period; /* unit: ns */
	struct hrtimer timer;
	u32 speaker_data_on;

	struct mutex lock;
	atomic_t reinject;
	atomic_t pending; /* accumulated triggered timers */
	/*
	 * Historical note (from commit "KVM: i8254: use atomic_t instead of
	 * pit.inject_lock"): the inject_lock was overkill; the same can be
	 * done with atomics.  An mb() was added in kvm_pit_ack_irq() to pair
	 * with the implicit barrier between pit_timer_fn() and pit_do_work().
	 * The mb() prevents a race that could happen if pending == 0 and
	 * irq_ack == 0:
	 *
	 *   kvm_pit_ack_irq:                | pit_timer_fn:
	 *   p = atomic_read(&ps->pending);  |
	 *                                   | atomic_inc(&ps->pending);
	 *                                   | queue_work(pit_do_work);
	 *                                   | pit_do_work:
	 *                                   |   atomic_xchg(&ps->irq_ack, 0);
	 *                                   |   return;
	 *   atomic_set(&ps->irq_ack, 1);    |
	 *   if (p == 0) return;             |
	 *
	 * where the interrupt would not be delivered in this tick of
	 * pit_timer_fn().  The PIT would eventually have delivered the
	 * interrupt, but we sacrifice performance to make sure interrupts are
	 * not needlessly delayed.
	 *
	 * An sfence isn't enough: atomic_dec_if_positive() does atomic_read()
	 * first, and x86 can reorder loads before stores.  An lfence isn't
	 * enough either: a store can pass an lfence, turning it into a nop.
	 * A compiler barrier would be more than enough, as the CPU would need
	 * to stall for unbelievably long to use the fences.
	 *
	 * Nothing special is done in kvm_pit_reset_reinject(): any order of
	 * resets can race, but the result differs by at most one interrupt,
	 * which is the same result as if the reset had happened at a slightly
	 * different time.  (The original code didn't protect the reset path
	 * with a proper lock, so users have to be robust.)
	 *
	 * See the illustrative sketch after struct kvm_pit below for how the
	 * ack path can pair the barrier with these atomics.
	 */
	atomic_t irq_ack;
	struct kvm_irq_ack_notifier irq_ack_notifier;
};

struct kvm_pit {
	struct kvm_io_device dev;
	struct kvm_io_device speaker_dev;
	struct kvm *kvm;
	struct kvm_kpit_state pit_state;
	int irq_source_id;
	struct kvm_irq_mask_notifier mask_notifier;
	struct kthread_worker *worker;
	struct kthread_work expired;
};
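
/*
 * Illustrative sketch only, not a declaration from this header: one way the
 * ack path in i8254.c can pair a full barrier with the atomics in
 * struct kvm_kpit_state above.  The helper pit_state_to_pit() is assumed
 * here to be a container_of() wrapper recovering the enclosing struct
 * kvm_pit from its pit_state member.
 *
 *	static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 *	{
 *		struct kvm_kpit_state *ps =
 *			container_of(kian, struct kvm_kpit_state, irq_ack_notifier);
 *		struct kvm_pit *pit = pit_state_to_pit(ps);
 *
 *		atomic_set(&ps->irq_ack, 1);
 *		// irq_ack must be visible before pending is read; pairs with
 *		// atomic_inc(&ps->pending) in pit_timer_fn() and
 *		// atomic_xchg(&ps->irq_ack, 0) in pit_do_work().
 *		smp_mb();
 *		if (atomic_dec_if_positive(&ps->pending) > 0)
 *			kthread_queue_work(pit->worker, &pit->expired);
 *	}
 */
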
#define KVM_PIT_BASE_ADDRESS		0x40
#define KVM_SPEAKER_BASE_ADDRESS	0x61
#define KVM_PIT_MEM_LENGTH		4
#define KVM_PIT_FREQ			1193181
#define KVM_MAX_PIT_INTR_INTERVAL	HZ / 100
#define KVM_PIT_CHANNEL_MASK		0x3

struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
void kvm_free_pit(struct kvm *kvm);
void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
		int hpet_legacy_start);
void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject);

#endif