/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <clocksource/arm_arch_timer.h>

#include <asm/arch_timer.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;
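
/*
 * Read the current time from the host arch timer's timecounter; the guest's
 * virtual counter is derived from this value by subtracting the per-VM
 * CNTVOFF offset.
 */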
static cycle_t kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
	return timer->armed;
}
/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
	timer->armed = true;
	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}

static void timer_disarm(struct arch_timer_cpu *timer)
{
	if (timer_is_armed(timer)) {
		hrtimer_cancel(&timer->timer);
		cancel_work_sync(&timer->expired);
		timer->armed = false;
	}
}
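
/*
 * Mask the guest's virtual timer (set CNTV_CTL.IMASK) so the hardware line
 * does not keep firing, then inject the configured timer interrupt into the
 * vcpu through the virtual GIC.
 */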
static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
			    timer->irq->irq,
			    timer->irq->level);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

	/*
	 * We disable the timer in the world switch and let it be
	 * handled by kvm_timer_sync_hwstate(). Getting a timer
	 * interrupt at this point is a sure sign of some major
	 * breakage.
	 */
	pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
	return IRQ_HANDLED;
}
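
/*
 * Work function run when the background hrtimer expires: mark the soft timer
 * as disarmed and inject the virtual timer interrupt.
 */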
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
	vcpu->arch.timer_cpu.armed = false;
	kvm_timer_inject_irq(vcpu);
}
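
/*
 * hrtimer callback for the background timer: hand the expiry off to the
 * workqueue and do not restart the hrtimer.
 */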
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;

	timer = container_of(hrt, struct arch_timer_cpu, timer);
	queue_work(wqueue, &timer->expired);
	return HRTIMER_NORESTART;
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Disarm any pending soft timers, since the world-switch code will write the
 * virtual timer state back to the physical CPU.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	/*
	 * We're about to run this vcpu again, so there is no need to
	 * keep the background timer running, as we're about to
	 * populate the CPU timer again.
	 */
	timer_disarm(timer);
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer was armed and either schedule a corresponding
 * soft timer or inject directly if already expired.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	cycle_t cval, now;
	u64 ns;

	if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
	    !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
		return;

	cval = timer->cntv_cval;
	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

	BUG_ON(timer_is_armed(timer));

	if (cval <= now) {
		/*
		 * Timer has already expired while we were not
		 * looking. Inject the interrupt and carry on.
		 */
		kvm_timer_inject_irq(vcpu);
		return;
	}

	ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
	timer_arm(timer, ns);
}
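
/**
 * kvm_timer_vcpu_reset - record the vcpu timer interrupt
 * @vcpu: The vcpu pointer
 * @irq: The interrupt number and level to inject for the virtual timer
 */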
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
			  const struct kvm_irq_level *irq)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	/*
	 * The vcpu timer irq number cannot be determined in
	 * kvm_timer_vcpu_init() because it is called much before
	 * kvm_vcpu_set_target(). To handle this, we determine
	 * vcpu timer irq number when the vcpu is reset.
	 */
	timer->irq = irq;
}
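
/*
 * Per-vcpu initialisation: set up the expiry work item and the background
 * hrtimer used to emulate the virtual timer while the vcpu is not running.
 */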
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = kvm_timer_expire;
}
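
/* Enable the per-cpu host virtual timer interrupt on the calling CPU. */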
static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, 0);
}
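
/*
 * Accessors for the guest timer registers (CNTV_CTL, CNTVCT and CNTV_CVAL).
 * The counter value is not stored directly; instead the per-VM CNTVOFF
 * offset is adjusted so that the guest reads the requested value.
 */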
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer->cntv_ctl = value;
		break;
	case KVM_REG_ARM_TIMER_CNT:
		vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer->cntv_cval = value;
		break;
	default:
		return -1;
	}
	return 0;
}
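
/*
 * These registers are exposed to userspace through the ONE_REG interface,
 * typically for save/restore during migration. A minimal (hypothetical)
 * userspace sketch, assuming an open vcpu fd:
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM_TIMER_CNT,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// read the guest counter
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// write it back on restore
 */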
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return timer->cntv_ctl;
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
	case KVM_REG_ARM_TIMER_CVAL:
		return timer->cntv_cval;
	}
	return (u64)-1;
}
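
/*
 * CPU hotplug notifier: enable the host virtual timer IRQ on CPUs as they
 * come online and disable it again when they go down.
 */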
static int kvm_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		kvm_timer_init_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(host_vtimer_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block kvm_timer_cpu_nb = {
	.notifier_call = kvm_timer_cpu_notify,
};

static const struct of_device_id arch_timer_of_match[] = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};
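
/*
 * Global init: grab the arch timer timecounter, look up the architected
 * timer node in the device tree, request the virtual timer PPI as a
 * per-cpu interrupt, register the CPU hotplug notifier and create the
 * workqueue used for deferred timer injection.
 */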
int kvm_timer_hyp_init(void)
{
	struct device_node *np;
	unsigned int ppi;
	int err;

	timecounter = arch_timer_get_timecounter();
	if (!timecounter)
		return -ENODEV;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		kvm_err("kvm_arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	ppi = irq_of_parse_and_map(np, 2);
	if (!ppi) {
		kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
		err = -EINVAL;
		goto out;
	}

	err = request_percpu_irq(ppi, kvm_arch_timer_handler,
				 "kvm guest timer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
			ppi, err);
		goto out;
	}

	host_vtimer_irq = ppi;

	err = __register_cpu_notifier(&kvm_timer_cpu_nb);
	if (err) {
		kvm_err("Cannot register timer CPU notifier\n");
		goto out_free;
	}

	wqueue = create_singlethread_workqueue("kvm_arch_timer");
	if (!wqueue) {
		err = -ENOMEM;
		goto out_free;
	}

	kvm_info("%s IRQ%d\n", np->name, ppi);
	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

	goto out;
out_free:
	free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
	of_node_put(np);
	return err;
}
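
/* Tear down the vcpu's timer state: make sure the background timer is off. */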
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	timer_disarm(timer);
}
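
/*
 * Per-VM init: snapshot the counter as the VM's CNTVOFF so the guest's
 * virtual counter starts at zero, and mark the timer as enabled. Only done
 * if the global init managed to set up the timecounter and the workqueue.
 */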
int kvm_timer_init(struct kvm *kvm)
{
	if (timecounter && wqueue) {
		kvm->arch.timer.cntvoff = kvm_phys_timer_read();
		kvm->arch.timer.enabled = 1;
	}

	return 0;
}