/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 */

#include <linux/kvm_host.h>

#include <trace/events/kvm.h>

#include <asm/msidef.h>
#ifdef CONFIG_IA64
#include <asm/iosapic.h>
#endif

#include "irq.h"
#include "ioapic.h"

static inline int kvm_irq_line_state(unsigned long *irq_state,
                                     int irq_source_id, int level)
{
        /* Logical OR for level trig interrupt */
        if (level)
                set_bit(irq_source_id, irq_state);
        else
                clear_bit(irq_source_id, irq_state);

        return !!(*irq_state);
}

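/*
 * irqchip routing entry handlers: fold the per-source level bits into a
 * single line state via kvm_irq_line_state() and drive the corresponding
 * chip model.  On non-x86 builds there is no in-kernel PIC, so
 * kvm_set_pic_irq() simply reports failure.
 */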
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int irq_source_id, int level)
{
#ifdef CONFIG_X86
        struct kvm_pic *pic = pic_irqchip(kvm);
        level = kvm_irq_line_state(&pic->irq_states[e->irqchip.pin],
                                   irq_source_id, level);
        return kvm_pic_set_irq(pic, e->irqchip.pin, level);
#else
        return -1;
#endif
}

static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id, int level)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;
        level = kvm_irq_line_state(&ioapic->irq_states[e->irqchip.pin],
                                   irq_source_id, level);
        return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, level);
}

static inline bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
{
#ifdef CONFIG_IA64
        return irq->delivery_mode ==
                (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
#else
        return irq->delivery_mode == APIC_DM_LOWEST;
#endif
}

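/*
 * Deliver an APIC interrupt to every vcpu whose local APIC matches the
 * destination.  Fixed delivery accumulates the per-vcpu results;
 * lowest-priority delivery picks the single vcpu with the lowest
 * arbitration priority and sends the interrupt to it alone.
 */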
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq)
{
        int i, r = -1;
        struct kvm_vcpu *vcpu, *lowest = NULL;

        if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
                        kvm_is_dm_lowest_prio(irq))
                printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_apic_present(vcpu))
                        continue;

                if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
                                        irq->dest_id, irq->dest_mode))
                        continue;

                if (!kvm_is_dm_lowest_prio(irq)) {
                        if (r < 0)
                                r = 0;
                        r += kvm_apic_set_irq(vcpu, irq);
                } else {
                        if (!lowest)
                                lowest = vcpu;
                        else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
                                lowest = vcpu;
                }
        }

        if (lowest)
                r = kvm_apic_set_irq(lowest, irq);

        return r;
}

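/*
 * MSI routing entry handler: decode destination, vector, delivery mode and
 * trigger mode from the MSI address/data pair into a struct kvm_lapic_irq
 * and hand it to kvm_irq_delivery_to_apic().
 */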
static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                       struct kvm *kvm, int irq_source_id, int level)
{
        struct kvm_lapic_irq irq;

        if (!level)
                return -1;

        trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);

        irq.dest_id = (e->msi.address_lo &
                        MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
        irq.vector = (e->msi.data &
                        MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
        irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
        irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
        irq.delivery_mode = e->msi.data & 0x700;
        irq.level = 1;
        irq.shorthand = 0;

        /* TODO Deal with RH bit of MSI message address */
        return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
}

/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
        struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
        int ret = -1, i = 0;
        struct kvm_irq_routing_table *irq_rt;
        struct hlist_node *n;

        trace_kvm_set_irq(irq, level, irq_source_id);

        /* Not possible to detect if the guest uses the PIC or the
         * IOAPIC.  So set the bit in both. The guest will ignore
         * writes to the unused one.
         */
        rcu_read_lock();
        irq_rt = rcu_dereference(kvm->irq_routing);
        if (irq < irq_rt->nr_rt_entries)
                hlist_for_each_entry(e, n, &irq_rt->map[irq], link)
                        irq_set[i++] = *e;
        rcu_read_unlock();

        while (i--) {
                int r;
                r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
                if (r < 0)
                        continue;

                ret = r + ((ret < 0) ? 0 : ret);
        }

        return ret;
}

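/*
 * Usage sketch (illustrative, not part of this file): userspace-initiated
 * line interrupts come in through the reserved userspace source id,
 * roughly the way the KVM_IRQ_LINE ioctl handler drives it:
 *
 *      kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, gsi, 1);
 *      ...
 *      kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, gsi, 0);
 *
 * Other in-kernel sources are expected to allocate their own id with
 * kvm_request_irq_source_id() below.
 */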
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_ack_notifier *kian;
        struct hlist_node *n;
        int gsi;

        trace_kvm_ack_irq(irqchip, pin);

        rcu_read_lock();
        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi)
                                kian->irq_acked(kian);
        rcu_read_unlock();
}

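/*
 * Ack notifier registration.  The list is modified under irq_lock and
 * walked under RCU by kvm_notify_acked_irq(), so unregistration waits for
 * readers with synchronize_rcu() before the caller may free the notifier.
 */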
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_init_rcu(&kian->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_rcu();
}

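/*
 * IRQ source ids let several independent sources (userspace, assigned
 * devices, ...) drive the same shared level-triggered line: each source
 * owns one bit of the per-pin irq_states word, and the bits are ORed
 * together by kvm_irq_line_state().
 */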
int kvm_request_irq_source_id(struct kvm *kvm)
{
        unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
        int irq_source_id;

        mutex_lock(&kvm->irq_lock);
        irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

        if (irq_source_id >= BITS_PER_LONG) {
                printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
                irq_source_id = -EFAULT;
                goto unlock;
        }

        ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
        set_bit(irq_source_id, bitmap);
unlock:
        mutex_unlock(&kvm->irq_lock);

        return irq_source_id;
}

void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
        int i;

        ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);

        mutex_lock(&kvm->irq_lock);
        if (irq_source_id < 0 ||
            irq_source_id >= BITS_PER_LONG) {
                printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
                goto unlock;
        }
        clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
        if (!irqchip_in_kernel(kvm))
                goto unlock;

        for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) {
                clear_bit(irq_source_id, &kvm->arch.vioapic->irq_states[i]);
                if (i >= 16)
                        continue;
#ifdef CONFIG_X86
                clear_bit(irq_source_id, &pic_irqchip(kvm)->irq_states[i]);
#endif
        }
unlock:
        mutex_unlock(&kvm->irq_lock);
}

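/*
 * Mask notifiers fire when the guest masks or unmasks a pin of an
 * in-kernel irqchip; they use the same irq_lock + RCU scheme as the ack
 * notifiers above.
 */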
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn)
{
        mutex_lock(&kvm->irq_lock);
        kimn->irq = irq;
        hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
        mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_rcu(&kimn->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_rcu();
}

void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
{
        struct kvm_irq_mask_notifier *kimn;
        struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
                if (kimn->irq == irq)
                        kimn->func(kimn, mask);
        rcu_read_unlock();
}

void kvm_free_irq_routing(struct kvm *kvm)
{
        /* Called only during vm destruction. Nobody can use the pointer
           at this stage */
        kfree(kvm->irq_routing);
}

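/*
 * Translate one userspace routing entry into its kernel counterpart and
 * link it into the table's per-GSI hash list.  A GSI may not be mapped to
 * the same irqchip twice, nor shared between MSI and irqchip entries.
 * For the slave PIC the pin is biased by 8 so that irqchip.pin indexes
 * the flat 0-15 ISA range.
 */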
static int setup_routing_entry(struct kvm_irq_routing_table *rt,
                               struct kvm_kernel_irq_routing_entry *e,
                               const struct kvm_irq_routing_entry *ue)
{
        int r = -EINVAL;
        int delta;
        unsigned max_pin;
        struct kvm_kernel_irq_routing_entry *ei;
        struct hlist_node *n;

        /*
         * Do not allow GSI to be mapped to the same irqchip more than once.
         * Allow only one to one mapping between GSI and MSI.
         */
        hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
                if (ei->type == KVM_IRQ_ROUTING_MSI ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return r;

        e->gsi = ue->gsi;
        e->type = ue->type;
        switch (ue->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
                delta = 0;
                switch (ue->u.irqchip.irqchip) {
                case KVM_IRQCHIP_PIC_MASTER:
                        e->set = kvm_set_pic_irq;
                        max_pin = 16;
                        break;
                case KVM_IRQCHIP_PIC_SLAVE:
                        e->set = kvm_set_pic_irq;
                        max_pin = 16;
                        delta = 8;
                        break;
                case KVM_IRQCHIP_IOAPIC:
                        max_pin = KVM_IOAPIC_NUM_PINS;
                        e->set = kvm_set_ioapic_irq;
                        break;
                default:
                        goto out;
                }
                e->irqchip.irqchip = ue->u.irqchip.irqchip;
                e->irqchip.pin = ue->u.irqchip.pin + delta;
                if (e->irqchip.pin >= max_pin)
                        goto out;
                rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
                break;
        case KVM_IRQ_ROUTING_MSI:
                e->set = kvm_set_msi;
                e->msi.address_lo = ue->u.msi.address_lo;
                e->msi.address_hi = ue->u.msi.address_hi;
                e->msi.data = ue->u.msi.data;
                break;
        default:
                goto out;
        }

        hlist_add_head(&e->link, &rt->map[e->gsi]);
        r = 0;
out:
        return r;
}

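/*
 * Install a new routing table: the table is built off to the side,
 * published with rcu_assign_pointer() under irq_lock, and the previous
 * table is freed only after synchronize_rcu() guarantees that no reader
 * still dereferences it.
 */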
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *ue,
                        unsigned nr,
                        unsigned flags)
{
        struct kvm_irq_routing_table *new, *old;
        u32 i, j, nr_rt_entries = 0;
        int r;

        for (i = 0; i < nr; ++i) {
                if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
                        return -EINVAL;
                nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
        }

        nr_rt_entries += 1;

        new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head))
                      + (nr * sizeof(struct kvm_kernel_irq_routing_entry)),
                      GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->rt_entries = (void *)&new->map[nr_rt_entries];

        new->nr_rt_entries = nr_rt_entries;
        for (i = 0; i < 3; i++)
                for (j = 0; j < KVM_IOAPIC_NUM_PINS; j++)
                        new->chip[i][j] = -1;

        for (i = 0; i < nr; ++i) {
                r = -EINVAL;
                if (ue->flags)
                        goto out;
                r = setup_routing_entry(new, &new->rt_entries[i], ue);
                if (r)
                        goto out;
                ++ue;
        }

        mutex_lock(&kvm->irq_lock);
        old = kvm->irq_routing;
        rcu_assign_pointer(kvm->irq_routing, new);
        mutex_unlock(&kvm->irq_lock);
        synchronize_rcu();

        new = old;
        r = 0;

out:
        kfree(new);
        return r;
}

#define IOAPIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,  \
          .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
#define PIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,  \
          .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
#define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq)
#endif

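/*
 * Default identity routing: GSIs 0-15 feed both the PIC and the IOAPIC
 * (the guest ignores whichever chip it does not use); higher GSIs go to
 * the IOAPIC only.
 */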
static const struct kvm_irq_routing_entry default_routing[] = {
        ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
        ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
        ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
        ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
        ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
        ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
        ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
        ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
        ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
        ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
        ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
        ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
        ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
        ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
        ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
        ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
        ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
        ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
        ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
        ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
        ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
        ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
        ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
        ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
        return kvm_set_irq_routing(kvm, default_routing,
                                   ARRAY_SIZE(default_routing), 0);
}