/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
2014-12-08 17:39:45 +03:00
# include "virtio_pci_common.h"
2007-11-13 06:30:26 +03:00
2015-01-15 18:54:13 +03:00
static bool force_legacy = false ;
# if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param ( force_legacy , bool , 0444 ) ;
MODULE_PARM_DESC ( force_legacy ,
" Force legacy mode for transitional virtio 1 devices " ) ;
# endif
2011-11-17 19:41:15 +04:00
/* wait for pending irq handlers */
2014-12-07 19:41:16 +03:00
void vp_synchronize_vectors ( struct virtio_device * vdev )
2011-11-17 19:41:15 +04:00
{
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
int i ;
if ( vp_dev - > intx_enabled )
synchronize_irq ( vp_dev - > pci_dev - > irq ) ;
for ( i = 0 ; i < vp_dev - > msix_vectors ; + + i )
synchronize_irq ( vp_dev - > msix_entries [ i ] . vector ) ;
}
2007-11-13 06:30:26 +03:00
/* the notify function used when creating a virt queue */
2014-12-07 19:41:16 +03:00
bool vp_notify ( struct virtqueue * vq )
2007-11-13 06:30:26 +03:00
{
/* we write the queue's selector into the notification register to
* signal the other end */
2014-12-03 19:01:58 +03:00
iowrite16 ( vq - > index , ( void __iomem * ) vq - > priv ) ;
2013-10-29 03:08:50 +04:00
return true ;
2007-11-13 06:30:26 +03:00
}
2009-05-14 14:55:31 +04:00
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed ( int irq , void * opaque )
{
struct virtio_pci_device * vp_dev = opaque ;
2014-10-14 04:10:34 +04:00
virtio_config_changed ( & vp_dev - > vdev ) ;
2009-05-14 14:55:31 +04:00
return IRQ_HANDLED ;
}
/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt ( int irq , void * opaque )
{
struct virtio_pci_device * vp_dev = opaque ;
struct virtio_pci_vq_info * info ;
irqreturn_t ret = IRQ_NONE ;
unsigned long flags ;
spin_lock_irqsave ( & vp_dev - > lock , flags ) ;
list_for_each_entry ( info , & vp_dev - > virtqueues , node ) {
if ( vring_interrupt ( irq , info - > vq ) = = IRQ_HANDLED )
ret = IRQ_HANDLED ;
}
spin_unlock_irqrestore ( & vp_dev - > lock , flags ) ;
return ret ;
}
2007-11-13 06:30:26 +03:00
/* A small wrapper to also acknowledge the interrupt when it's handled.
* I really need an EIO hook for the vring so I can ack the interrupt once we
* know that we ' ll be handling the IRQ but before we invoke the callback since
* the callback may notify the host which results in the host attempting to
* raise an interrupt that we would then mask once we acknowledged the
* interrupt . */
static irqreturn_t vp_interrupt ( int irq , void * opaque )
{
struct virtio_pci_device * vp_dev = opaque ;
u8 isr ;
/* reading the ISR has the effect of also clearing it so it's very
* important to save off the value . */
2014-12-02 15:35:27 +03:00
isr = ioread8 ( vp_dev - > isr ) ;
2007-11-13 06:30:26 +03:00
/* It's definitely not us if the ISR was not high */
if ( ! isr )
return IRQ_NONE ;
/* Configuration change? Tell driver if it wants to know. */
2009-05-14 14:55:31 +04:00
if ( isr & VIRTIO_PCI_ISR_CONFIG )
vp_config_changed ( irq , opaque ) ;
2007-11-13 06:30:26 +03:00
2009-05-14 14:55:31 +04:00
return vp_vring_interrupt ( irq , opaque ) ;
2007-11-13 06:30:26 +03:00
}
2009-05-14 14:55:41 +04:00
static void vp_free_vectors ( struct virtio_device * vdev )
{
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
int i ;
if ( vp_dev - > intx_enabled ) {
free_irq ( vp_dev - > pci_dev - > irq , vp_dev ) ;
vp_dev - > intx_enabled = 0 ;
}
for ( i = 0 ; i < vp_dev - > msix_used_vectors ; + + i )
free_irq ( vp_dev - > msix_entries [ i ] . vector , vp_dev ) ;
2012-08-28 15:54:14 +04:00
for ( i = 0 ; i < vp_dev - > msix_vectors ; i + + )
if ( vp_dev - > msix_affinity_masks [ i ] )
free_cpumask_var ( vp_dev - > msix_affinity_masks [ i ] ) ;
2009-05-14 14:55:41 +04:00
if ( vp_dev - > msix_enabled ) {
/* Disable the vector used for configuration */
2014-12-07 19:18:09 +03:00
vp_dev - > config_vector ( vp_dev , VIRTIO_MSI_NO_VECTOR ) ;
2009-05-14 14:55:41 +04:00
pci_disable_msix ( vp_dev - > pci_dev ) ;
2009-07-23 15:57:37 +04:00
vp_dev - > msix_enabled = 0 ;
2009-05-14 14:55:41 +04:00
}
2009-07-23 15:57:37 +04:00
2013-07-02 10:05:13 +04:00
vp_dev - > msix_vectors = 0 ;
2009-07-23 15:57:37 +04:00
vp_dev - > msix_used_vectors = 0 ;
kfree ( vp_dev - > msix_names ) ;
vp_dev - > msix_names = NULL ;
kfree ( vp_dev - > msix_entries ) ;
vp_dev - > msix_entries = NULL ;
2012-08-28 15:54:14 +04:00
kfree ( vp_dev - > msix_affinity_masks ) ;
vp_dev - > msix_affinity_masks = NULL ;
2009-05-14 14:55:41 +04:00
}
2009-09-24 08:26:29 +04:00
static int vp_request_msix_vectors ( struct virtio_device * vdev , int nvectors ,
bool per_vq_vectors )
2009-05-14 14:55:41 +04:00
{
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
const char * name = dev_name ( & vp_dev - > vdev . dev ) ;
unsigned i , v ;
int err = - ENOMEM ;
2009-07-26 16:48:08 +04:00
2013-07-02 10:05:13 +04:00
vp_dev - > msix_vectors = nvectors ;
2009-05-14 14:55:41 +04:00
vp_dev - > msix_entries = kmalloc ( nvectors * sizeof * vp_dev - > msix_entries ,
GFP_KERNEL ) ;
if ( ! vp_dev - > msix_entries )
2009-07-23 15:57:37 +04:00
goto error ;
2009-05-14 14:55:41 +04:00
vp_dev - > msix_names = kmalloc ( nvectors * sizeof * vp_dev - > msix_names ,
GFP_KERNEL ) ;
if ( ! vp_dev - > msix_names )
2009-07-23 15:57:37 +04:00
goto error ;
2012-08-28 15:54:14 +04:00
vp_dev - > msix_affinity_masks
= kzalloc ( nvectors * sizeof * vp_dev - > msix_affinity_masks ,
GFP_KERNEL ) ;
if ( ! vp_dev - > msix_affinity_masks )
goto error ;
for ( i = 0 ; i < nvectors ; + + i )
if ( ! alloc_cpumask_var ( & vp_dev - > msix_affinity_masks [ i ] ,
GFP_KERNEL ) )
goto error ;
2009-05-14 14:55:41 +04:00
for ( i = 0 ; i < nvectors ; + + i )
vp_dev - > msix_entries [ i ] . entry = i ;
2014-03-13 04:53:37 +04:00
err = pci_enable_msix_exact ( vp_dev - > pci_dev ,
vp_dev - > msix_entries , nvectors ) ;
2009-07-26 16:48:08 +04:00
if ( err )
goto error ;
vp_dev - > msix_enabled = 1 ;
/* Set the vector used for configuration */
v = vp_dev - > msix_used_vectors ;
snprintf ( vp_dev - > msix_names [ v ] , sizeof * vp_dev - > msix_names ,
" %s-config " , name ) ;
err = request_irq ( vp_dev - > msix_entries [ v ] . vector ,
vp_config_changed , 0 , vp_dev - > msix_names [ v ] ,
vp_dev ) ;
if ( err )
goto error ;
+ + vp_dev - > msix_used_vectors ;
2009-05-14 14:55:41 +04:00
2014-12-07 19:18:09 +03:00
v = vp_dev - > config_vector ( vp_dev , v ) ;
2009-07-26 16:48:08 +04:00
/* Verify we had enough resources to assign the vector */
if ( v = = VIRTIO_MSI_NO_VECTOR ) {
err = - EBUSY ;
goto error ;
2009-05-14 14:55:41 +04:00
}
2009-07-26 16:48:08 +04:00
if ( ! per_vq_vectors ) {
2009-05-14 14:55:41 +04:00
/* Shared vector for all VQs */
v = vp_dev - > msix_used_vectors ;
snprintf ( vp_dev - > msix_names [ v ] , sizeof * vp_dev - > msix_names ,
" %s-virtqueues " , name ) ;
err = request_irq ( vp_dev - > msix_entries [ v ] . vector ,
vp_vring_interrupt , 0 , vp_dev - > msix_names [ v ] ,
vp_dev ) ;
if ( err )
2009-07-23 15:57:37 +04:00
goto error ;
2009-05-14 14:55:41 +04:00
+ + vp_dev - > msix_used_vectors ;
}
return 0 ;
2009-07-23 15:57:37 +04:00
error :
2009-05-14 14:55:41 +04:00
vp_free_vectors ( vdev ) ;
return err ;
}
2009-09-24 08:26:29 +04:00
static int vp_request_intx ( struct virtio_device * vdev )
{
int err ;
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
err = request_irq ( vp_dev - > pci_dev - > irq , vp_interrupt ,
IRQF_SHARED , dev_name ( & vdev - > dev ) , vp_dev ) ;
if ( ! err )
vp_dev - > intx_enabled = 1 ;
return err ;
}
2014-12-07 19:03:49 +03:00
static struct virtqueue * vp_setup_vq ( struct virtio_device * vdev , unsigned index ,
void ( * callback ) ( struct virtqueue * vq ) ,
const char * name ,
u16 msix_vec )
{
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
struct virtio_pci_vq_info * info = kmalloc ( sizeof * info , GFP_KERNEL ) ;
struct virtqueue * vq ;
unsigned long flags ;
/* fill out our structure that represents an active queue */
if ( ! info )
return ERR_PTR ( - ENOMEM ) ;
vq = vp_dev - > setup_vq ( vp_dev , info , index , callback , name , msix_vec ) ;
if ( IS_ERR ( vq ) )
goto out_info ;
info - > vq = vq ;
2011-10-05 09:38:59 +04:00
if ( callback ) {
spin_lock_irqsave ( & vp_dev - > lock , flags ) ;
list_add ( & info - > node , & vp_dev - > virtqueues ) ;
spin_unlock_irqrestore ( & vp_dev - > lock , flags ) ;
} else {
INIT_LIST_HEAD ( & info - > node ) ;
}
2007-11-13 06:30:26 +03:00
2014-12-03 18:50:50 +03:00
vp_dev - > vqs [ index ] = info ;
2007-11-13 06:30:26 +03:00
return vq ;
out_info :
kfree ( info ) ;
2014-12-07 19:03:49 +03:00
return vq ;
2007-11-13 06:30:26 +03:00
}
2014-12-07 19:03:49 +03:00
static void vp_del_vq ( struct virtqueue * vq )
{
struct virtio_pci_device * vp_dev = to_vp_device ( vq - > vdev ) ;
struct virtio_pci_vq_info * info = vp_dev - > vqs [ vq - > index ] ;
unsigned long flags ;
spin_lock_irqsave ( & vp_dev - > lock , flags ) ;
list_del ( & info - > node ) ;
spin_unlock_irqrestore ( & vp_dev - > lock , flags ) ;
vp_dev - > del_vq ( info ) ;
2007-11-13 06:30:26 +03:00
kfree ( info ) ;
}
2009-05-14 14:55:41 +04:00
/* the config->del_vqs() implementation */
2014-12-07 19:41:16 +03:00
void vp_del_vqs ( struct virtio_device * vdev )
2009-06-13 08:16:36 +04:00
{
2009-07-26 16:48:08 +04:00
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
2009-06-13 08:16:36 +04:00
struct virtqueue * vq , * n ;
2009-07-26 16:48:08 +04:00
struct virtio_pci_vq_info * info ;
2009-06-13 08:16:36 +04:00
2009-07-26 16:48:08 +04:00
list_for_each_entry_safe ( vq , n , & vdev - > vqs , list ) {
2014-12-03 18:50:50 +03:00
info = vp_dev - > vqs [ vq - > index ] ;
2010-02-25 20:08:55 +03:00
if ( vp_dev - > per_vq_vectors & &
info - > msix_vector ! = VIRTIO_MSI_NO_VECTOR )
2009-09-24 08:26:29 +04:00
free_irq ( vp_dev - > msix_entries [ info - > msix_vector ] . vector ,
vq ) ;
2009-06-13 08:16:36 +04:00
vp_del_vq ( vq ) ;
2009-07-26 16:48:08 +04:00
}
vp_dev - > per_vq_vectors = false ;
2009-05-14 14:55:41 +04:00
vp_free_vectors ( vdev ) ;
2014-12-03 18:50:50 +03:00
kfree ( vp_dev - > vqs ) ;
2015-01-04 18:21:58 +03:00
vp_dev - > vqs = NULL ;
2009-06-13 08:16:36 +04:00
}
2009-07-26 16:48:08 +04:00
static int vp_try_to_find_vqs ( struct virtio_device * vdev , unsigned nvqs ,
struct virtqueue * vqs [ ] ,
vq_callback_t * callbacks [ ] ,
const char * names [ ] ,
2009-09-24 08:26:29 +04:00
bool use_msix ,
2009-07-26 16:48:08 +04:00
bool per_vq_vectors )
2009-06-13 08:16:36 +04:00
{
2009-07-26 16:48:08 +04:00
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
2009-09-24 08:26:29 +04:00
u16 msix_vec ;
int i , err , nvectors , allocated_vectors ;
2009-05-14 14:55:41 +04:00
2014-12-03 18:50:50 +03:00
vp_dev - > vqs = kmalloc ( nvqs * sizeof * vp_dev - > vqs , GFP_KERNEL ) ;
if ( ! vp_dev - > vqs )
return - ENOMEM ;
2009-09-24 08:26:29 +04:00
if ( ! use_msix ) {
/* Old style: one normal interrupt for change and all vqs. */
err = vp_request_intx ( vdev ) ;
if ( err )
2014-12-03 18:50:50 +03:00
goto error_find ;
2009-09-24 08:26:29 +04:00
} else {
if ( per_vq_vectors ) {
/* Best option: one for change interrupt, one per vq. */
nvectors = 1 ;
for ( i = 0 ; i < nvqs ; + + i )
if ( callbacks [ i ] )
+ + nvectors ;
} else {
/* Second best: one for change, shared for all vqs. */
nvectors = 2 ;
}
err = vp_request_msix_vectors ( vdev , nvectors , per_vq_vectors ) ;
if ( err )
2014-12-03 18:50:50 +03:00
goto error_find ;
2009-09-24 08:26:29 +04:00
}
2009-06-13 08:16:36 +04:00
2009-07-26 16:48:08 +04:00
vp_dev - > per_vq_vectors = per_vq_vectors ;
allocated_vectors = vp_dev - > msix_used_vectors ;
2009-06-13 08:16:36 +04:00
for ( i = 0 ; i < nvqs ; + + i ) {
2012-09-05 22:47:45 +04:00
if ( ! names [ i ] ) {
vqs [ i ] = NULL ;
continue ;
} else if ( ! callbacks [ i ] | | ! vp_dev - > msix_enabled )
2009-09-24 08:26:29 +04:00
msix_vec = VIRTIO_MSI_NO_VECTOR ;
2009-07-26 16:48:08 +04:00
else if ( vp_dev - > per_vq_vectors )
2009-09-24 08:26:29 +04:00
msix_vec = allocated_vectors + + ;
2009-07-26 16:48:08 +04:00
else
2009-09-24 08:26:29 +04:00
msix_vec = VP_MSIX_VQ_VECTOR ;
2014-12-07 19:03:49 +03:00
vqs [ i ] = vp_setup_vq ( vdev , i , callbacks [ i ] , names [ i ] , msix_vec ) ;
2009-07-26 16:48:08 +04:00
if ( IS_ERR ( vqs [ i ] ) ) {
err = PTR_ERR ( vqs [ i ] ) ;
2009-05-14 14:55:41 +04:00
goto error_find ;
2009-07-26 16:48:08 +04:00
}
2009-10-22 17:06:06 +04:00
if ( ! vp_dev - > per_vq_vectors | | msix_vec = = VIRTIO_MSI_NO_VECTOR )
continue ;
2009-07-26 16:48:08 +04:00
/* allocate per-vq irq if available and necessary */
2009-10-22 17:06:06 +04:00
snprintf ( vp_dev - > msix_names [ msix_vec ] ,
sizeof * vp_dev - > msix_names ,
" %s-%s " ,
dev_name ( & vp_dev - > vdev . dev ) , names [ i ] ) ;
err = request_irq ( vp_dev - > msix_entries [ msix_vec ] . vector ,
vring_interrupt , 0 ,
vp_dev - > msix_names [ msix_vec ] ,
vqs [ i ] ) ;
if ( err ) {
vp_del_vq ( vqs [ i ] ) ;
goto error_find ;
2009-07-26 16:48:08 +04:00
}
2009-06-13 08:16:36 +04:00
}
return 0 ;
2009-05-14 14:55:41 +04:00
error_find :
2009-06-13 08:16:36 +04:00
vp_del_vqs ( vdev ) ;
2009-07-26 16:48:08 +04:00
return err ;
}
/* the config->find_vqs() implementation */
2014-12-07 19:41:16 +03:00
int vp_find_vqs ( struct virtio_device * vdev , unsigned nvqs ,
struct virtqueue * vqs [ ] ,
vq_callback_t * callbacks [ ] ,
const char * names [ ] )
2009-07-26 16:48:08 +04:00
{
2009-09-24 08:26:29 +04:00
int err ;
2009-07-26 16:48:08 +04:00
2009-09-24 08:26:29 +04:00
/* Try MSI-X with one vector per queue. */
err = vp_try_to_find_vqs ( vdev , nvqs , vqs , callbacks , names , true , true ) ;
2009-07-26 16:48:08 +04:00
if ( ! err )
return 0 ;
2009-09-24 08:26:29 +04:00
/* Fallback: MSI-X with one vector for config, one shared for queues. */
2009-07-26 16:48:08 +04:00
err = vp_try_to_find_vqs ( vdev , nvqs , vqs , callbacks , names ,
2009-09-24 08:26:29 +04:00
true , false ) ;
2009-07-26 16:48:08 +04:00
if ( ! err )
return 0 ;
/* Finally fall back to regular interrupts. */
2009-09-24 08:26:29 +04:00
return vp_try_to_find_vqs ( vdev , nvqs , vqs , callbacks , names ,
false , false ) ;
2009-06-13 08:16:36 +04:00
}
2014-12-07 19:41:16 +03:00
const char * vp_bus_name ( struct virtio_device * vdev )
2011-11-14 18:17:08 +04:00
{
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
return pci_name ( vp_dev - > pci_dev ) ;
}
2012-08-28 15:54:14 +04:00
/* Setup the affinity for a virtqueue:
* - force the affinity for per vq vector
* - OR over all affinities for shared MSI
* - ignore the affinity request if we ' re using INTX
*/
2014-12-07 19:41:16 +03:00
int vp_set_vq_affinity ( struct virtqueue * vq , int cpu )
2012-08-28 15:54:14 +04:00
{
struct virtio_device * vdev = vq - > vdev ;
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
2014-12-03 18:50:50 +03:00
struct virtio_pci_vq_info * info = vp_dev - > vqs [ vq - > index ] ;
2012-08-28 15:54:14 +04:00
struct cpumask * mask ;
unsigned int irq ;
if ( ! vq - > callback )
return - EINVAL ;
if ( vp_dev - > msix_enabled ) {
mask = vp_dev - > msix_affinity_masks [ info - > msix_vector ] ;
irq = vp_dev - > msix_entries [ info - > msix_vector ] . vector ;
if ( cpu = = - 1 )
irq_set_affinity_hint ( irq , NULL ) ;
else {
2015-06-04 11:41:44 +03:00
cpumask_clear ( mask ) ;
2012-08-28 15:54:14 +04:00
cpumask_set_cpu ( cpu , mask ) ;
irq_set_affinity_hint ( irq , mask ) ;
}
}
return 0 ;
}
#ifdef CONFIG_PM_SLEEP
/* System-sleep freeze callback: quiesce the virtio device, then disable
 * the PCI device only if the freeze succeeded. */
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}
2012-03-29 11:24:43 +04:00
static int virtio_pci_restore ( struct device * dev )
2011-12-22 15:28:26 +04:00
{
struct pci_dev * pci_dev = to_pci_dev ( dev ) ;
struct virtio_pci_device * vp_dev = pci_get_drvdata ( pci_dev ) ;
int ret ;
ret = pci_enable_device ( pci_dev ) ;
if ( ret )
return ret ;
2012-03-29 11:24:43 +04:00
2011-12-22 15:28:26 +04:00
pci_set_master ( pci_dev ) ;
2014-10-14 04:10:35 +04:00
return virtio_device_restore ( & vp_dev - > vdev ) ;
2011-12-22 15:28:26 +04:00
}
2014-12-11 22:47:49 +03:00
static const struct dev_pm_ops virtio_pci_pm_ops = {
2012-03-29 11:28:05 +04:00
SET_SYSTEM_SLEEP_PM_OPS ( virtio_pci_freeze , virtio_pci_restore )
2011-12-22 15:28:25 +04:00
} ;
2007-11-13 06:30:26 +03:00
# endif
2014-12-11 22:47:49 +03:00
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table [ ] = {
{ PCI_DEVICE ( 0x1af4 , PCI_ANY_ID ) } ,
{ 0 }
} ;
MODULE_DEVICE_TABLE ( pci , virtio_pci_id_table ) ;
static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until it's release callback. */
	kfree(vp_dev);
}
2014-12-11 22:47:49 +03:00
static int virtio_pci_probe ( struct pci_dev * pci_dev ,
const struct pci_device_id * id )
{
2015-01-13 12:23:32 +03:00
struct virtio_pci_device * vp_dev ;
int rc ;
/* allocate our structure and fill it out */
vp_dev = kzalloc ( sizeof ( struct virtio_pci_device ) , GFP_KERNEL ) ;
if ( ! vp_dev )
return - ENOMEM ;
pci_set_drvdata ( pci_dev , vp_dev ) ;
vp_dev - > vdev . dev . parent = & pci_dev - > dev ;
vp_dev - > vdev . dev . release = virtio_pci_release_dev ;
vp_dev - > pci_dev = pci_dev ;
INIT_LIST_HEAD ( & vp_dev - > virtqueues ) ;
spin_lock_init ( & vp_dev - > lock ) ;
/* enable the device */
rc = pci_enable_device ( pci_dev ) ;
if ( rc )
goto err_enable_device ;
2015-01-15 18:54:13 +03:00
if ( force_legacy ) {
2014-12-11 14:59:51 +03:00
rc = virtio_pci_legacy_probe ( vp_dev ) ;
2015-01-15 18:54:13 +03:00
/* Also try modern mode if we can't map BAR0 (no IO space). */
if ( rc = = - ENODEV | | rc = = - ENOMEM )
rc = virtio_pci_modern_probe ( vp_dev ) ;
if ( rc )
goto err_probe ;
} else {
rc = virtio_pci_modern_probe ( vp_dev ) ;
if ( rc = = - ENODEV )
rc = virtio_pci_legacy_probe ( vp_dev ) ;
if ( rc )
goto err_probe ;
}
2015-01-13 12:23:32 +03:00
pci_set_master ( pci_dev ) ;
rc = register_virtio_device ( & vp_dev - > vdev ) ;
if ( rc )
goto err_register ;
return 0 ;
err_register :
2014-12-11 14:59:51 +03:00
if ( vp_dev - > ioaddr )
virtio_pci_legacy_remove ( vp_dev ) ;
else
virtio_pci_modern_remove ( vp_dev ) ;
2015-01-13 12:23:32 +03:00
err_probe :
pci_disable_device ( pci_dev ) ;
err_enable_device :
kfree ( vp_dev ) ;
return rc ;
2014-12-11 22:47:49 +03:00
}
static void virtio_pci_remove ( struct pci_dev * pci_dev )
{
2015-01-13 12:23:32 +03:00
struct virtio_pci_device * vp_dev = pci_get_drvdata ( pci_dev ) ;
unregister_virtio_device ( & vp_dev - > vdev ) ;
2014-12-11 14:59:51 +03:00
if ( vp_dev - > ioaddr )
virtio_pci_legacy_remove ( vp_dev ) ;
else
virtio_pci_modern_remove ( vp_dev ) ;
2015-01-13 12:23:32 +03:00
pci_disable_device ( pci_dev ) ;
2014-12-11 22:47:49 +03:00
}
static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");