/*
 * Virtio PCI driver - legacy device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "virtio_pci_common.h"

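/*
 * For reference, the legacy interface places its registers at fixed
 * offsets in I/O BAR 0 (offsets per include/uapi/linux/virtio_pci.h):
 *
 *	0x00  VIRTIO_PCI_HOST_FEATURES   32  R   features the device offers
 *	0x04  VIRTIO_PCI_GUEST_FEATURES  32  W   features the driver accepts
 *	0x08  VIRTIO_PCI_QUEUE_PFN       32  RW  page frame number of the vring
 *	0x0c  VIRTIO_PCI_QUEUE_NUM       16  R   size of the selected queue
 *	0x0e  VIRTIO_PCI_QUEUE_SEL       16  W   queue selector
 *	0x10  VIRTIO_PCI_QUEUE_NOTIFY    16  W   kick the selected queue
 *	0x12  VIRTIO_PCI_STATUS           8  RW  device status byte
 *	0x13  VIRTIO_PCI_ISR              8  R   interrupt status (read clears)
 *
 * With MSI-X enabled, VIRTIO_MSI_CONFIG_VECTOR (0x14) and
 * VIRTIO_MSI_QUEUE_VECTOR (0x16) follow, and the device-specific config
 * space starts at 0x18 instead of 0x14 (see VIRTIO_PCI_CONFIG()).
 */
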
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* When someone needs more than 32 feature bits, we'll need to
	 * steal a bit to indicate that the rest are somewhere else. */
	return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}

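/*
 * Note that the legacy transport exposes only these 32 feature bits;
 * in particular VIRTIO_F_VERSION_1 (bit 32) can never be offered or
 * negotiated here, so devices bound through this file always operate
 * as legacy (pre-1.0) devices.
 */
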
/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/* We only support 32 feature bits. */
	iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);

	return 0;
}

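/*
 * vring_transport_features() clears every transport feature bit except
 * the ring features the core ring code implements (at this point,
 * VIRTIO_RING_F_INDIRECT_DESC and VIRTIO_RING_F_EVENT_IDX), so we never
 * acknowledge a transport feature this driver cannot honour.
 */
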
/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	void __iomem *ioaddr = vp_dev->ioaddr +
				VIRTIO_PCI_CONFIG(vp_dev) + offset;
	u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		ptr[i] = ioread8(ioaddr + i);
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	void __iomem *ioaddr = vp_dev->ioaddr +
				VIRTIO_PCI_CONFIG(vp_dev) + offset;
	const u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		iowrite8(ptr[i], ioaddr + i);
}

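/*
 * Both accessors deliberately use byte-wide port I/O: the device config
 * space makes no alignment guarantees, and single-byte accesses are
 * valid at any offset and length.  Note that legacy config fields are
 * guest-native endian, unlike the modern interface where they are
 * little-endian.
 */
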
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

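/*
 * The status byte is a bitmask of VIRTIO_CONFIG_S_* bits (ACKNOWLEDGE,
 * DRIVER, DRIVER_OK, FAILED).  The value 0 is reserved as the reset
 * request, which is why vp_set_status() refuses it and vp_reset()
 * below writes it directly.
 */
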
static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any. */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

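/*
 * Note that the legacy interface gives us no way to poll for reset
 * completion; the read-back above flushes the posted write, and the
 * driver assumes the reset has taken effect once it returns.
 */
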
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
}

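/*
 * The device reports failure by reading back VIRTIO_MSI_NO_VECTOR
 * (0xffff), so the common code uses this op roughly as:
 *
 *	if (vp_dev->config_vector(vp_dev, v) == VIRTIO_MSI_NO_VECTOR)
 *		return -EBUSY;	// fall back to fewer vectors or INTx
 */
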
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  u16 msix_vec)
{
	struct virtqueue *vq;
	unsigned long size;
	u16 num;
	int err;

	/* Select the queue we're interested in */
	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	/* Check if queue is either not available or already active. */
	num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
		return ERR_PTR(-ENOENT);

	info->num = num;
	info->msix_vector = msix_vec;

	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
	if (info->queue == NULL)
		return ERR_PTR(-ENOMEM);

	/* activate the queue */
	iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* create the vring */
	vq = vring_new_virtqueue(index, info->num,
				 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
				 true, info->queue, vp_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto out_activate_queue;
	}

	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto out_assign;
		}
	}

	return vq;

out_assign:
	vring_del_virtqueue(vq);
out_activate_queue:
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
	free_pages_exact(info->queue, size);
	return ERR_PTR(err);
}

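/*
 * Addressing note: VIRTIO_PCI_QUEUE_PFN holds a 32-bit page frame
 * number (physical address >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, i.e. >> 12),
 * so the ring must be page-aligned (alloc_pages_exact() guarantees
 * this) and must reside below 1ULL << 44 bytes of physical memory.
 * Lifting this limit is one of the motivations for the modern
 * interface's full 64-bit queue addresses.
 */
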
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	unsigned long size;

	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
	free_pages_exact(info->queue, size);
}

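/*
 * Teardown ordering matters here: the queue's MSI-X vector is disabled
 * first (with the write flushed by an ISR read) so that the device can
 * no longer raise an interrupt for this queue, and only then are the
 * vring and its pages torn down.
 */
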
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};

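/*
 * Everything not defined in this file (vp_find_vqs, vp_del_vqs,
 * vp_bus_name, vp_set_vq_affinity, and the vp_notify kick used by
 * setup_vq) is transport-version independent and lives in
 * virtio_pci_common.c; only the register accesses above are
 * legacy-specific.
 */
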
static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}

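/*
 * Note the split in responsibilities: virtio_pci_legacy_remove() below
 * releases the PCI resources, while this callback only frees the
 * memory, because the embedded kobject (and thus this allocation) can
 * outlive the PCI device binding.
 */
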
/* the PCI probing function */
int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev;
	int err;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

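	/*
	 * Device IDs 0x1000-0x103f (vendor 0x1af4) identify legacy and
	 * transitional virtio devices; IDs from 0x1040 up are reserved
	 * for modern-only (virtio 1.0) devices and are not ours.
	 * VIRTIO_PCI_ABI_VERSION is 0, and any other revision would
	 * imply an incompatible register layout.
	 */
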
	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (vp_dev == NULL)
		return -ENOMEM;

	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->vdev.config = &virtio_pci_config_ops;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* Disable MSI/MSIX to bring device to a known good state. */
	pci_msi_off(pci_dev);

	/* enable the device */
	err = pci_enable_device(pci_dev);
	if (err)
		goto out;

	err = pci_request_regions(pci_dev, "virtio-pci");
	if (err)
		goto out_enable_device;

	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (vp_dev->ioaddr == NULL) {
		err = -ENOMEM;
		goto out_req_regions;
	}

	vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

	pci_set_drvdata(pci_dev, vp_dev);
	pci_set_master(pci_dev);

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id.  this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	/* finally register the virtio device */
	err = register_virtio_device(&vp_dev->vdev);
	if (err)
		goto out_set_drvdata;

	return 0;

out_set_drvdata:
	pci_iounmap(pci_dev, vp_dev->ioaddr);
out_req_regions:
	pci_release_regions(pci_dev);
out_enable_device:
	pci_disable_device(pci_dev);
out:
	kfree(vp_dev);
	return err;
}

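/*
 * The error labels above unwind strictly in reverse order of
 * acquisition (iounmap, release regions, disable device, free memory),
 * so each goto target releases only what was successfully set up
 * before the failure point.
 */
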
void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	vp_del_vqs(&vp_dev->vdev);
	pci_iounmap(pci_dev, vp_dev->ioaddr);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
}