#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
/*
 * Virtio PCI driver - APIs for common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
/*
 * Per-virtqueue bookkeeping: links a virtqueue into the device's
 * dispatch list and records which MSI-X vector services it.
 */
struct virtio_pci_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;

	/* MSI-X vector (or none) */
	unsigned msix_vector;
};
2014-12-07 19:41:16 +03:00
/* Our device structure */
struct virtio_pci_device {
struct virtio_device vdev ;
struct pci_dev * pci_dev ;
2014-12-11 14:59:51 +03:00
/* In legacy mode, these two point to within ->legacy. */
/* Where to read and clear interrupt */
u8 __iomem * isr ;
/* Modern only fields */
/* The IO mapping for the PCI config space (non-legacy mode) */
struct virtio_pci_common_cfg __iomem * common ;
/* Device-specific data (non-legacy mode) */
void __iomem * device ;
2015-01-14 19:50:55 +03:00
/* Base of vq notifications (non-legacy mode). */
void __iomem * notify_base ;
2014-12-11 14:59:51 +03:00
/* So we can sanity-check accesses. */
2015-01-14 19:50:55 +03:00
size_t notify_len ;
2014-12-11 14:59:51 +03:00
size_t device_len ;
/* Capability for when we need to map notifications per-vq. */
int notify_map_cap ;
/* Multiply queue_notify_off by this value. (non-legacy mode). */
u32 notify_offset_multiplier ;
2015-06-24 08:54:15 +03:00
int modern_bars ;
2014-12-11 14:59:51 +03:00
/* Legacy only field */
2014-12-07 19:41:16 +03:00
/* the IO mapping for the PCI config space */
void __iomem * ioaddr ;
2017-04-04 21:44:44 +03:00
/* a list of queues so we can dispatch IRQs */
spinlock_t lock ;
struct list_head virtqueues ;
/* array of all queues for house-keeping */
struct virtio_pci_vq_info * * vqs ;
2017-04-04 21:09:20 +03:00
/* MSI-X support */
int msix_enabled ;
2017-04-04 21:15:41 +03:00
int intx_enabled ;
2014-12-07 19:41:16 +03:00
cpumask_var_t * msix_affinity_masks ;
/* Name strings for interrupts. This size should be enough,
* and I ' m too lazy to allocate each name separately . */
char ( * msix_names ) [ 256 ] ;
2017-04-04 21:15:41 +03:00
/* Number of available vectors */
unsigned msix_vectors ;
/* Vectors allocated, excluding per-vq vectors if any */
unsigned msix_used_vectors ;
2017-04-04 21:44:44 +03:00
/* Whether we have vector per vq */
bool per_vq_vectors ;
2014-12-07 19:41:16 +03:00
struct virtqueue * ( * setup_vq ) ( struct virtio_pci_device * vp_dev ,
2017-04-04 21:44:44 +03:00
struct virtio_pci_vq_info * info ,
2014-12-07 19:41:16 +03:00
unsigned idx ,
void ( * callback ) ( struct virtqueue * vq ) ,
const char * name ,
2017-03-06 19:32:29 +03:00
bool ctx ,
2014-12-07 19:41:16 +03:00
u16 msix_vec ) ;
2017-04-04 21:44:44 +03:00
void ( * del_vq ) ( struct virtio_pci_vq_info * info ) ;
2014-12-07 19:41:16 +03:00
u16 ( * config_vector ) ( struct virtio_pci_device * vp_dev , u16 vector ) ;
} ;
/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
 * virtqueues. Thus, we need at least 2 vectors for MSI. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};
2014-12-07 19:41:16 +03:00
/* Convert a generic virtio device to our structure */
static struct virtio_pci_device * to_vp_device ( struct virtio_device * vdev )
{
return container_of ( vdev , struct virtio_pci_device , vdev ) ;
}
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev);
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq);
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char *const names[], const bool *ctx,
		struct irq_affinity *desc);
const char *vp_bus_name(struct virtio_device *vdev);

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, int cpu);

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);
#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev);
void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev);
#else
/* Stubs when the legacy transport is compiled out: probe reports that
 * no legacy device support is available; remove is a no-op. */
static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	return -ENODEV;
}
static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
}
#endif

int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev);
void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev);
#endif