// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */
2016-04-03 15:23:37 +03:00
# include <linux/delay.h>
2014-12-11 13:59:51 +02:00
# define VIRTIO_PCI_NO_LEGACY
2019-09-11 12:49:53 +00:00
# define VIRTIO_RING_NO_LEGACY
2014-12-11 13:59:51 +02:00
# include "virtio_pci_common.h"
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_features(&vp_dev->mdev);
}
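
/*
 * Transport-level feature fixups: accept VIRTIO_F_SR_IOV only when the
 * device both offered it and actually exposes an SR-IOV extended
 * capability in PCI config space.
 */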
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
}
2014-12-11 13:59:51 +02:00
/* virtio config->finalize_features() implementation */
static int vp_finalize_features ( struct virtio_device * vdev )
{
struct virtio_pci_device * vp_dev = to_vp_device ( vdev ) ;
2018-06-01 12:02:39 +08:00
u64 features = vdev - > features ;
2014-12-11 13:59:51 +02:00
/* Give virtio_ring a chance to accept features. */
vring_transport_features ( vdev ) ;
2018-06-01 12:02:39 +08:00
/* Give virtio_pci a chance to accept features. */
vp_transport_features ( vdev , features ) ;
2014-12-11 13:59:51 +02:00
if ( ! __virtio_test_bit ( vdev , VIRTIO_F_VERSION_1 ) ) {
dev_err ( & vdev - > dev , " virtio: device uses modern interface "
" but does not have VIRTIO_F_VERSION_1 \n " ) ;
return - EINVAL ;
}
2021-01-04 14:54:51 +08:00
vp_modern_set_features ( & vp_dev - > mdev , vdev - > features ) ;
2014-12-11 13:59:51 +02:00
return 0 ;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		b = ioread8(device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		break;
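	/*
	 * 64-bit fields are read as two 32-bit accesses: the spec only
	 * requires config space to support accesses up to 32 bits wide.
	 * Callers that need a consistent value guard against torn reads
	 * with the config generation counter (see vp_generation()).
	 */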
	case 8:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}
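
/*
 * Example (sketch, not part of this file): device drivers reach vp_get()
 * through the virtio_cread*() helpers, e.g. virtio-blk reading its
 * capacity field:
 *
 *	u64 capacity;
 *
 *	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 *
 * The helper derives offset/len from the structure field and ends up
 * calling config->get(), i.e. vp_get() on modern PCI devices.
 */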

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}
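
/*
 * config->generation() implementation: the device bumps this counter
 * whenever it changes configuration fields, so readers of multi-access
 * fields can detect and retry torn reads.
 */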
static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_generation(&vp_dev->mdev);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_status(&vp_dev->mdev);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_modern_set_status(&vp_dev->mdev, status);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	/* 0 status means a reset. */
	vp_modern_set_status(mdev, 0);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_modern_get_status(mdev))
		msleep(1);
	/* Disable VQ/configuration callbacks. */
	vp_disable_cbs(vdev);
}
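
/* Route the config-change interrupt to the given MSI-X vector. */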
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	return vp_modern_config_vector(&vp_dev->mdev, vector);
}

static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtqueue *vq;
	u16 num;
	int err;

	if (index >= vp_modern_get_num_queues(mdev))
		return ERR_PTR(-ENOENT);

	/* Check if queue is either not available or already active. */
	num = vp_modern_get_queue_size(mdev, index);
	if (!num || vp_modern_get_queue_enable(mdev, index))
		return ERR_PTR(-ENOENT);
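
	/* Split virtqueues must be power-of-2 sized; num & (num - 1) is
	 * nonzero exactly when num is not a power of 2. */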
	if (num & (num - 1)) {
		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
		return ERR_PTR(-EINVAL);
	}

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    vp_notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	/* activate the queue */
	vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
	vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
				virtqueue_get_avail_addr(vq),
				virtqueue_get_used_addr(vq));

	vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
	if (!vq->priv) {
		err = -ENOMEM;
		goto err_map_notify;
	}

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto err_assign_vector;
		}
	}

	return vq;

err_assign_vector:
	if (!mdev->notify_base)
		pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}

static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char * const names[], const bool *ctx,
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list)
		vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);

	return 0;
}
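
/*
 * Example (sketch, not part of this file): a device driver reaches
 * vp_modern_find_vqs() through virtio_find_vqs(); the callback names
 * below are hypothetical driver functions:
 *
 *	struct virtqueue *vqs[2];
 *	vq_callback_t *callbacks[] = { my_rx_done, my_tx_done };
 *	static const char * const names[] = { "rx", "tx" };
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *
 * On success each queue has been sized, given its ring addresses, and
 * enabled via queue_enable as above.
 */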

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	if (vp_dev->msix_enabled)
		vp_modern_queue_vector(mdev, vq->index,
				       VIRTIO_MSI_NO_VECTOR);

	if (!mdev->notify_base)
		pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}
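
/*
 * Scan the vendor-specific PCI capability list for a shared memory
 * capability (VIRTIO_PCI_CAP_SHARED_MEMORY_CFG) with the given ID.
 * The reads below follow struct virtio_pci_cap/virtio_pci_cap64 from
 * include/uapi/linux/virtio_pci.h: a BAR index plus a 64-bit offset
 * and length, each split into low and high 32-bit halves.
 */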
static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
				   u8 *bar, u64 *offset, u64 *len)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, cap_len, id;
		u32 tmp32;
		u64 res_offset, res_length;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type), &type);
		if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cap_len), &cap_len);
		if (cap_len != sizeof(struct virtio_pci_cap64)) {
			dev_err(&dev->dev, "%s: shm cap with bad size offset:"
				" %d size: %d\n", __func__, pos, cap_len);
			continue;
		}

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 id), &id);
		if (id != required_id)
			continue;

		/* Type and ID match, looks good */
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar), bar);

		/* Read the lower 32bit of length and offset */
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  offset), &tmp32);
		res_offset = tmp32;
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  length), &tmp32);
		res_length = tmp32;

		/* and now the top half */
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     offset_hi), &tmp32);
		res_offset |= ((u64)tmp32) << 32;
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     length_hi), &tmp32);
		res_length |= ((u64)tmp32) << 32;

		*offset = res_offset;
		*len = res_length;

		return pos;
	}
	return 0;
}

static bool vp_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	u8 bar;
	u64 offset, len;
	phys_addr_t phys_addr;
	size_t bar_len;

	if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
		return false;

	phys_addr = pci_resource_start(pci_dev, bar);
	bar_len = pci_resource_len(pci_dev, bar);

	if ((offset + len) < offset) {
		dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
			__func__);
		return false;
	}

	if (offset + len > bar_len) {
		dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
			__func__);
		return false;
	}

	region->len = len;
	region->addr = (u64) phys_addr + offset;

	return true;
}
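
/*
 * Example (sketch, not part of this file): virtiofs locates its DAX
 * window through the generic helper, which lands here on modern PCI
 * devices:
 *
 *	struct virtio_shm_region cache_reg;
 *
 *	if (!virtio_get_shm_region(vdev, &cache_reg,
 *				   (u8)VIRTIO_FS_SHMCAP_ID_CACHE))
 *		return 0;
 *
 * cache_reg.addr/cache_reg.len then describe the physical window.
 */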

static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.enable_cbs	= vp_enable_cbs,
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region	= vp_get_shm_region,
};
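
/*
 * Full ops table, used when the device exposes a device-specific
 * configuration window (mdev->device != NULL); the _nodev variant
 * above leaves .get/.set NULL for devices without one (see
 * virtio_pci_modern_probe() below).
 */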
static const struct virtio_config_ops virtio_pci_config_ops = {
	.enable_cbs	= vp_enable_cbs,
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region	= vp_get_shm_region,
};

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err;

	mdev->pci_dev = pci_dev;

	err = vp_modern_probe(mdev);
	if (err)
		return err;

	if (mdev->device)
		vp_dev->vdev.config = &virtio_pci_config_ops;
	else
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;
	vp_dev->isr = mdev->isr;
	vp_dev->vdev.id = mdev->id;

	return 0;
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	vp_modern_remove(mdev);
}