// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 */

#include "ifcvf_base.h"

struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
{
	return container_of(hw, struct ifcvf_adapter, vf);
}

/*
 * Shared IRQ support:
 *
 * On some platforms/devices there may not be enough MSI vectors
 * allocated for the virtqueues and config change notifications. In that
 * case the interrupt sources (virtqueues, config changes) must share an
 * IRQ/vector to avoid initialization failures and keep the device
 * functional. Three cases are handled:
 *
 * (1) The number of allocated vectors equals the number of virtqueues
 *     plus one (for config changes): every virtqueue and the config
 *     interrupt gets its own vector/IRQ. The best and most likely case.
 * (2) Fewer vectors than the best case, but more than one: all
 *     virtqueues share one vector/IRQ while the config interrupt gets
 *     its own.
 * (3) Only one vector is allocated: the virtqueues and the config
 *     interrupt share a single vector/IRQ. The worst and most unlikely
 *     case.
 *
 * Otherwise initialization fails.
 *
 * ifcvf_set_vq_vector() and ifcvf_set_config_vector() below set the
 * virtqueue and config vectors in the device config space so that the
 * device can deliver interrupts via DMA; a usage sketch follows them.
 */
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);

	return vp_ioread16(&cfg->queue_msix_vector);
}

u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(vector, &cfg->msix_config);

	return vp_ioread16(&cfg->msix_config);
}

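/*
 * A minimal sketch of how a caller might distribute vectors across the
 * three cases above. The helper name and the nvectors parameter are
 * hypothetical (illustration only); the real policy lives in the ifcvf
 * main driver, not in this file.
 */
#if 0
static int ifcvf_assign_vectors(struct ifcvf_hw *hw, int nvectors)
{
	u16 qid;

	if (nvectors >= (int)hw->nr_vring + 1) {
		/* Case (1): a dedicated vector per virtqueue plus config. */
		for (qid = 0; qid < hw->nr_vring; qid++)
			ifcvf_set_vq_vector(hw, qid, qid);
		ifcvf_set_config_vector(hw, hw->nr_vring);
	} else if (nvectors > 1) {
		/* Case (2): virtqueues share vector 0, config gets vector 1. */
		for (qid = 0; qid < hw->nr_vring; qid++)
			ifcvf_set_vq_vector(hw, qid, 0);
		ifcvf_set_config_vector(hw, 1);
	} else if (nvectors == 1) {
		/* Case (3): everything shares vector 0. */
		for (qid = 0; qid < hw->nr_vring; qid++)
			ifcvf_set_vq_vector(hw, qid, 0);
		ifcvf_set_config_vector(hw, 0);
	} else {
		/* No vectors at all: fail initialization. */
		return -ENOSPC;
	}

	return 0;
}
#endif
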
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
				  struct virtio_pci_cap *cap)
{
	struct ifcvf_adapter *ifcvf;
	struct pci_dev *pdev;
	u32 length, offset;
	u8 bar;

	length = le32_to_cpu(cap->length);
	offset = le32_to_cpu(cap->offset);
	bar = cap->bar;

	ifcvf = vf_to_adapter(hw);
	pdev = ifcvf->pdev;

	if (bar >= IFCVF_PCI_MAX_RESOURCE) {
		IFCVF_DBG(pdev,
			  "Invalid bar number %u to get capabilities\n", bar);
		return NULL;
	}

	if (offset + length > pci_resource_len(pdev, bar)) {
		IFCVF_DBG(pdev,
			  "offset(%u) + len(%u) overflows bar%u's capability\n",
			  offset, length, bar);
		return NULL;
	}

	return hw->base[bar] + offset;
}

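/*
 * Read @size bytes of PCI config space at @where into @val, one dword at
 * a time; @size is assumed to be a multiple of 4 here (sizeof(struct
 * virtio_pci_cap), which callers pass, is).
 */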
static int ifcvf_read_config_range(struct pci_dev *dev,
				   uint32_t *val, int size, int where)
{
	int ret, i;

	for (i = 0; i < size; i += 4) {
		ret = pci_read_config_dword(dev, where + i, val + i / 4);
		if (ret < 0)
			return ret;
	}

	return 0;
}

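/*
 * Walk the PCI capability list, map the virtio vendor-specific
 * capabilities (common, notify, ISR and device config) and record each
 * virtqueue's notify address. Returns 0 on success, -EIO if a mandatory
 * capability is missing.
 */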
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
	struct virtio_pci_cap cap;
	u16 notify_off;
	int ret;
	u8 pos;
	u32 i;

	ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
		return -EIO;
	}

	while (pos) {
		ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
					      sizeof(cap), pos);
		if (ret < 0) {
			IFCVF_ERR(pdev,
				  "Failed to get PCI capability at %x\n", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
				  hw->common_cfg);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			pci_read_config_dword(pdev, pos + sizeof(cap),
					      &hw->notify_off_multiplier);
			hw->notify_bar = cap.bar;
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
					le32_to_cpu(cap.offset);
			IFCVF_DBG(pdev, "hw->notify_base = %p\n",
				  hw->notify_base);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cap_addr(hw, &cap);
			hw->cap_dev_config_size = le32_to_cpu(cap.length);
			IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
			break;
		}

next:
		pos = cap.cap_next;
	}
	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->isr == NULL || hw->dev_cfg == NULL) {
		IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
		return -EIO;
	}
	hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);

	for (i = 0; i < hw->nr_vring; i++) {
		vp_iowrite16(i, &hw->common_cfg->queue_select);
		notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
		hw->vring[i].notify_addr = hw->notify_base +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].notify_pa = hw->notify_base_pa +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].irq = -EINVAL;
	}
	hw->lm_cfg = hw->base[IFCVF_LM_BAR];

	IFCVF_DBG(pdev,
		  "PCI capability mapping: common cfg: %p, notify base: %p\n, isr cfg: %p, device cfg: %p, multiplier: %u\n",
		  hw->common_cfg, hw->notify_base, hw->isr,
		  hw->dev_cfg, hw->notify_off_multiplier);
	hw->vqs_reused_irq = -EINVAL;
	hw->config_irq = -EINVAL;

	return 0;
}

u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
	return vp_ioread8(&hw->common_cfg->device_status);
}

void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
	vp_iowrite8(status, &hw->common_cfg->device_status);
}

void ifcvf_reset(struct ifcvf_hw *hw)
{
	hw->config_cb.callback = NULL;
	hw->config_cb.private = NULL;

	ifcvf_set_status(hw, 0);
	/* flush set_status, make sure VF is stopped, reset */
	ifcvf_get_status(hw);
}

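/*
 * OR @status into the current device status (read-modify-write), then
 * read it back so the write is flushed to the device.
 */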
static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
{
	if (status != 0)
		status |= ifcvf_get_status(hw);

	ifcvf_set_status(hw, status);
	ifcvf_get_status(hw);
}

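/*
 * Read the 64-bit device feature word through the two 32-bit
 * device_feature_select windows. ifcvf_get_features() below returns the
 * copy cached in hw->hw_features instead of touching the device.
 */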
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 features_lo, features_hi;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features_lo = vp_ioread32(&cfg->device_feature);

	vp_iowrite32(1, &cfg->device_feature_select);
	features_hi = vp_ioread32(&cfg->device_feature);

	features = ((u64)features_hi << 32) | features_lo;

	return features;
}

u64 ifcvf_get_features(struct ifcvf_hw *hw)
{
	return hw->hw_features;
}

int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
	struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);

	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		IFCVF_ERR(ifcvf->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EINVAL;
	}

	return 0;
}

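/*
 * A minimal sketch (illustration only) of how these helpers compose
 * during feature negotiation; wanted_features is hypothetical and the
 * real flow lives in the vDPA ops of the main driver.
 */
#if 0
	u64 device_features = ifcvf_get_hw_features(hw);
	u64 driver_features = device_features & wanted_features;

	if (ifcvf_verify_min_features(hw, driver_features))
		return -EINVAL;

	hw->req_features = driver_features;
#endif
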
u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
	struct ifcvf_adapter *adapter;
	u32 net_config_size = sizeof(struct virtio_net_config);
	u32 blk_config_size = sizeof(struct virtio_blk_config);
	u32 cap_size = hw->cap_dev_config_size;
	u32 config_size;

	adapter = vf_to_adapter(hw);
	/* If the onboard device config space size is greater than
	 * the size of struct virtio_net/blk_config, only the
	 * spec-defined portion is returned. This is very unlikely;
	 * defensive programming.
	 */
	switch (hw->dev_type) {
	case VIRTIO_ID_NET:
		config_size = min(cap_size, net_config_size);
		break;
	case VIRTIO_ID_BLOCK:
		config_size = min(cap_size, blk_config_size);
		break;
	default:
		config_size = 0;
		IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
	}

	return config_size;
}

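/*
 * Copy @length bytes of device config at @offset into @dst. The copy is
 * retried until the config generation counter is stable across it, so a
 * read torn by a concurrent device-side config change is never returned.
 */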
void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
			   void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	WARN_ON(offset + length > hw->config_size);
	do {
		old_gen = vp_ioread8(&hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = vp_ioread8(hw->dev_cfg + offset + i);

		new_gen = vp_ioread8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

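/* Write @length bytes from @src into device config space at @offset. */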
void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
			    const void *src, int length)
{
	const u8 *p;
	int i;

	p = src;
	WARN_ON(offset + length > hw->config_size);
	for (i = 0; i < length; i++)
		vp_iowrite8(*p++, hw->dev_cfg + offset + i);
}

static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);

	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}

static int ifcvf_config_features(struct ifcvf_hw *hw)
{
	struct ifcvf_adapter *ifcvf;

	ifcvf = vf_to_adapter(hw);
	ifcvf_set_features(hw, hw->req_features);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);

	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
		IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
		return -EIO;
	}

	return 0;
}

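/*
 * The LM (live migration) BAR keeps one idx_addr pair per queue pair:
 * qid / 2 selects the pair, qid % 2 the queue within it. This reads the
 * device's last_avail_idx for @qid.
 */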
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u16 last_avail_idx;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	q_pair_id = qid / 2;
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	last_avail_idx = vp_ioread16(avail_idx_addr);

	return last_avail_idx;
}

int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	q_pair_id = qid / 2;
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	hw->vring[qid].last_avail_idx = num;
	vp_iowrite16(num, avail_idx_addr);

	return 0;
}

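/*
 * Program each ready virtqueue's descriptor/avail/used addresses, size
 * and last_avail_idx into the common config, then enable it. The loop
 * stops at the first vring that is not ready.
 */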
static int ifcvf_hw_enable(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg;
	u32 i;

	cfg = hw->common_cfg;
	for (i = 0; i < hw->nr_vring; i++) {
		if (!hw->vring[i].ready)
			break;

		vp_iowrite16(i, &cfg->queue_select);
		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
				     &cfg->queue_desc_hi);
		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
				     &cfg->queue_avail_hi);
		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
				     &cfg->queue_used_hi);
		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
		vp_iowrite16(1, &cfg->queue_enable);
	}

	return 0;
}

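/*
 * Detach the config and virtqueue interrupts from their vectors by
 * writing VIRTIO_MSI_NO_VECTOR.
 */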
static void ifcvf_hw_disable(struct ifcvf_hw *hw)
{
	u32 i;

	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
	for (i = 0; i < hw->nr_vring; i++) {
		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
	}
}

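/*
 * Bring the device up following the virtio initialization sequence:
 * reset, ACKNOWLEDGE, DRIVER, feature negotiation (FEATURES_OK),
 * virtqueue setup, then DRIVER_OK.
 */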
int ifcvf_start_hw(struct ifcvf_hw *hw)
{
	ifcvf_reset(hw);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);

	if (ifcvf_config_features(hw) < 0)
		return -EINVAL;

	if (ifcvf_hw_enable(hw) < 0)
		return -EINVAL;

	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);

	return 0;
}

void ifcvf_stop_hw(struct ifcvf_hw *hw)
{
	ifcvf_hw_disable(hw);
	ifcvf_reset(hw);
}

void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
	vp_iowrite16(qid, hw->vring[qid].notify_addr);
}