virtio: last minute fixes
A collection of small fixes that look worth having in this release.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmUv/JMPHG1zdEByZWRo
YXQuY29tAAoJECgfDbjSjVRpO/kH/j/uunE6oOE/BhtfO1USciebjRhLJ7lvoAvS
OD4/bcA45GRGLGIZaJtkcCIOOb9djUWLsS3QqA2UUFX+NN2/teEX6lsnv1tJTjdC
a2DkDS6AVYwp+rpzxSE5PUn/ImpiDt0/+R0ZbN56R3rHTOl7nFeXvutMbzxNXZvL
eWLcSDmRg7nmAdF+YbZ5omdgSL11Wi+dBFEJ0unEsecyu8pO7WcAGYvU6x/x04XJ
uLrjsaAGKr3rtoLLZ1DtnSmoED/b/lwDwzVR5REsg4kf2aiHxj1+kKGNXfrtqMl5
2OVxZEorcLufHM212LW4KT3Ncw4KE4xJzjt2mzEwO/ztgtomnBM=
=Rhxy
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes from Michael Tsirkin:
 "A collection of small fixes that look worth having in this release"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_pci: fix the common cfg map size
  virtio-crypto: handle config changed by work queue
  vhost: Allow null msg.size on VHOST_IOTLB_INVALIDATE
  vdpa/mlx5: Fix firmware error on creation of 1k VQs
  virtio_balloon: Fix endless deflation and inflation on arm64
  vdpa/mlx5: Fix double release of debugfs entry
  virtio-mmio: fix memory leak of vm_dev
  vdpa_sim_blk: Fix the potential leak of mgmt_dev
  tools/virtio: Add dma sync api for virtio test
Merge commit: 7c14564010
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -35,6 +35,9 @@ struct virtio_crypto {
 	struct virtqueue *ctrl_vq;
 	struct data_queue *data_vq;
 
+	/* Work struct for config space updates */
+	struct work_struct config_work;
+
 	/* To protect the vq operations for the controlq */
 	spinlock_t ctrl_lock;
 
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -335,6 +335,14 @@ static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
 	virtcrypto_free_queues(vcrypto);
 }
 
+static void vcrypto_config_changed_work(struct work_struct *work)
+{
+	struct virtio_crypto *vcrypto =
+		container_of(work, struct virtio_crypto, config_work);
+
+	virtcrypto_update_status(vcrypto);
+}
+
 static int virtcrypto_probe(struct virtio_device *vdev)
 {
 	int err = -EFAULT;
@@ -454,6 +462,8 @@ static int virtcrypto_probe(struct virtio_device *vdev)
 	if (err)
 		goto free_engines;
 
+	INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);
+
 	return 0;
 
 free_engines:
@@ -490,6 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
 
 	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
 
+	flush_work(&vcrypto->config_work);
 	if (virtcrypto_dev_started(vcrypto))
 		virtcrypto_dev_stop(vcrypto);
 	virtio_reset_device(vdev);
@@ -504,7 +515,7 @@ static void virtcrypto_config_changed(struct virtio_device *vdev)
 {
 	struct virtio_crypto *vcrypto = vdev->priv;
 
-	virtcrypto_update_status(vcrypto);
+	schedule_work(&vcrypto->config_work);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -512,6 +523,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
 {
 	struct virtio_crypto *vcrypto = vdev->priv;
 
+	flush_work(&vcrypto->config_work);
 	virtio_reset_device(vdev);
 	virtcrypto_free_unused_reqs(vcrypto);
 	if (virtcrypto_dev_started(vcrypto))
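The change above moves the config-space update out of the virtio config_changed callback, which can run in a context that must not sleep, into process context via a work item; remove and freeze flush the work first so the handler cannot touch a device that is going away. A minimal standalone sketch of that defer-to-workqueue pattern (names here are illustrative, not from the patch):

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct config_work;
};

/* Runs in process context: may sleep, take mutexes, re-read config. */
static void my_config_work(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, config_work);

	(void)d;	/* ... perform the actual status update here ... */
}

/* Called from the device's config-changed interrupt: must not sleep. */
static void my_config_changed(struct my_dev *d)
{
	schedule_work(&d->config_work);
}

static void my_setup(struct my_dev *d)
{
	INIT_WORK(&d->config_work, my_config_work);
}

static void my_teardown(struct my_dev *d)
{
	flush_work(&d->config_work);	/* no handler runs past this point */
}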
--- a/drivers/vdpa/mlx5/net/debug.c
+++ b/drivers/vdpa/mlx5/net/debug.c
@@ -146,7 +146,8 @@ void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev)
 	ndev->rx_dent = debugfs_create_dir("rx", ndev->debugfs);
 }
 
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg)
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev)
 {
-	debugfs_remove_recursive(dbg);
+	debugfs_remove_recursive(ndev->debugfs);
+	ndev->debugfs = NULL;
 }
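Passing the net device instead of a raw dentry lets the helper clear ndev->debugfs after removal, so the several call sites that can each run teardown (the setup_driver error path, teardown_driver, and dev_del below) no longer release the same entry twice; debugfs_remove_recursive() treats a NULL dentry as a no-op. The idiom in isolation (illustrative sketch, not the driver code):

#include <linux/debugfs.h>

struct my_state {
	struct dentry *debugfs;
};

static void my_remove_debugfs(struct my_state *st)
{
	debugfs_remove_recursive(st->debugfs);	/* NULL is silently ignored */
	st->debugfs = NULL;			/* a second call becomes a no-op */
}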
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -625,30 +625,70 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
 	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
 }
 
+static int read_umem_params(struct mlx5_vdpa_net *ndev)
+{
+	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+	u16 opmod = (MLX5_CAP_VDPA_EMULATION << 1) | (HCA_CAP_OPMOD_GET_CUR & 0x01);
+	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+	int out_size;
+	void *caps;
+	void *out;
+	int err;
+
+	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+	out = kzalloc(out_size, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+	if (err) {
+		mlx5_vdpa_warn(&ndev->mvdev,
+			       "Failed reading vdpa umem capabilities with err %d\n", err);
+		goto out;
+	}
+
+	caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+	ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a);
+	ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b);
+
+	ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a);
+	ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b);
+
+	ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a);
+	ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b);
+
+out:
+	kfree(out);
+	return 0;
+}
+
 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
 			  struct mlx5_vdpa_umem **umemp)
 {
-	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
-	int p_a;
-	int p_b;
+	u32 p_a;
+	u32 p_b;
 
 	switch (num) {
 	case 1:
-		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
-		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
+		p_a = ndev->umem_1_buffer_param_a;
+		p_b = ndev->umem_1_buffer_param_b;
 		*umemp = &mvq->umem1;
 		break;
 	case 2:
-		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
-		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
+		p_a = ndev->umem_2_buffer_param_a;
+		p_b = ndev->umem_2_buffer_param_b;
 		*umemp = &mvq->umem2;
 		break;
 	case 3:
-		p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
-		p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
+		p_a = ndev->umem_3_buffer_param_a;
+		p_b = ndev->umem_3_buffer_param_b;
 		*umemp = &mvq->umem3;
 		break;
 	}
 
 	(*umemp)->size = p_a * mvq->num_ent + p_b;
 }
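read_umem_params() queries the umem buffer parameters once from the device's current capabilities (note HCA_CAP_OPMOD_GET_CUR in the opmod) and caches them on the net device; set_umem_size() then derives each umem as size = p_a * num_ent + p_b from the cached u32 values. A worked example of that formula with made-up parameters (the real values come from QUERY_HCA_CAP, not from this sketch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t p_a = 128, p_b = 4096;	/* hypothetical umem_1_buffer_param_{a,b} */
	uint16_t num_ent = 1024;	/* a 1k-entry virtqueue */
	uint64_t size = (uint64_t)p_a * num_ent + p_b;

	/* 128 * 1024 + 4096 = 135168 bytes for this umem */
	printf("umem size: %llu\n", (unsigned long long)size);
	return 0;
}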
@@ -2679,6 +2719,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 		goto out;
 	}
 	mlx5_vdpa_add_debugfs(ndev);
+
+	err = read_umem_params(ndev);
+	if (err)
+		goto err_setup;
+
 	err = setup_virtqueues(mvdev);
 	if (err) {
 		mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
@@ -2713,7 +2758,7 @@ err_tir:
 err_rqt:
 	teardown_virtqueues(ndev);
 err_setup:
-	mlx5_vdpa_remove_debugfs(ndev->debugfs);
+	mlx5_vdpa_remove_debugfs(ndev);
 out:
 	return err;
 }
@@ -2727,8 +2772,7 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
 	if (!ndev->setup)
 		return;
 
-	mlx5_vdpa_remove_debugfs(ndev->debugfs);
-	ndev->debugfs = NULL;
+	mlx5_vdpa_remove_debugfs(ndev);
 	teardown_steering(ndev);
 	destroy_tir(ndev);
 	destroy_rqt(ndev);
@@ -3489,8 +3533,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev)
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct workqueue_struct *wq;
 
-	mlx5_vdpa_remove_debugfs(ndev->debugfs);
-	ndev->debugfs = NULL;
 	unregister_link_notifier(ndev);
 	_vdpa_unregister_device(dev);
 	wq = mvdev->wq;
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.h
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
@@ -65,6 +65,15 @@ struct mlx5_vdpa_net {
 	struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
 	struct mlx5_vdpa_irq_pool irqp;
 	struct dentry *debugfs;
+
+	u32 umem_1_buffer_param_a;
+	u32 umem_1_buffer_param_b;
+
+	u32 umem_2_buffer_param_a;
+	u32 umem_2_buffer_param_b;
+
+	u32 umem_3_buffer_param_a;
+	u32 umem_3_buffer_param_b;
 };
 
 struct mlx5_vdpa_counter {
@@ -88,7 +97,7 @@ struct macvlan_node {
 };
 
 void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev);
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg);
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev);
 void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev);
 void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev);
 void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev);
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -499,12 +499,13 @@ static int __init vdpasim_blk_init(void)
 				  GFP_KERNEL);
 		if (!shared_buffer) {
 			ret = -ENOMEM;
-			goto parent_err;
+			goto mgmt_dev_err;
 		}
 	}
 
 	return 0;
 
+mgmt_dev_err:
+	vdpa_mgmtdev_unregister(&mgmt_dev);
 parent_err:
 	device_unregister(&vdpasim_blk_mgmtdev);
 	return ret;
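Before the fix, a failed shared_buffer allocation jumped to parent_err, unregistering the parent device but leaking the already-registered mgmt_dev. The new label keeps the unwind a mirror image of the setup order. The shape of the idiom, reduced to a standalone sketch (illustrative names, userspace stand-ins for the kernel calls):

#include <stdlib.h>

static int register_a(void) { return 0; }	/* device_register() stand-in */
static void unregister_a(void) { }
static int register_b(void) { return 0; }	/* vdpa_mgmtdev_register() stand-in */
static void unregister_b(void) { }

static int my_init(void)
{
	void *buf;
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_a;

	buf = malloc(4096);
	if (!buf) {
		ret = -1;	/* -ENOMEM in the driver */
		goto err_b;	/* must undo step B and then step A */
	}
	free(buf);

	return 0;

err_b:
	unregister_b();	/* the step the original error path skipped */
err_a:
	unregister_a();
	return ret;
}

int main(void) { return my_init(); }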
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1458,9 +1458,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
 		goto done;
 	}
 
-	if ((msg.type == VHOST_IOTLB_UPDATE ||
-	     msg.type == VHOST_IOTLB_INVALIDATE) &&
-	     msg.size == 0) {
+	if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
 		ret = -EINVAL;
 		goto done;
 	}
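A zero-length VHOST_IOTLB_UPDATE is still rejected, since there is nothing to map, but a zero-length VHOST_IOTLB_INVALIDATE is a legitimate no-op that userspace may emit, so it no longer fails with EINVAL. A hedged sketch of the userspace message this now permits, using the uapi types as I understand them from include/uapi/linux/vhost_types.h:

#include <linux/vhost_types.h>	/* struct vhost_msg, VHOST_IOTLB_* */
#include <string.h>

/* Build an IOTLB invalidate covering zero bytes: accepted after this fix. */
static void build_null_invalidate(struct vhost_msg *msg, __u64 iova)
{
	memset(msg, 0, sizeof(*msg));
	msg->type = VHOST_IOTLB_MSG;
	msg->iotlb.iova = iova;
	msg->iotlb.size = 0;	/* previously bounced with -EINVAL */
	msg->iotlb.type = VHOST_IOTLB_INVALIDATE;
}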
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -395,7 +395,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
 	virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
 			&num_pages);
 
-	target = num_pages;
+	/*
+	 * Aligned up to guest page size to avoid inflating and deflating
+	 * balloon endlessly.
+	 */
+	target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
 	return target - vb->num_pages;
 }
 
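towards_target() counts in 4 KiB balloon pages, but the guest can only move memory in units of its own page size. On arm64 with 64 KiB pages, VIRTIO_BALLOON_PAGES_PER_PAGE is 16, so an unaligned host target (say, 1000 balloon pages) was unreachable in 16-page steps and the driver inflated and deflated around it forever; rounding the target up makes it reachable. The arithmetic in isolation (standalone sketch, not kernel code):

#include <stdio.h>

/* Round x up to a multiple of a; same result as the kernel's ALIGN() here. */
#define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned int per_page = 16;	/* 64K guest page / 4K balloon page */
	unsigned int target = 1000;	/* host request, not a multiple of 16 */

	/* 1000 -> 1008: now reachable in steps of 16 */
	printf("aligned target: %u\n", ALIGN_UP(target, per_page));
	return 0;
}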
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -631,14 +631,17 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	spin_lock_init(&vm_dev->lock);
 
 	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
-	if (IS_ERR(vm_dev->base))
-		return PTR_ERR(vm_dev->base);
+	if (IS_ERR(vm_dev->base)) {
+		rc = PTR_ERR(vm_dev->base);
+		goto free_vm_dev;
+	}
 
 	/* Check magic value */
 	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
 	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
 		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
-		return -ENODEV;
+		rc = -ENODEV;
+		goto free_vm_dev;
 	}
 
 	/* Check device version */
@@ -646,7 +649,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	if (vm_dev->version < 1 || vm_dev->version > 2) {
 		dev_err(&pdev->dev, "Version %ld not supported!\n",
 			vm_dev->version);
-		return -ENXIO;
+		rc = -ENXIO;
+		goto free_vm_dev;
 	}
 
 	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -655,7 +659,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
 		 * with no function. End probing now with no error reported.
 		 */
-		return -ENODEV;
+		rc = -ENODEV;
+		goto free_vm_dev;
 	}
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
@@ -685,6 +690,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 		put_device(&vm_dev->vdev.dev);
 
 	return rc;
+
+free_vm_dev:
+	kfree(vm_dev);
+	return rc;
 }
 
 static int virtio_mmio_remove(struct platform_device *pdev)
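vm_dev is allocated with kzalloc() earlier in probe, and only once registration hands it to the device core does the normal release path free it; every early return before that point therefore leaked the allocation. The leak pattern in miniature (an illustrative userspace sketch, not the driver code):

#include <stdlib.h>

struct vm_device { int placeholder; };

static int probe(int fail_early)
{
	struct vm_device *vm_dev = calloc(1, sizeof(*vm_dev));
	int rc = 0;

	if (!vm_dev)
		return -1;

	if (fail_early) {
		rc = -1;
		goto free_vm_dev;	/* a bare "return rc" here would leak vm_dev */
	}

	/* ... registration transfers ownership to the device core ... */
	return rc;

free_vm_dev:
	free(vm_dev);
	return rc;
}

int main(void) { return probe(0); }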
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -291,7 +291,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
 	err = -EINVAL;
 	mdev->common = vp_modern_map_capability(mdev, common,
 				      sizeof(struct virtio_pci_common_cfg), 4,
-				      0, sizeof(struct virtio_pci_common_cfg),
+				      0, sizeof(struct virtio_pci_modern_common_cfg),
 				      NULL, NULL);
 	if (!mdev->common)
 		goto err_map_common;
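The common config window was mapped with the size of the classic struct virtio_pci_common_cfg, so accesses to the newer fields that follow it fell outside the mapping. For reference, the extended layout being mapped, as I believe include/linux/virtio_pci_modern.h defines it around this release (shown for illustration, not a redefinition):

struct virtio_pci_modern_common_cfg {
	struct virtio_pci_common_cfg cfg;	/* classic fields */

	__le16 queue_notify_data;	/* read-write */
	__le16 queue_reset;		/* read-write */
};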
--- a/tools/virtio/linux/dma-mapping.h
+++ b/tools/virtio/linux/dma-mapping.h
@@ -24,11 +24,23 @@ enum dma_data_direction {
 #define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
 
 #define dma_map_single(d, p, s, dir) (virt_to_phys(p))
+#define dma_map_single_attrs(d, p, s, dir, a) (virt_to_phys(p))
 #define dma_mapping_error(...) (0)
 
 #define dma_unmap_single(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
 #define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
 
 #define sg_dma_address(sg) (0)
 #define dma_need_sync(v, a) (0)
+#define dma_unmap_single_attrs(d, a, s, r, t) do { \
+	(void)(d); (void)(a); (void)(s); (void)(r); (void)(t); \
+} while (0)
+#define dma_sync_single_range_for_cpu(d, a, o, s, r) do { \
+	(void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
+#define dma_sync_single_range_for_device(d, a, o, s, r) do { \
+	(void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
 #define dma_max_mapping_size(...) SIZE_MAX
 
 #endif
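The test build stubs out the DMA API. Each stub is wrapped in do { ... } while (0) so it behaves as a single statement even in an un-braced if/else, and the (void) casts consume every macro argument so test builds see no unused-variable warnings. A standalone illustration of why that shape matters (generic macro name, not from the header):

#include <stdio.h>

#define stub_sync(dev, addr, size) do { \
	(void)(dev); (void)(addr); (void)(size); \
} while (0)

int main(void)
{
	int dev = 0, addr = 0, size = 0;

	if (dev)
		stub_sync(dev, addr, size);	/* one statement: safe without braces */
	else
		puts("stub compiled away cleanly");
	return 0;
}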