virtio, vhost: fixes, cleanups
fixes and cleanups all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJcPTc7AAoJECgfDbjSjVRpOEgH/Ahdx7VMYJtFsdmoJKiwhB7M
jRRi9R903V9H87vl1BXy6dutHw+WONJtm6FSZ1ayNWlVmUmWS6vci+IUErr2uDrv
KSG+dJMQLlF7t1dnLRwlLazvGa4/58+u0J459uKPQ5ckqwV5wXPjUS5Z0xF3ldxM
Twz6vhYRGKCUc10YZm/WmsjlLROgaNtRya10PzAGVmXPzbCpvJfiojKWJER+Eigq
JxWynTCm/YvIk824Ls9cDBVkDvb8GPS3blVbFnusR+D3ktvX7vLDPOsErGn4umVS
nUm3/WiQALB9fKer+SsgcEGVh+fa06KIITK+IBblULmrAIT3CJdJp70UJBjfdTM=
=DCkE
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio/vhost fixes and cleanups from Michael Tsirkin:
 "Fixes and cleanups all over the place"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vhost/scsi: Use copy_to_iter() to send control queue response
  vhost: return EINVAL if iovecs size does not match the message size
  virtio-balloon: tweak config_changed implementation
  virtio: don't allocate vqs when names[i] = NULL
  virtio_pci: use queue idx instead of array idx to set up the vq
  virtio: document virtio_config_ops restrictions
  virtio: fix virtio_config_ops description
commit bb617b9b45
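Several of the hunks below ("virtio: don't allocate vqs when names[i] = NULL" and "virtio_pci: use queue idx instead of array idx to set up the vq") apply the same pattern to each transport: a slot whose name is NULL gets no virtqueue, and the slots that are set up are numbered with a separate queue_idx counter instead of the array index i, so the device-side queue numbers stay contiguous. A minimal standalone sketch of that index mapping (illustrative only; the names[] values and printout are made up, this is not code from any of the drivers below):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* a NULL name means "this slot does not need a virtqueue" */
	const char *names[] = { "rx", NULL, "tx", "ctrl" };
	size_t nvqs = sizeof(names) / sizeof(names[0]);
	unsigned int queue_idx = 0;

	for (size_t i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			printf("slot %zu: skipped, no queue allocated\n", i);
			continue;
		}
		/*
		 * Using i here would address device queue 2 for "tx" even
		 * though only one queue was set up before it; queue_idx
		 * keeps the device numbering dense.
		 */
		printf("slot %zu: \"%s\" -> device queue %u\n",
		       i, names[i], queue_idx++);
	}
	return 0;
}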
@@ -394,16 +394,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
 	struct _vop_vdev *vdev = to_vopvdev(dev);
 	struct vop_device *vpdev = vdev->vpdev;
 	struct mic_device_ctrl __iomem *dc = vdev->dc;
-	int i, err, retry;
+	int i, err, retry, queue_idx = 0;
 
 	/* We must have this many virtqueues. */
 	if (nvqs > ioread8(&vdev->desc->num_vq))
 		return -ENOENT;
 
 	for (i = 0; i < nvqs; ++i) {
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
+
 		dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
 			__func__, i, names[i]);
-		vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i],
+		vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
 				     ctx ? ctx[i] : false);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
 				 const bool * ctx,
 				 struct irq_affinity *desc)
 {
-	int i, ret;
+	int i, ret, queue_idx = 0;
 
 	for (i = 0; i < nvqs; ++i) {
-		vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i],
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
+
+		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
 				    ctx ? ctx[i] : false);
 		if (IS_ERR(vqs[i])) {
 			ret = PTR_ERR(vqs[i]);
@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 {
 	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
 	unsigned long *indicatorp = NULL;
-	int ret, i;
+	int ret, i, queue_idx = 0;
 	struct ccw1 *ccw;
 
 	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		return -ENOMEM;
 
 	for (i = 0; i < nvqs; ++i) {
-		vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
-					     ctx ? ctx[i] : false, ccw);
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
+
+		vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
+					     names[i], ctx ? ctx[i] : false,
+					     ccw);
 		if (IS_ERR(vqs[i])) {
 			ret = PTR_ERR(vqs[i]);
 			vqs[i] = NULL;
@@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
 			   struct vhost_virtqueue *vq,
 			   struct vhost_scsi_ctx *vc)
 {
-	struct virtio_scsi_ctrl_tmf_resp __user *resp;
 	struct virtio_scsi_ctrl_tmf_resp rsp;
+	struct iov_iter iov_iter;
 	int ret;
 
 	pr_debug("%s\n", __func__);
 	memset(&rsp, 0, sizeof(rsp));
 	rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
-	resp = vq->iov[vc->out].iov_base;
-	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
-	if (!ret)
+
+	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+	if (likely(ret == sizeof(rsp)))
 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
 	else
 		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
@@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
 			struct vhost_virtqueue *vq,
 			struct vhost_scsi_ctx *vc)
 {
-	struct virtio_scsi_ctrl_an_resp __user *resp;
 	struct virtio_scsi_ctrl_an_resp rsp;
+	struct iov_iter iov_iter;
 	int ret;
 
 	pr_debug("%s\n", __func__);
 	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
 	rsp.response = VIRTIO_SCSI_S_OK;
-	resp = vq->iov[vc->out].iov_base;
-	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
-	if (!ret)
+
+	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+
+	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+	if (likely(ret == sizeof(rsp)))
 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
 	else
 		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
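The two vhost/scsi hunks above stop writing the response through __copy_to_user() on the first iovec and instead build an iov_iter over the descriptor's iovecs and use copy_to_iter(), so a response that the guest splits across several buffers is still delivered in full. A user-space analogue of why a single-iovec copy is not enough (a hand-rolled helper written for illustration under that assumption, not the kernel's iov_iter API):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy len bytes of src into an iovec array, spilling into later entries. */
static size_t copy_to_iovec(const void *src, size_t len,
			    const struct iovec *iov, int iovcnt)
{
	size_t done = 0;

	for (int i = 0; i < iovcnt && done < len; i++) {
		size_t chunk = len - done;

		if (chunk > iov[i].iov_len)
			chunk = iov[i].iov_len;
		memcpy(iov[i].iov_base, (const char *)src + done, chunk);
		done += chunk;
	}
	return done;
}

int main(void)
{
	char first[3], second[6];
	struct iovec iov[2] = {
		{ .iov_base = first,  .iov_len = sizeof(first) },
		{ .iov_base = second, .iov_len = sizeof(second) },
	};
	const char rsp[] = "RESPONSE";	/* 9 bytes including the terminator */

	/* Writing only into iov[0] (3 bytes) would truncate the response. */
	size_t n = copy_to_iovec(rsp, sizeof(rsp), iov, 2);
	printf("copied %zu of %zu bytes\n", n, sizeof(rsp));
	return 0;
}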
@@ -1034,8 +1034,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
 	int type, ret;
 
 	ret = copy_from_iter(&type, sizeof(type), from);
-	if (ret != sizeof(type))
+	if (ret != sizeof(type)) {
+		ret = -EINVAL;
 		goto done;
+	}
 
 	switch (type) {
 	case VHOST_IOTLB_MSG:
@@ -1054,8 +1056,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
 
 	iov_iter_advance(from, offset);
 	ret = copy_from_iter(&msg, sizeof(msg), from);
-	if (ret != sizeof(msg))
+	if (ret != sizeof(msg)) {
+		ret = -EINVAL;
 		goto done;
+	}
 	if (vhost_process_iotlb_msg(dev, &msg)) {
 		ret = -EFAULT;
 		goto done;
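The two vhost hunks above change a short copy_from_iter() of the message header or body from a silent "goto done" into an explicit -EINVAL, so user space that supplies iovecs smaller than the message is told about it. A standalone sketch of the same check (the struct and helper are invented for illustration; this is not the vhost interface):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct msg_hdr {
	int type;
};

/* Reject a buffer that cannot hold a full header instead of ignoring it. */
static ssize_t read_header(const void *buf, size_t len, struct msg_hdr *hdr)
{
	if (len < sizeof(*hdr))
		return -EINVAL;
	memcpy(hdr, buf, sizeof(*hdr));
	return (ssize_t)sizeof(*hdr);
}

int main(void)
{
	struct msg_hdr hdr;
	char too_short[2] = { 0 };

	ssize_t ret = read_header(too_short, sizeof(too_short), &hdr);
	if (ret < 0)
		printf("short message rejected with %zd (EINVAL is %d)\n",
		       ret, EINVAL);
	return 0;
}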
@@ -61,6 +61,10 @@ enum virtio_balloon_vq {
 	VIRTIO_BALLOON_VQ_MAX
 };
 
+enum virtio_balloon_config_read {
+	VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
+};
+
 struct virtio_balloon {
 	struct virtio_device *vdev;
 	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
@@ -77,14 +81,20 @@ struct virtio_balloon {
 	/* Prevent updating balloon when it is being canceled. */
 	spinlock_t stop_update_lock;
 	bool stop_update;
+	/* Bitmap to indicate if reading the related config fields are needed */
+	unsigned long config_read_bitmap;
 
 	/* The list of allocated free pages, waiting to be given back to mm */
 	struct list_head free_page_list;
 	spinlock_t free_page_list_lock;
 	/* The number of free page blocks on the above list */
 	unsigned long num_free_page_blocks;
-	/* The cmd id received from host */
-	u32 cmd_id_received;
+	/*
+	 * The cmd id received from host.
+	 * Read it via virtio_balloon_cmd_id_received to get the latest value
+	 * sent from host.
+	 */
+	u32 cmd_id_received_cache;
 	/* The cmd id that is actively in use */
 	__virtio32 cmd_id_active;
 	/* Buffer to store the stop sign */
@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
 	return num_returned;
 }
 
+static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
+{
+	if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+		return;
+
+	/* No need to queue the work if the bit was already set. */
+	if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
+			     &vb->config_read_bitmap))
+		return;
+
+	queue_work(vb->balloon_wq, &vb->report_free_page_work);
+}
+
 static void virtballoon_changed(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb = vdev->priv;
 	unsigned long flags;
-	s64 diff = towards_target(vb);
-
-	if (diff) {
-		spin_lock_irqsave(&vb->stop_update_lock, flags);
-		if (!vb->stop_update)
-			queue_work(system_freezable_wq,
-				   &vb->update_balloon_size_work);
-		spin_unlock_irqrestore(&vb->stop_update_lock, flags);
-	}
 
-	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
-		virtio_cread(vdev, struct virtio_balloon_config,
-			     free_page_report_cmd_id, &vb->cmd_id_received);
-		if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
-			/* Pass ULONG_MAX to give back all the free pages */
-			return_free_pages_to_mm(vb, ULONG_MAX);
-		} else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
-			   vb->cmd_id_received !=
-			   virtio32_to_cpu(vdev, vb->cmd_id_active)) {
-			spin_lock_irqsave(&vb->stop_update_lock, flags);
-			if (!vb->stop_update) {
-				queue_work(vb->balloon_wq,
-					   &vb->report_free_page_work);
-			}
-			spin_unlock_irqrestore(&vb->stop_update_lock, flags);
-		}
+	spin_lock_irqsave(&vb->stop_update_lock, flags);
+	if (!vb->stop_update) {
+		queue_work(system_freezable_wq,
+			   &vb->update_balloon_size_work);
+		virtio_balloon_queue_free_page_work(vb);
 	}
+	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
 }
 
 static void update_balloon_size(struct virtio_balloon *vb)
@@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb)
 	return 0;
 }
 
+static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
+{
+	if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
+			       &vb->config_read_bitmap))
+		virtio_cread(vb->vdev, struct virtio_balloon_config,
+			     free_page_report_cmd_id,
+			     &vb->cmd_id_received_cache);
+
+	return vb->cmd_id_received_cache;
+}
+
 static int send_cmd_id_start(struct virtio_balloon *vb)
 {
 	struct scatterlist sg;
@@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
 	while (virtqueue_get_buf(vq, &unused))
 		;
 
-	vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received);
+	vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
+					    virtio_balloon_cmd_id_received(vb));
 	sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
 	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
 	if (!err)
@@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb)
 		 * stop the reporting.
 		 */
 		cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
-		if (cmd_id_active != vb->cmd_id_received)
+		if (unlikely(cmd_id_active !=
+			     virtio_balloon_cmd_id_received(vb)))
 			break;
 
 		/*
@@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb)
 	return 0;
 }
 
-static void report_free_page_func(struct work_struct *work)
+static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
 {
 	int err;
-	struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
-						 report_free_page_work);
 	struct device *dev = &vb->vdev->dev;
 
 	/* Start by sending the received cmd id to host with an outbuf. */
@@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work)
 		dev_err(dev, "Failed to send a stop id, err = %d\n", err);
 }
 
+static void report_free_page_func(struct work_struct *work)
+{
+	struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
+						 report_free_page_work);
+	u32 cmd_id_received;
+
+	cmd_id_received = virtio_balloon_cmd_id_received(vb);
+	if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
+		/* Pass ULONG_MAX to give back all the free pages */
+		return_free_pages_to_mm(vb, ULONG_MAX);
+	} else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
+		   cmd_id_received !=
+		   virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
+		virtio_balloon_report_free_page(vb);
+	}
+}
+
 #ifdef CONFIG_BALLOON_COMPACTION
 /*
  * virtballoon_migratepage - perform the balloon page migration on behalf of
@@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
 			goto out_del_vqs;
 		}
 		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
-		vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
+		vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
 		vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
 						  VIRTIO_BALLOON_CMD_ID_STOP);
 		vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
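The balloon hunks above replace the config read done directly in virtballoon_changed() with a bit in config_read_bitmap: the config-changed callback only marks the field stale and queues work, and virtio_balloon_cmd_id_received() re-reads the device config only when that bit is set. A standalone sketch of that mark-then-read-lazily idea (plain flags instead of the kernel's atomic test_and_set_bit/test_and_clear_bit, and the "device register" is simulated):

#include <stdio.h>

#define CONFIG_READ_CMD_ID 0UL

static unsigned long config_read_bitmap;	/* which fields need re-reading */
static unsigned int cmd_id_cache;		/* last value read from "config space" */
static unsigned int device_cmd_id = 42;		/* stands in for the device register */

/* Config-changed callback: cheap, just remember that the field is stale. */
static void config_changed(void)
{
	config_read_bitmap |= 1UL << CONFIG_READ_CMD_ID;
}

/* Reader: performs the expensive config read only if the bit was set. */
static unsigned int cmd_id_received(void)
{
	if (config_read_bitmap & (1UL << CONFIG_READ_CMD_ID)) {
		config_read_bitmap &= ~(1UL << CONFIG_READ_CMD_ID);
		cmd_id_cache = device_cmd_id;
	}
	return cmd_id_cache;
}

int main(void)
{
	config_changed();
	printf("first read (refreshes cache): %u\n", cmd_id_received());
	printf("second read (cache only):     %u\n", cmd_id_received());
	return 0;
}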
@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 	unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
-	int i, err;
+	int i, err, queue_idx = 0;
 
 	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
 			dev_name(&vdev->dev), vm_dev);
@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		return err;
 
 	for (i = 0; i < nvqs; ++i) {
-		vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i],
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
+
+		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
 				     ctx ? ctx[i] : false);
 		if (IS_ERR(vqs[i])) {
 			vm_del_vqs(vdev);
@@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	u16 msix_vec;
-	int i, err, nvectors, allocated_vectors;
+	int i, err, nvectors, allocated_vectors, queue_idx = 0;
 
 	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
 	if (!vp_dev->vqs)
@@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 			msix_vec = allocated_vectors++;
 		else
 			msix_vec = VP_MSIX_VQ_VECTOR;
-		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
 				     ctx ? ctx[i] : false,
 				     msix_vec);
 		if (IS_ERR(vqs[i])) {
@@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 		const char * const names[], const bool *ctx)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	int i, err;
+	int i, err, queue_idx = 0;
 
 	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
 	if (!vp_dev->vqs)
@@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 			vqs[i] = NULL;
 			continue;
 		}
-		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
 				     ctx ? ctx[i] : false,
 				     VIRTIO_MSI_NO_VECTOR);
 		if (IS_ERR(vqs[i])) {
@@ -12,6 +12,11 @@ struct irq_affinity;
 
 /**
  * virtio_config_ops - operations for configuring a virtio device
+ * Note: Do not assume that a transport implements all of the operations
+ *       getting/setting a value as a simple read/write! Generally speaking,
+ *       any of @get/@set, @get_status/@set_status, or @get_features/
+ *       @finalize_features are NOT safe to be called from an atomic
+ *       context.
  * @get: read the value of a configuration field
  *	vdev: the virtio_device
  *	offset: the offset of the configuration field
@@ -22,7 +27,7 @@ struct irq_affinity;
  *	offset: the offset of the configuration field
  *	buf: the buffer to read the field value from.
  *	len: the length of the buffer
- * @generation: config generation counter
+ * @generation: config generation counter (optional)
  *	vdev: the virtio_device
  *	Returns the config generation counter
  * @get_status: read the status byte
@@ -48,17 +53,17 @@ struct irq_affinity;
  * @del_vqs: free virtqueues found by find_vqs().
  * @get_features: get the array of feature bits for this device.
  *	vdev: the virtio_device
- *	Returns the first 32 feature bits (all we currently need).
+ *	Returns the first 64 feature bits (all we currently need).
  * @finalize_features: confirm what device features we'll be using.
  *	vdev: the virtio_device
  *	This gives the final feature bits for the device: it can change
  *	the dev->feature bits if it wants.
  *	Returns 0 on success or error status
- * @bus_name: return the bus name associated with the device
+ * @bus_name: return the bus name associated with the device (optional)
  *	vdev: the virtio_device
  *	This returns a pointer to the bus name a la pci_name from which
  *	the caller can then copy.
- * @set_vq_affinity: set the affinity for a virtqueue.
+ * @set_vq_affinity: set the affinity for a virtqueue (optional).
  * @get_vq_affinity: get the affinity for a virtqueue (optional).
  */
 typedef void vq_callback_t(struct virtqueue *);
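The documentation hunks above mark @generation, @bus_name and @set_vq_affinity as optional and note that the config accessors may not be safe in atomic context. A standalone sketch of what an optional @generation callback buys a caller: a multi-field config read is retried until the generation is unchanged, and a transport without the callback is treated as generation 0 (the struct and fields here are invented for illustration, not the kernel's virtio_config_ops):

#include <stdio.h>

struct fake_transport {
	unsigned int gen;			/* bumped by the "device" on config change */
	unsigned int field_a, field_b;
	unsigned int (*generation)(struct fake_transport *t);	/* optional, may be NULL */
};

static unsigned int fake_generation(struct fake_transport *t)
{
	return t->gen;
}

/* Read two related fields without seeing a torn update between them. */
static void read_config_pair(struct fake_transport *t,
			     unsigned int *a, unsigned int *b)
{
	unsigned int before, after;

	do {
		before = t->generation ? t->generation(t) : 0;
		*a = t->field_a;
		*b = t->field_b;
		after = t->generation ? t->generation(t) : 0;
	} while (before != after);
}

int main(void)
{
	struct fake_transport t = {
		.gen = 1, .field_a = 10, .field_b = 20,
		.generation = fake_generation,
	};
	unsigned int a, b;

	read_config_pair(&t, &a, &b);
	printf("a=%u b=%u\n", a, b);
	return 0;
}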