virtio: harsher barriers for rpmsg.
We were cheating with our barriers: using the SMP ones rather than the
real device ones. That was fine until rpmsg came along, which is used
to talk to a real device (a non-SMP CPU).
Unfortunately, just putting back the real barriers (reverting d57ed95d)
causes a performance regression on virtio-pci. In particular, Amos
reports that with netbench's TCP_RR over virtio_net, CPU utilization
increased by up to 35% while throughput dropped by up to 14%.
By comparison, this branch is in the noise.
Reference: https://lkml.org/lkml/2011/12/11/22
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
commit 7b21e34fd1
parent e343a895a9
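The patch threads a per-virtqueue weak_barriers flag, set by the transport when it creates the queue, so each barrier site picks the cheapest ordering that is still correct. A minimal sketch of the publish pattern being protected — illustration only, not the patch itself; publish_buffer is an invented helper, field names follow struct vring:

/* Illustration only: expose a filled descriptor chain to the other side. */
static void publish_buffer(struct vring *vr, u16 head, bool weak_barriers)
{
	vr->avail->ring[vr->avail->idx % vr->num] = head;

	/*
	 * The descriptor writes must be visible before the index update.
	 * If the consumer is just another SMP CPU (virtio-pci, mmio,
	 * lguest, s390), smp_wmb() is enough.  If the consumer is a real
	 * remote processor (the rpmsg case), smp_wmb() can degrade to a
	 * compiler-only barrier and order nothing in hardware, so a
	 * mandatory wmb() is required.
	 */
	if (weak_barriers)
		smp_wmb();
	else
		wmb();

	vr->avail->idx++;
}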
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
@@ -292,10 +292,12 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
 	/*
 	 * OK, tell virtio_ring.c to set up a virtqueue now we know its size
-	 * and we've got a pointer to its pages.
+	 * and we've got a pointer to its pages.  Note that we set weak_barriers
+	 * to 'true': the host is just a(nother) SMP CPU, so we only need
+	 * inter-cpu barriers.
 	 */
-	vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
-				 vdev, lvq->pages, lg_notify, callback, name);
+	vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, vdev,
+				 true, lvq->pages, lg_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
 		goto unmap;
 	}
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
@@ -198,7 +198,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
 		goto out;
 
 	vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
-				 vdev, (void *) config->address,
+				 vdev, true, (void *) config->address,
 				 kvm_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
@@ -310,8 +310,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
 
 	/* Create the vring */
-	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN,
-			vdev, info->queue, vm_notify, callback, name);
+	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+			true, info->queue, vm_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
 		goto error_new_virtqueue;
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
@@ -414,8 +414,8 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
 		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 
 	/* create the vring */
-	vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
-				 vdev, info->queue, vp_notify, callback, name);
+	vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
+				 true, info->queue, vp_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
 		goto out_activate_queue;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
@@ -28,17 +28,20 @@
 #ifdef CONFIG_SMP
 /* Where possible, use SMP barriers which are more lightweight than mandatory
  * barriers, because mandatory barriers control MMIO effects on accesses
- * through relaxed memory I/O windows (which virtio does not use). */
-#define virtio_mb() smp_mb()
-#define virtio_rmb() smp_rmb()
-#define virtio_wmb() smp_wmb()
+ * through relaxed memory I/O windows (which virtio-pci does not use). */
+#define virtio_mb(vq) \
+	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
+#define virtio_rmb(vq) \
+	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
+#define virtio_wmb(vq) \
+	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
 #else
 /* We must force memory ordering even if guest is UP since host could be
  * running on another CPU, but SMP barriers are defined to barrier() in that
  * configuration. So fall back to mandatory barriers instead. */
-#define virtio_mb() mb()
-#define virtio_rmb() rmb()
-#define virtio_wmb() wmb()
+#define virtio_mb(vq) mb()
+#define virtio_rmb(vq) rmb()
+#define virtio_wmb(vq) wmb()
 #endif
 
 #ifdef DEBUG
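At a call site, virtio_wmb(vq) now expands to a per-queue runtime choice, roughly:

	do {
		if (vq->weak_barriers)
			smp_wmb();	/* other side is just another CPU */
		else
			wmb();		/* other side is a real device */
	} while (0);

Since weak_barriers never changes after vring_new_virtqueue(), the branch is perfectly predictable, which is presumably why this variant measures in the noise while unconditional mandatory barriers cost virtio-pci up to 35% extra CPU.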
@@ -77,6 +80,9 @@ struct vring_virtqueue
 	/* Actual memory layout for this queue */
 	struct vring vring;
 
+	/* Can we use weak barriers? */
+	bool weak_barriers;
+
 	/* Other side has made a mess, don't try any more. */
 	bool broken;
 
@@ -245,14 +251,14 @@ void virtqueue_kick(struct virtqueue *_vq)
 	START_USE(vq);
 	/* Descriptors and available array need to be set before we expose the
 	 * new available array entries. */
-	virtio_wmb();
+	virtio_wmb(vq);
 
 	old = vq->vring.avail->idx;
 	new = vq->vring.avail->idx = old + vq->num_added;
 	vq->num_added = 0;
 
 	/* Need to update avail index before checking if we should notify */
-	virtio_mb();
+	virtio_mb(vq);
 
 	if (vq->event ?
 	    vring_need_event(vring_avail_event(&vq->vring), new, old) :
@@ -314,7 +320,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	}
 
 	/* Only get used array entries after they have been exposed by host. */
-	virtio_rmb();
+	virtio_rmb(vq);
 
 	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
 	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
@@ -337,7 +343,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	 * the read in the next get_buf call. */
 	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
 		vring_used_event(&vq->vring) = vq->last_used_idx;
-		virtio_mb();
+		virtio_mb(vq);
 	}
 
 	END_USE(vq);
@@ -366,7 +372,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 	vring_used_event(&vq->vring) = vq->last_used_idx;
-	virtio_mb();
+	virtio_mb(vq);
 	if (unlikely(more_used(vq))) {
 		END_USE(vq);
 		return false;
@@ -393,7 +399,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	/* TODO: tune this threshold */
 	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
 	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
-	virtio_mb();
+	virtio_mb(vq);
 	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
 		END_USE(vq);
 		return false;
@@ -453,6 +459,7 @@ EXPORT_SYMBOL_GPL(vring_interrupt);
 struct virtqueue *vring_new_virtqueue(unsigned int num,
 				      unsigned int vring_align,
 				      struct virtio_device *vdev,
+				      bool weak_barriers,
 				      void *pages,
 				      void (*notify)(struct virtqueue *),
 				      void (*callback)(struct virtqueue *),
@@ -476,6 +483,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 	vq->vq.vdev = vdev;
 	vq->vq.name = name;
 	vq->notify = notify;
+	vq->weak_barriers = weak_barriers;
 	vq->broken = false;
 	vq->last_used_idx = 0;
 	vq->num_added = 0;
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
@@ -168,6 +168,7 @@ struct virtqueue;
 struct virtqueue *vring_new_virtqueue(unsigned int num,
 				      unsigned int vring_align,
 				      struct virtio_device *vdev,
+				      bool weak_barriers,
 				      void *pages,
 				      void (*notify)(struct virtqueue *vq),
 				      void (*callback)(struct virtqueue *vq),
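Every in-tree caller in this patch passes true; a transport that talks to a real remote processor would pass false. A hypothetical sketch — rp_notify, rp_callback, and rp->ring_virt are invented names for illustration, not the actual rpmsg code:

	/* Hypothetical rpmsg-style transport: other side is a non-SMP CPU. */
	vq = vring_new_virtqueue(num, PAGE_SIZE, vdev,
				 false,	/* weak_barriers: mandatory barriers */
				 rp->ring_virt, rp_notify, rp_callback,
				 "rpmsg_rx");
	if (!vq)
		return ERR_PTR(-ENOMEM);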
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
@@ -214,6 +214,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
 struct virtqueue *vring_new_virtqueue(unsigned int num,
 				      unsigned int vring_align,
 				      struct virtio_device *vdev,
+				      bool weak_barriers,
 				      void *pages,
 				      void (*notify)(struct virtqueue *vq),
 				      void (*callback)(struct virtqueue *vq),
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
@@ -92,7 +92,8 @@ static void vq_info_add(struct vdev_info *dev, int num)
 	assert(r >= 0);
 	memset(info->ring, 0, vring_size(num, 4096));
 	vring_init(&info->vring, num, info->ring, 4096);
-	info->vq = vring_new_virtqueue(info->vring.num, 4096, &dev->vdev, info->ring,
+	info->vq = vring_new_virtqueue(info->vring.num, 4096, &dev->vdev,
+				       true, info->ring,
 				       vq_notify, vq_callback, "test");
 	assert(info->vq);
 	info->vq->priv = info;