iommu/virtio: Add event queue
The event queue offers a way for the device to report access faults from
endpoints. It is implemented on virtqueue #1. Whenever the host needs to
signal a fault, it fills one of the buffers offered by the guest and
interrupts it.

Tested-by: Bharat Bhushan <bharat.bhushan@nxp.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
committed by Michael S. Tsirkin
parent 2a5a314874
commit 169a126c6e
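For illustration only, not part of the patch: a minimal sketch of the device
side of the protocol described above, showing how a host implementation might
encode a write fault into one of the guest-provided event buffers before
marking it used on virtqueue #1 and notifying the guest. The structure and
flag definitions are the ones this patch adds to virtio_iommu.h; the helper
name and the surrounding buffer/interrupt plumbing are assumptions.

```c
/* Hypothetical device-side sketch: encode a write fault at 'iova' from
 * endpoint 'ep' into a guest-provided event buffer. The caller is assumed
 * to then mark the buffer used on virtqueue #1 and notify the guest;
 * that plumbing is outside this sketch.
 */
#include <stdint.h>
#include <string.h>
#include <endian.h>			/* htole32()/htole64() (glibc) */
#include <linux/virtio_iommu.h>		/* definitions added by this patch */

static void encode_mapping_fault(struct virtio_iommu_fault *evt,
				 uint32_t ep, uint64_t iova)
{
	memset(evt, 0, sizeof(*evt));	/* reserved fields must stay zero */
	evt->reason   = VIRTIO_IOMMU_FAULT_R_MAPPING;
	evt->flags    = htole32(VIRTIO_IOMMU_FAULT_F_WRITE |
				VIRTIO_IOMMU_FAULT_F_ADDRESS);
	evt->endpoint = htole32(ep);
	evt->address  = htole64(iova);
}
```

Zeroing the buffer first matters: the reserved bytes share storage with the
`head` word that the guest driver checks against VIOMMU_FAULT_RESV_MASK, so a
fault with non-zero reserved bytes would be discarded.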
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -29,7 +29,8 @@
 #define MSI_IOVA_LENGTH			0x100000
 
 #define VIOMMU_REQUEST_VQ		0
-#define VIOMMU_NR_VQS			1
+#define VIOMMU_EVENT_VQ			1
+#define VIOMMU_NR_VQS			2
 
 struct viommu_dev {
 	struct iommu_device		iommu;
@@ -41,6 +42,7 @@ struct viommu_dev {
 	struct virtqueue		*vqs[VIOMMU_NR_VQS];
 	spinlock_t			request_lock;
 	struct list_head		requests;
+	void				*evts;
 
 	/* Device configuration */
 	struct iommu_domain_geometry	geometry;
@@ -82,6 +84,15 @@ struct viommu_request {
 	char				buf[];
 };
 
+#define VIOMMU_FAULT_RESV_MASK		0xffffff00
+
+struct viommu_event {
+	union {
+		u32			head;
+		struct virtio_iommu_fault fault;
+	};
+};
+
 #define to_viommu_domain(domain)	\
 	container_of(domain, struct viommu_domain, domain)
 
@@ -503,6 +514,68 @@ out_free:
 	return ret;
 }
 
+static int viommu_fault_handler(struct viommu_dev *viommu,
+				struct virtio_iommu_fault *fault)
+{
+	char *reason_str;
+
+	u8 reason	= fault->reason;
+	u32 flags	= le32_to_cpu(fault->flags);
+	u32 endpoint	= le32_to_cpu(fault->endpoint);
+	u64 address	= le64_to_cpu(fault->address);
+
+	switch (reason) {
+	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
+		reason_str = "domain";
+		break;
+	case VIRTIO_IOMMU_FAULT_R_MAPPING:
+		reason_str = "page";
+		break;
+	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
+	default:
+		reason_str = "unknown";
+		break;
+	}
+
+	/* TODO: find EP by ID and report_iommu_fault */
+	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
+		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
+				    reason_str, endpoint, address,
+				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
+				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
+				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
+	else
+		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
+				    reason_str, endpoint);
+	return 0;
+}
+
+static void viommu_event_handler(struct virtqueue *vq)
+{
+	int ret;
+	unsigned int len;
+	struct scatterlist sg[1];
+	struct viommu_event *evt;
+	struct viommu_dev *viommu = vq->vdev->priv;
+
+	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
+		if (len > sizeof(*evt)) {
+			dev_err(viommu->dev,
+				"invalid event buffer (len %u != %zu)\n",
+				len, sizeof(*evt));
+		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
+			viommu_fault_handler(viommu, &evt->fault);
+		}
+
+		sg_init_one(sg, evt, sizeof(*evt));
+		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
+		if (ret)
+			dev_err(viommu->dev, "could not add event buffer\n");
+	}
+
+	virtqueue_kick(vq);
+}
+
 /* IOMMU API */
 
 static struct iommu_domain *viommu_domain_alloc(unsigned type)
@@ -886,17 +959,36 @@ static struct iommu_ops viommu_ops = {
 static int viommu_init_vqs(struct viommu_dev *viommu)
 {
 	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
-	const char *name = "request";
-	void *ret;
+	const char *names[] = { "request", "event" };
+	vq_callback_t *callbacks[] = {
+		NULL, /* No async requests */
+		viommu_event_handler,
+	};
+
+	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
+			       names, NULL);
+}
+
+static int viommu_fill_evtq(struct viommu_dev *viommu)
+{
+	int i, ret;
+	struct scatterlist sg[1];
+	struct viommu_event *evts;
+	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
+	size_t nr_evts = vq->num_free;
+
+	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
+						 sizeof(*evts), GFP_KERNEL);
+	if (!evts)
+		return -ENOMEM;
 
-	ret = virtio_find_single_vq(vdev, NULL, name);
-	if (IS_ERR(ret)) {
-		dev_err(viommu->dev, "cannot find VQ\n");
-		return PTR_ERR(ret);
+	for (i = 0; i < nr_evts; i++) {
+		sg_init_one(sg, &evts[i], sizeof(*evts));
+		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
+		if (ret)
+			return ret;
 	}
 
-	viommu->vqs[VIOMMU_REQUEST_VQ] = ret;
-
 	return 0;
 }
 
@@ -964,6 +1056,11 @@ static int viommu_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
+	/* Populate the event queue with buffers */
+	ret = viommu_fill_evtq(viommu);
+	if (ret)
+		goto err_free_vqs;
+
 	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
 				     virtio_bus_name(vdev));
 	if (ret)
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -139,4 +139,23 @@ struct virtio_iommu_req_probe {
 	 */
 };
 
+/* Fault types */
+#define VIRTIO_IOMMU_FAULT_R_UNKNOWN		0
+#define VIRTIO_IOMMU_FAULT_R_DOMAIN		1
+#define VIRTIO_IOMMU_FAULT_R_MAPPING		2
+
+#define VIRTIO_IOMMU_FAULT_F_READ		(1 << 0)
+#define VIRTIO_IOMMU_FAULT_F_WRITE		(1 << 1)
+#define VIRTIO_IOMMU_FAULT_F_EXEC		(1 << 2)
+#define VIRTIO_IOMMU_FAULT_F_ADDRESS		(1 << 8)
+
+struct virtio_iommu_fault {
+	__u8					reason;
+	__u8					reserved[3];
+	__le32					flags;
+	__le32					endpoint;
+	__u8					reserved2[4];
+	__le64					address;
+};
+
 #endif
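A side note on the validity test in viommu_event_handler(): `head` overlays
the first four bytes of the fault record, i.e. `reason` plus the three
`reserved` bytes, so `head & VIOMMU_FAULT_RESV_MASK` is non-zero exactly when
a reserved byte is set. A small standalone sketch of that overlay, assuming a
little-endian guest (where the reserved bytes land in the top 24 bits):

```c
/* Standalone illustration (not from the patch) of the viommu_event
 * union: a spec-compliant fault leaves the reserved bytes zero, so
 * (head & 0xffffff00) == 0 on a little-endian guest.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define VIOMMU_FAULT_RESV_MASK	0xffffff00	/* as defined in the patch */

int main(void)
{
	union {
		uint32_t head;
		struct {
			uint8_t reason;
			uint8_t reserved[3];
		} fault;			/* first bytes of the record */
	} evt;

	memset(&evt, 0, sizeof(evt));
	evt.fault.reason = 2;			/* VIRTIO_IOMMU_FAULT_R_MAPPING */
	assert(!(evt.head & VIOMMU_FAULT_RESV_MASK));	/* event accepted */

	evt.fault.reserved[0] = 1;		/* malformed event... */
	assert(evt.head & VIOMMU_FAULT_RESV_MASK);	/* ...is ignored */
	return 0;
}
```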