iommu/virtio: Add ops->flush_iotlb_all and enable deferred flush

Add an ops->flush_iotlb_all operation to enable virtio-iommu for the
dma-iommu deferred flush scheme. This yields a significant performance
increase in exchange for a window in which devices can still access
previously IOMMU-mapped memory when running with
CONFIG_IOMMU_DEFAULT_DMA_LAZY. The previous strict behavior can still
be selected with iommu.strict=1 on the kernel command line or with
CONFIG_IOMMU_DEFAULT_DMA_STRICT.
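
As an illustration of what the deferred scheme buys us, here is a
minimal sketch (assumed shape only, not the actual dma-iommu code;
example_flush_queue_drain() is a hypothetical name): unmapped IOVA
ranges are queued, and the IOTLB is invalidated once per batch rather
than once per unmap.

#include <linux/iommu.h>

/*
 * Hypothetical sketch: drain a batch of deferred unmaps with a single
 * global invalidation. For virtio-iommu this lands in
 * viommu_flush_iotlb_all(), i.e. one viommu_sync_req() per batch
 * instead of one synchronous flush per unmapped range.
 */
static void example_flush_queue_drain(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);

	/* ...the queued IOVA ranges may now be reused safely... */
}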

Link: https://lore.kernel.org/lkml/20230802123612.GA6142@myrica/
Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20231120-viommu-sync-map-v3-2-50a57ecf78b5@linux.ibm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>

@@ -926,6 +926,19 @@ static int viommu_iotlb_sync_map(struct iommu_domain *domain,
 	return viommu_sync_req(vdomain->viommu);
 }
 
+static void viommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+	/*
+	 * May be called before the viommu is initialized including
+	 * while creating direct mapping
+	 */
+	if (!vdomain->nr_endpoints)
+		return;
+	viommu_sync_req(vdomain->viommu);
+}
+
 static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 {
 	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
@@ -1051,6 +1064,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
 		return true;
+	case IOMMU_CAP_DEFERRED_FLUSH:
+		return true;
 	default:
 		return false;
 	}
@@ -1071,6 +1086,7 @@ static struct iommu_ops viommu_ops = {
 	.map_pages		= viommu_map_pages,
 	.unmap_pages		= viommu_unmap_pages,
 	.iova_to_phys		= viommu_iova_to_phys,
+	.flush_iotlb_all	= viommu_flush_iotlb_all,
 	.iotlb_sync		= viommu_iotlb_sync,
 	.iotlb_sync_map		= viommu_iotlb_sync_map,
 	.free			= viommu_domain_free,
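
The lazy scheme is only used when the driver advertises support for
it. A hedged sketch of such a gate (illustrative; can_defer_flush() is
a hypothetical helper, device_iommu_capable() is the existing core
API):

#include <linux/iommu.h>

/*
 * Illustrative only: core code is expected to fall back to strict
 * invalidation unless the driver reports IOMMU_CAP_DEFERRED_FLUSH,
 * which viommu_capable() now does.
 */
static bool can_defer_flush(struct device *dev)
{
	return device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH);
}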