iommu/virtio: Make use of ops->iotlb_sync_map

Pull out the sync operation from viommu_map_pages() by implementing
ops->iotlb_sync_map. This allows the common IOMMU code to map multiple
elements of an sg with a single sync (see iommu_map_sg()).
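
For illustration, a rough sketch (not taken from this patch) of how the caller
side batches the flush, assuming the structure of iommu_map_sg() and the
internal __iommu_map() helper in drivers/iommu/iommu.c: every scatter-gather
element is mapped without flushing, and ops->iotlb_sync_map() is invoked once
for the whole range. Names such as iommu_map_sg_sketch() are made up for the
example, and the real code additionally coalesces contiguous segments and
unmaps already-mapped elements on error.

	/* Sketch as if inside drivers/iommu/iommu.c; error unwinding omitted. */
	static ssize_t iommu_map_sg_sketch(struct iommu_domain *domain,
					   unsigned long iova,
					   struct scatterlist *sg,
					   unsigned int nents, int prot)
	{
		const struct iommu_domain_ops *ops = domain->ops;
		struct scatterlist *s;
		size_t mapped = 0;
		unsigned int i;
		int ret;

		for_each_sg(sg, s, nents, i) {
			/* With this patch, each call only queues a VIRTIO_IOMMU_T_MAP request. */
			ret = __iommu_map(domain, iova + mapped, sg_phys(s),
					  s->length, prot, GFP_KERNEL);
			if (ret)
				return ret;
			mapped += s->length;
		}

		/* One flush for the whole scatterlist instead of one per element. */
		if (ops->iotlb_sync_map) {
			ret = ops->iotlb_sync_map(domain, iova, mapped);
			if (ret)
				return ret;
		}

		return mapped;
	}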

Link: https://lore.kernel.org/lkml/20230726111433.1105665-1-schnelle@linux.ibm.com/
Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20231120-viommu-sync-map-v3-1-50a57ecf78b5@linux.ibm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author:    Niklas Schnelle <schnelle@linux.ibm.com>
Date:      2023-11-20 15:51:56 +01:00
Committer: Joerg Roedel <jroedel@suse.de>
commit 00271ca5cb (parent 2cc14f52ae)

@@ -843,7 +843,7 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
 			.flags		= cpu_to_le32(flags),
 		};
 
-		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+		ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
 		if (ret) {
 			viommu_del_mappings(vdomain, iova, end);
 			return ret;
@@ -912,6 +912,20 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
 	viommu_sync_req(vdomain->viommu);
 }
 
+static int viommu_iotlb_sync_map(struct iommu_domain *domain,
+				 unsigned long iova, size_t size)
+{
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+	/*
+	 * May be called before the viommu is initialized including
+	 * while creating direct mapping
+	 */
+	if (!vdomain->nr_endpoints)
+		return 0;
+	return viommu_sync_req(vdomain->viommu);
+}
+
 static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 {
 	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
@@ -1058,6 +1072,7 @@ static struct iommu_ops viommu_ops = {
 		.unmap_pages		= viommu_unmap_pages,
 		.iova_to_phys		= viommu_iova_to_phys,
 		.iotlb_sync		= viommu_iotlb_sync,
+		.iotlb_sync_map		= viommu_iotlb_sync_map,
 		.free			= viommu_domain_free,
 	}
 };