iommu: Allow .iotlb_sync_map to fail and handle s390's -ENOMEM return
On s390, when using a paging hypervisor, .iotlb_sync_map is used to
sync mappings by letting the hypervisor inspect the synced IOVA range
and update a shadow table. This however means that .iotlb_sync_map can
fail as the hypervisor may run out of resources while doing the sync.
This can be due to the hypervisor being unable to pin guest pages, due
to a limit on mapped addresses such as vfio_iommu_type1.dma_entry_limit
being reached, or due to a lack of other resources. Either way, such a
failure to sync a mapping should result in a DMA_MAPPING_ERROR.

Especially when running with batched IOTLB flushes for unmap, it may be
that some IOVAs have already been invalidated but not yet synced via
.iotlb_sync_map. Thus, if the hypervisor indicates running out of
resources, first do a global flush allowing the hypervisor to free
resources associated with these mappings as well as retry creating the
new mappings, and only if that also fails report the error to callers.

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
Acked-by: Jernej Skrabec <jernej.skrabec@gmail.com> # sun50i
Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
Link: https://lore.kernel.org/r/20230928-dma_iommu-v13-1-9e5fc4dacc36@linux.ibm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit fa4c450709
parent ccb76c5751
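For illustration only (not part of the patch): a minimal userspace C
sketch of the -ENOMEM recovery policy described above. refresh_range()
and refresh_all() are hypothetical stand-ins for zpci_refresh_trans()
and zpci_refresh_all(); the global flush succeeding is what makes the
range sync recoverable, since re-shadowing the whole aperture also
recreates the new mapping.

	/*
	 * Illustration only -- a model of the recovery policy, not the
	 * kernel API. refresh_range()/refresh_all() are hypothetical
	 * stand-ins for zpci_refresh_trans()/zpci_refresh_all().
	 */
	#include <errno.h>
	#include <stdio.h>

	/* Sync one IOVA range; modeled to fail once with -ENOMEM. */
	static int refresh_range(unsigned long iova, unsigned long size)
	{
		static int calls;

		return (calls++ == 0) ? -ENOMEM : 0;
	}

	/*
	 * Global flush: lets the "hypervisor" discover invalidated
	 * entries, free the resources backing them, and re-shadow the
	 * whole aperture, which also recreates the new mapping.
	 */
	static int refresh_all(void)
	{
		return 0;
	}

	static int iotlb_sync_map(unsigned long iova, unsigned long size)
	{
		int ret = refresh_range(iova, size);

		if (ret == -ENOMEM)
			ret = refresh_all();

		return ret;	/* non-zero reaches callers as a mapping error */
	}

	int main(void)
	{
		/* First sync hits the modeled shortage and recovers. */
		printf("iotlb_sync_map: %d\n", iotlb_sync_map(0x1000, 0x1000));
		return 0;
	}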
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2233,14 +2233,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	return ret;
 }
 
-static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
-				     unsigned long iova, size_t size)
+static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+				    unsigned long iova, size_t size)
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
 
 	if (ops->map_pages)
 		domain_flush_np_cache(domain, iova, size);
+	return 0;
 }
 
 static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -506,10 +506,11 @@ static void apple_dart_iotlb_sync(struct iommu_domain *domain,
 	apple_dart_domain_flush_tlb(to_dart_domain(domain));
 }
 
-static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
-				      unsigned long iova, size_t size)
+static int apple_dart_iotlb_sync_map(struct iommu_domain *domain,
+				     unsigned long iova, size_t size)
 {
 	apple_dart_domain_flush_tlb(to_dart_domain(domain));
+	return 0;
 }
 
 static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4678,8 +4678,8 @@ static bool risky_device(struct pci_dev *pdev)
 	return false;
 }
 
-static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
-				       unsigned long iova, size_t size)
+static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+				      unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	unsigned long pages = aligned_nrpages(iova, size);
@@ -4689,6 +4689,7 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 
 	xa_for_each(&dmar_domain->iommu_array, i, info)
 		__mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
+	return 0;
 }
 
 static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2585,8 +2585,17 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		return -EINVAL;
 
 	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
-	if (ret == 0 && ops->iotlb_sync_map)
-		ops->iotlb_sync_map(domain, iova, size);
+	if (ret == 0 && ops->iotlb_sync_map) {
+		ret = ops->iotlb_sync_map(domain, iova, size);
+		if (ret)
+			goto out_err;
+	}
+
+	return ret;
+
+out_err:
+	/* undo mappings already done */
+	iommu_unmap(domain, iova, size);
 
 	return ret;
 }
@@ -2714,8 +2723,11 @@ next:
 		sg = sg_next(sg);
 	}
 
-	if (ops->iotlb_sync_map)
-		ops->iotlb_sync_map(domain, iova, mapped);
+	if (ops->iotlb_sync_map) {
+		ret = ops->iotlb_sync_map(domain, iova, mapped);
+		if (ret)
+			goto out_err;
+	}
 	return mapped;
 
 out_err:
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -498,12 +498,13 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
 	return ret;
 }
 
-static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
-			       size_t size)
+static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+			      size_t size)
 {
 	struct msm_priv *priv = to_msm_priv(domain);
 
 	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
+	return 0;
 }
 
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -837,12 +837,13 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
 	mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
 }
 
-static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
-			       size_t size)
+static int mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+			      size_t size)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
 	mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
+	return 0;
 }
 
 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -219,6 +219,12 @@ static void s390_iommu_release_device(struct device *dev)
 		__s390_iommu_detach_device(zdev);
 }
 
+static int zpci_refresh_all(struct zpci_dev *zdev)
+{
+	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
+				  zdev->end_dma - zdev->start_dma + 1);
+}
+
 static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 	struct s390_domain *s390_domain = to_s390_domain(domain);
@@ -226,8 +232,7 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
-		zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
-				   zdev->end_dma - zdev->start_dma + 1);
+		zpci_refresh_all(zdev);
 	}
 	rcu_read_unlock();
 }
@@ -251,20 +256,32 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
 	rcu_read_unlock();
 }
 
-static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
-				      unsigned long iova, size_t size)
+static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
+				     unsigned long iova, size_t size)
 {
 	struct s390_domain *s390_domain = to_s390_domain(domain);
 	struct zpci_dev *zdev;
+	int ret = 0;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
 		if (!zdev->tlb_refresh)
 			continue;
-		zpci_refresh_trans((u64)zdev->fh << 32,
-				   iova, size);
+		ret = zpci_refresh_trans((u64)zdev->fh << 32,
+					 iova, size);
+		/*
+		 * let the hypervisor discover invalidated entries
+		 * allowing it to free IOVAs and unpin pages
+		 */
+		if (ret == -ENOMEM) {
+			ret = zpci_refresh_all(zdev);
+			if (ret)
+				break;
+		}
 	}
 	rcu_read_unlock();
+
+	return ret;
 }
 
 static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -341,8 +341,8 @@ static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return size;
 }
 
-static void sprd_iommu_sync_map(struct iommu_domain *domain,
-				unsigned long iova, size_t size)
+static int sprd_iommu_sync_map(struct iommu_domain *domain,
+			       unsigned long iova, size_t size)
 {
 	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
 	unsigned int reg;
@@ -354,6 +354,7 @@ static void sprd_iommu_sync_map(struct iommu_domain *domain,
 
 	/* clear IOMMU TLB buffer after page table updated */
 	sprd_iommu_write(dom->sdev, reg, 0xffffffff);
+	return 0;
 }
 
 static void sprd_iommu_sync(struct iommu_domain *domain,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -401,8 +401,8 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
 	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
 }
 
-static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
-					unsigned long iova, size_t size)
+static int sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
+					unsigned long iova, size_t size)
 {
 	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
 	struct sun50i_iommu *iommu = sun50i_domain->iommu;
@@ -411,6 +411,8 @@ static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
 	spin_lock_irqsave(&iommu->iommu_lock, flags);
 	sun50i_iommu_zap_range(iommu, iova, size);
 	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+	return 0;
 }
 
 static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -350,8 +350,8 @@ struct iommu_domain_ops {
 			     struct iommu_iotlb_gather *iotlb_gather);
 
 	void (*flush_iotlb_all)(struct iommu_domain *domain);
-	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
-			       size_t size);
+	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+			      size_t size);
 	void (*iotlb_sync)(struct iommu_domain *domain,
 			   struct iommu_iotlb_gather *iotlb_gather);