iommu/dma: Fix race condition during iova_domain initialization
When many devices share the same iova domain, iommu_dma_init_domain() may be called concurrently for each of them. Each caller then observes iovad->start_pfn as zero, concludes the domain is not yet initialised, and enters init_iova_domain(), so the iova domain is initialised more than once. Fix this by protecting init_iova_domain() with iommu_dma_cookie->mutex. Exception backtrace: rb_insert_color(param1=0xFFFFFF80CD2BDB40, param3=1) + 64 init_iova_domain() + 180 iommu_setup_dma_ops() + 260 arch_setup_dma_ops() + 132 of_dma_configure_id() + 468 platform_dma_configure() + 32 really_probe() + 1168 driver_probe_device() + 268 __device_attach_driver() + 524 __device_attach() + 524 bus_probe_device() + 64 deferred_probe_work_func() + 260 process_one_work() + 580 worker_thread() + 1076 kthread() + 332 ret_from_fork() + 16 Signed-off-by: Ning Li <ning.li@mediatek.com> Signed-off-by: Yunfei Wang <yf.wang@mediatek.com> Acked-by: Robin Murphy <robin.murphy@arm.com> Reviewed-by: Miles Chen <miles.chen@mediatek.com> Link: https://lore.kernel.org/r/20220530120748.31733-1-yf.wang@mediatek.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
4bf7fda4dc
commit
ac9a5d522b
@@ -64,6 +64,7 @@ struct iommu_dma_cookie {
|
|||||||
|
|
||||||
/* Domain for flush queue callback; NULL if flush queue not in use */
|
/* Domain for flush queue callback; NULL if flush queue not in use */
|
||||||
struct iommu_domain *fq_domain;
|
struct iommu_domain *fq_domain;
|
||||||
|
struct mutex mutex;
|
||||||
};
|
};
|
||||||
|
|
||||||
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
|
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
|
||||||
@@ -310,6 +311,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
|
|||||||
if (!domain->iova_cookie)
|
if (!domain->iova_cookie)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
mutex_init(&domain->iova_cookie->mutex);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -560,26 +562,33 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* start_pfn is always nonzero for an already-initialised domain */
|
/* start_pfn is always nonzero for an already-initialised domain */
|
||||||
|
mutex_lock(&cookie->mutex);
|
||||||
if (iovad->start_pfn) {
|
if (iovad->start_pfn) {
|
||||||
if (1UL << order != iovad->granule ||
|
if (1UL << order != iovad->granule ||
|
||||||
base_pfn != iovad->start_pfn) {
|
base_pfn != iovad->start_pfn) {
|
||||||
pr_warn("Incompatible range for DMA domain\n");
|
pr_warn("Incompatible range for DMA domain\n");
|
||||||
return -EFAULT;
|
ret = -EFAULT;
|
||||||
|
goto done_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
ret = 0;
|
||||||
|
goto done_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
init_iova_domain(iovad, 1UL << order, base_pfn);
|
init_iova_domain(iovad, 1UL << order, base_pfn);
|
||||||
ret = iova_domain_init_rcaches(iovad);
|
ret = iova_domain_init_rcaches(iovad);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
goto done_unlock;
|
||||||
|
|
||||||
/* If the FQ fails we can simply fall back to strict mode */
|
/* If the FQ fails we can simply fall back to strict mode */
|
||||||
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
|
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
|
||||||
domain->type = IOMMU_DOMAIN_DMA;
|
domain->type = IOMMU_DOMAIN_DMA;
|
||||||
|
|
||||||
return iova_reserve_iommu_regions(dev, domain);
|
ret = iova_reserve_iommu_regions(dev, domain);
|
||||||
|
|
||||||
|
done_unlock:
|
||||||
|
mutex_unlock(&cookie->mutex);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
Loading…
x
Reference in New Issue
Block a user