iommu: Add a gfp parameter to iommu_map()
The internal mechanisms support this, but instead of exposing the gfp to the caller it wraps it into iommu_map() and iommu_map_atomic(). Fix this instead of adding more variants for GFP_KERNEL_ACCOUNT. Reviewed-by: Kevin Tian <kevin.tian@intel.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Mathieu Poirier <mathieu.poirier@linaro.org> Link: https://lore.kernel.org/r/1-v3-76b587fe28df+6e3-iommu_map_gfp_jgg@nvidia.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
b7bfaa761d
commit
1369459b2e
@ -984,7 +984,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
|
|||||||
|
|
||||||
len = (j - i) << PAGE_SHIFT;
|
len = (j - i) << PAGE_SHIFT;
|
||||||
ret = iommu_map(mapping->domain, iova, phys, len,
|
ret = iommu_map(mapping->domain, iova, phys, len,
|
||||||
__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
|
__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
|
||||||
|
GFP_KERNEL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
iova += len;
|
iova += len;
|
||||||
@ -1207,7 +1208,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
|||||||
|
|
||||||
prot = __dma_info_to_prot(dir, attrs);
|
prot = __dma_info_to_prot(dir, attrs);
|
||||||
|
|
||||||
ret = iommu_map(mapping->domain, iova, phys, len, prot);
|
ret = iommu_map(mapping->domain, iova, phys, len, prot,
|
||||||
|
GFP_KERNEL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
count += len >> PAGE_SHIFT;
|
count += len >> PAGE_SHIFT;
|
||||||
@ -1379,7 +1381,8 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
|
|||||||
|
|
||||||
prot = __dma_info_to_prot(dir, attrs);
|
prot = __dma_info_to_prot(dir, attrs);
|
||||||
|
|
||||||
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
|
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
|
||||||
|
prot, GFP_KERNEL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
@ -1443,7 +1446,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
|
|||||||
|
|
||||||
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
|
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
|
||||||
|
|
||||||
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
|
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
|
@ -475,7 +475,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
|
|||||||
u32 offset = (r->offset + i) << imem->iommu_pgshift;
|
u32 offset = (r->offset + i) << imem->iommu_pgshift;
|
||||||
|
|
||||||
ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
|
ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
|
||||||
PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
|
PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
|
||||||
|
GFP_KERNEL);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
|
nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
|
||||||
|
|
||||||
|
@ -1057,7 +1057,7 @@ void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
|
|||||||
|
|
||||||
*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
|
*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
|
||||||
err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
|
err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
|
||||||
size, IOMMU_READ | IOMMU_WRITE);
|
size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
goto free_iova;
|
goto free_iova;
|
||||||
|
|
||||||
|
@ -105,7 +105,7 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
|
|||||||
|
|
||||||
pb->dma = iova_dma_addr(&host1x->iova, alloc);
|
pb->dma = iova_dma_addr(&host1x->iova, alloc);
|
||||||
err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
|
err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
|
||||||
IOMMU_READ);
|
IOMMU_READ, GFP_KERNEL);
|
||||||
if (err)
|
if (err)
|
||||||
goto iommu_free_iova;
|
goto iommu_free_iova;
|
||||||
} else {
|
} else {
|
||||||
|
@ -277,7 +277,7 @@ iter_chunk:
|
|||||||
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
|
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
|
||||||
va_start, &pa_start, size, flags);
|
va_start, &pa_start, size, flags);
|
||||||
err = iommu_map(pd->domain, va_start, pa_start,
|
err = iommu_map(pd->domain, va_start, pa_start,
|
||||||
size, flags);
|
size, flags, GFP_KERNEL);
|
||||||
if (err) {
|
if (err) {
|
||||||
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
|
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
|
||||||
va_start, &pa_start, size, err);
|
va_start, &pa_start, size, err);
|
||||||
@ -294,7 +294,7 @@ iter_chunk:
|
|||||||
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
|
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
|
||||||
va_start, &pa_start, size, flags);
|
va_start, &pa_start, size, flags);
|
||||||
err = iommu_map(pd->domain, va_start, pa_start,
|
err = iommu_map(pd->domain, va_start, pa_start,
|
||||||
size, flags);
|
size, flags, GFP_KERNEL);
|
||||||
if (err) {
|
if (err) {
|
||||||
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
|
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
|
||||||
va_start, &pa_start, size, err);
|
va_start, &pa_start, size, err);
|
||||||
|
@ -1615,7 +1615,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
|
|||||||
if (!iova)
|
if (!iova)
|
||||||
goto out_free_page;
|
goto out_free_page;
|
||||||
|
|
||||||
if (iommu_map(domain, iova, msi_addr, size, prot))
|
if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
|
||||||
goto out_free_iova;
|
goto out_free_iova;
|
||||||
|
|
||||||
INIT_LIST_HEAD(&msi_page->list);
|
INIT_LIST_HEAD(&msi_page->list);
|
||||||
|
@ -930,7 +930,7 @@ map_end:
|
|||||||
if (map_size) {
|
if (map_size) {
|
||||||
ret = iommu_map(domain, addr - map_size,
|
ret = iommu_map(domain, addr - map_size,
|
||||||
addr - map_size, map_size,
|
addr - map_size, map_size,
|
||||||
entry->prot);
|
entry->prot, GFP_KERNEL);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
map_size = 0;
|
map_size = 0;
|
||||||
@ -2360,31 +2360,31 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
|
int iommu_map(struct iommu_domain *domain, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
|
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
|
||||||
{
|
{
|
||||||
const struct iommu_domain_ops *ops = domain->ops;
|
const struct iommu_domain_ops *ops = domain->ops;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
might_sleep_if(gfpflags_allow_blocking(gfp));
|
||||||
|
|
||||||
|
/* Discourage passing strange GFP flags */
|
||||||
|
if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
|
||||||
|
__GFP_HIGHMEM)))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
|
ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
|
||||||
if (ret == 0 && ops->iotlb_sync_map)
|
if (ret == 0 && ops->iotlb_sync_map)
|
||||||
ops->iotlb_sync_map(domain, iova, size);
|
ops->iotlb_sync_map(domain, iova, size);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
int iommu_map(struct iommu_domain *domain, unsigned long iova,
|
|
||||||
phys_addr_t paddr, size_t size, int prot)
|
|
||||||
{
|
|
||||||
might_sleep();
|
|
||||||
return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(iommu_map);
|
EXPORT_SYMBOL_GPL(iommu_map);
|
||||||
|
|
||||||
int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
|
int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, int prot)
|
phys_addr_t paddr, size_t size, int prot)
|
||||||
{
|
{
|
||||||
return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
|
return iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(iommu_map_atomic);
|
EXPORT_SYMBOL_GPL(iommu_map_atomic);
|
||||||
|
|
||||||
|
@ -456,7 +456,8 @@ static int batch_iommu_map_small(struct iommu_domain *domain,
|
|||||||
size % PAGE_SIZE);
|
size % PAGE_SIZE);
|
||||||
|
|
||||||
while (size) {
|
while (size) {
|
||||||
rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
|
rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot,
|
||||||
|
GFP_KERNEL);
|
||||||
if (rc)
|
if (rc)
|
||||||
goto err_unmap;
|
goto err_unmap;
|
||||||
iova += PAGE_SIZE;
|
iova += PAGE_SIZE;
|
||||||
@ -500,7 +501,8 @@ static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
|
|||||||
else
|
else
|
||||||
rc = iommu_map(domain, iova,
|
rc = iommu_map(domain, iova,
|
||||||
PFN_PHYS(batch->pfns[cur]) + page_offset,
|
PFN_PHYS(batch->pfns[cur]) + page_offset,
|
||||||
next_iova - iova, area->iommu_prot);
|
next_iova - iova, area->iommu_prot,
|
||||||
|
GFP_KERNEL);
|
||||||
if (rc)
|
if (rc)
|
||||||
goto err_unmap;
|
goto err_unmap;
|
||||||
iova = next_iova;
|
iova = next_iova;
|
||||||
|
@ -158,7 +158,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
|
|||||||
core->fw.mapped_mem_size = mem_size;
|
core->fw.mapped_mem_size = mem_size;
|
||||||
|
|
||||||
ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
|
ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
|
||||||
IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
|
IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV, GFP_KERNEL);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
dev_err(dev, "could not map video firmware region\n");
|
dev_err(dev, "could not map video firmware region\n");
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -466,7 +466,8 @@ static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
|
|||||||
size = PAGE_ALIGN(size + addr - phys);
|
size = PAGE_ALIGN(size + addr - phys);
|
||||||
iova = phys; /* We just want a direct mapping */
|
iova = phys; /* We just want a direct mapping */
|
||||||
|
|
||||||
ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
|
ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
|
||||||
|
GFP_KERNEL);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
@ -574,7 +575,8 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
|
|||||||
size = PAGE_ALIGN(size + addr - phys);
|
size = PAGE_ALIGN(size + addr - phys);
|
||||||
iova = phys; /* We just want a direct mapping */
|
iova = phys; /* We just want a direct mapping */
|
||||||
|
|
||||||
ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
|
ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
|
||||||
|
GFP_KERNEL);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
@ -1639,7 +1639,7 @@ static int ath10k_fw_init(struct ath10k *ar)
|
|||||||
|
|
||||||
ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
|
ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
|
||||||
ar->msa.paddr, ar->msa.mem_size,
|
ar->msa.paddr, ar->msa.mem_size,
|
||||||
IOMMU_READ | IOMMU_WRITE);
|
IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
ath10k_err(ar, "failed to map firmware region: %d\n", ret);
|
ath10k_err(ar, "failed to map firmware region: %d\n", ret);
|
||||||
goto err_iommu_detach;
|
goto err_iommu_detach;
|
||||||
|
@ -1021,7 +1021,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
|
|||||||
|
|
||||||
ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
|
ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
|
||||||
ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
|
ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
|
||||||
IOMMU_READ | IOMMU_WRITE);
|
IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
ath11k_err(ab, "failed to map firmware region: %d\n", ret);
|
ath11k_err(ab, "failed to map firmware region: %d\n", ret);
|
||||||
goto err_iommu_detach;
|
goto err_iommu_detach;
|
||||||
@ -1029,7 +1029,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
|
|||||||
|
|
||||||
ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
|
ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
|
||||||
ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
|
ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
|
||||||
IOMMU_READ | IOMMU_WRITE);
|
IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
|
ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
|
||||||
goto err_iommu_unmap;
|
goto err_iommu_unmap;
|
||||||
|
@ -643,7 +643,8 @@ static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
|
|||||||
if (!mapping)
|
if (!mapping)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
|
ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags,
|
||||||
|
GFP_KERNEL);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
dev_err(dev, "failed to map devmem: %d\n", ret);
|
dev_err(dev, "failed to map devmem: %d\n", ret);
|
||||||
goto out;
|
goto out;
|
||||||
@ -737,7 +738,7 @@ static int rproc_alloc_carveout(struct rproc *rproc,
|
|||||||
}
|
}
|
||||||
|
|
||||||
ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
|
ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
|
||||||
mem->flags);
|
mem->flags, GFP_KERNEL);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
dev_err(dev, "iommu_map failed: %d\n", ret);
|
dev_err(dev, "iommu_map failed: %d\n", ret);
|
||||||
goto free_mapping;
|
goto free_mapping;
|
||||||
|
@ -1480,7 +1480,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
|
|||||||
|
|
||||||
list_for_each_entry(d, &iommu->domain_list, next) {
|
list_for_each_entry(d, &iommu->domain_list, next) {
|
||||||
ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
|
ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
|
||||||
npage << PAGE_SHIFT, prot | IOMMU_CACHE);
|
npage << PAGE_SHIFT, prot | IOMMU_CACHE,
|
||||||
|
GFP_KERNEL);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto unwind;
|
goto unwind;
|
||||||
|
|
||||||
@ -1777,8 +1778,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
|
|||||||
size = npage << PAGE_SHIFT;
|
size = npage << PAGE_SHIFT;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = iommu_map(domain->domain, iova, phys,
|
ret = iommu_map(domain->domain, iova, phys, size,
|
||||||
size, dma->prot | IOMMU_CACHE);
|
dma->prot | IOMMU_CACHE, GFP_KERNEL);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
if (!dma->iommu_mapped) {
|
if (!dma->iommu_mapped) {
|
||||||
vfio_unpin_pages_remote(dma, iova,
|
vfio_unpin_pages_remote(dma, iova,
|
||||||
@ -1866,7 +1867,7 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
|
ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
|
||||||
IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
|
IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
|
size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
|
||||||
|
|
||||||
|
@ -792,7 +792,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
|
|||||||
r = ops->set_map(vdpa, asid, iotlb);
|
r = ops->set_map(vdpa, asid, iotlb);
|
||||||
} else {
|
} else {
|
||||||
r = iommu_map(v->domain, iova, pa, size,
|
r = iommu_map(v->domain, iova, pa, size,
|
||||||
perm_to_iommu_flags(perm));
|
perm_to_iommu_flags(perm), GFP_KERNEL);
|
||||||
}
|
}
|
||||||
if (r) {
|
if (r) {
|
||||||
vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
|
vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
|
||||||
|
@ -467,7 +467,7 @@ extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
|
|||||||
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
|
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
|
||||||
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
|
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
|
||||||
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
|
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, int prot);
|
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
|
||||||
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
|
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, int prot);
|
phys_addr_t paddr, size_t size, int prot);
|
||||||
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
|
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
|
||||||
@ -773,7 +773,7 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
|
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, int prot)
|
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
|
||||||
{
|
{
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user