drm/tegra: gem: Always map SG tables for DMA-BUFs
When an importer wants to map a DMA-BUF, make sure to always actually map it, irrespective of whether the buffer is contiguous or not.

Signed-off-by: Thierry Reding <treding@nvidia.com>
parent d81f3431e6
commit 8b5a3c17a2
@@ -516,15 +516,15 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
 
 		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
 			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
-
-		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
-			goto free;
 	} else {
 		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
 				    gem->size) < 0)
 			goto free;
 	}
 
+	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+		goto free;
+
 	return sgt;
 
 free:
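For context, the sketch below shows what tegra_gem_prime_map_dma_buf() looks like with this patch applied. Only the lines visible in the hunk above are taken from the diff; the rest of the function body (the sg_table allocation, the bo->pages branch setup and the error path) is an assumption based on the surrounding code at the time, not part of this commit.

/*
 * Sketch of tegra_gem_prime_map_dma_buf() with this patch applied.
 * Everything outside the hunk above is assumed, not taken from the diff.
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		/* non-contiguous: one SG entry per backing page */
		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
	} else {
		/* contiguous: describe the buffer's IOVA range with an SG table */
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	/*
	 * The key change: the SG table is now mapped for the importer's
	 * device in both cases, not only for non-contiguous buffers.
	 */
	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

Note that dma_map_sg() returns the number of mapped entries and 0 on failure, which is why the error check compares against zero.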