drm/tegra: Implement correct DMA-BUF semantics
DMA-BUF requires that each device that accesses a DMA-BUF attaches to it separately. To do so, the host1x_bo_pin() and host1x_bo_unpin() functions need to be reimplemented so that they can return a mapping, which either represents an attachment or a map of the driver's own GEM object.

Signed-off-by: Thierry Reding <treding@nvidia.com>
parent 7a56783850
commit c6aeaf56f4
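In short, after this change a host1x client pins a buffer per device and per DMA direction and gets back a struct host1x_bo_mapping describing that mapping. A minimal usage sketch (illustrative only, not part of the patch; `dev`, `bo` and `use_address()` are hypothetical placeholders):

	struct host1x_bo_mapping *map;

	/* Attach/map the buffer for this particular device and DMA direction. */
	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/*
	 * map->phys is the DMA address for dev, map->size the mapped size and
	 * map->chunks the number of contiguous chunks in the mapping.
	 */
	use_address(map->phys, map->size);

	/* Undoes the DMA-BUF attachment or DMA API mapping and drops the BO reference. */
	host1x_bo_unpin(map);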
@@ -23,6 +23,31 @@
 MODULE_IMPORT_NS(DMA_BUF);
 
+static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
+{
+	dma_addr_t next = ~(dma_addr_t)0;
+	unsigned int count = 0, i;
+	struct scatterlist *s;
+
+	for_each_sg(sgl, s, nents, i) {
+		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
+		if (!sg_dma_len(s))
+			continue;
+
+		if (sg_dma_address(s) != next) {
+			next = sg_dma_address(s) + sg_dma_len(s);
+			count++;
+		}
+	}
+
+	return count;
+}
+
+static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
+{
+	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
+}
+
 static void tegra_bo_put(struct host1x_bo *bo)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
@@ -30,79 +55,64 @@ static void tegra_bo_put(struct host1x_bo *bo)
 	drm_gem_object_put(&obj->gem);
 }
 
-/* XXX move this into lib/scatterlist.c? */
-static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
-				  unsigned int nents, gfp_t gfp_mask)
-{
-	struct scatterlist *dst;
-	unsigned int i;
-	int err;
-
-	err = sg_alloc_table(sgt, nents, gfp_mask);
-	if (err < 0)
-		return err;
-
-	dst = sgt->sgl;
-
-	for (i = 0; i < nents; i++) {
-		sg_set_page(dst, sg_page(sg), sg->length, 0);
-		dst = sg_next(dst);
-		sg = sg_next(sg);
-	}
-
-	return 0;
-}
-
-static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
-				     dma_addr_t *phys)
+static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+					      enum dma_data_direction direction)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-	struct sg_table *sgt;
+	struct drm_gem_object *gem = &obj->gem;
+	struct host1x_bo_mapping *map;
 	int err;
 
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return ERR_PTR(-ENOMEM);
+
+	map->bo = host1x_bo_get(bo);
+	map->direction = direction;
+	map->dev = dev;
+
 	/*
-	 * If we've manually mapped the buffer object through the IOMMU, make
-	 * sure to return the IOVA address of our mapping.
-	 *
-	 * Similarly, for buffers that have been allocated by the DMA API the
-	 * physical address can be used for devices that are not attached to
-	 * an IOMMU. For these devices, callers must pass a valid pointer via
-	 * the @phys argument.
-	 *
-	 * Imported buffers were also already mapped at import time, so the
-	 * existing mapping can be reused.
+	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
 	 */
-	if (phys) {
-		*phys = obj->iova;
-		return NULL;
+	if (gem->import_attach) {
+		struct dma_buf *buf = gem->import_attach->dmabuf;
+
+		map->attach = dma_buf_attach(buf, dev);
+		if (IS_ERR(map->attach)) {
+			err = PTR_ERR(map->attach);
+			goto free;
+		}
+
+		map->sgt = dma_buf_map_attachment(map->attach, direction);
+		if (IS_ERR(map->sgt)) {
+			dma_buf_detach(buf, map->attach);
+			err = PTR_ERR(map->sgt);
+			goto free;
+		}
+
+		err = sgt_dma_count_chunks(map->sgt);
+		map->size = gem->size;
+
+		goto out;
 	}
 
 	/*
 	 * If we don't have a mapping for this buffer yet, return an SG table
 	 * so that host1x can do the mapping for us via the DMA API.
 	 */
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt)
-		return ERR_PTR(-ENOMEM);
+	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+	if (!map->sgt) {
+		err = -ENOMEM;
+		goto free;
+	}
 
 	if (obj->pages) {
 		/*
 		 * If the buffer object was allocated from the explicit IOMMU
 		 * API code paths, construct an SG table from the pages.
 		 */
-		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
-						0, obj->gem.size, GFP_KERNEL);
-		if (err < 0)
-			goto free;
-	} else if (obj->sgt) {
-		/*
-		 * If the buffer object already has an SG table but no pages
-		 * were allocated for it, it means the buffer was imported and
-		 * the SG table needs to be copied to avoid overwriting any
-		 * other potential users of the original SG table.
-		 */
-		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
-					     obj->sgt->orig_nents, GFP_KERNEL);
+		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
+						GFP_KERNEL);
 		if (err < 0)
 			goto free;
 	} else {
@@ -111,25 +121,56 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
 		 * not imported, it had to be allocated with the DMA API, so
 		 * the DMA API helper can be used.
 		 */
-		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
-				      obj->gem.size);
+		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
 		if (err < 0)
 			goto free;
 	}
 
-	return sgt;
+	err = dma_map_sgtable(dev, map->sgt, direction, 0);
+	if (err)
+		goto free_sgt;
+
+out:
+	/*
+	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
+	 * existing IOVA address of our mapping.
+	 */
+	if (!obj->mm) {
+		map->phys = sg_dma_address(map->sgt->sgl);
+		map->chunks = err;
+	} else {
+		map->phys = obj->iova;
+		map->chunks = 1;
+	}
+
+	map->size = gem->size;
+
+	return map;
+
+free_sgt:
+	sg_free_table(map->sgt);
 free:
-	kfree(sgt);
+	kfree(map->sgt);
+	kfree(map);
 	return ERR_PTR(err);
 }
 
-static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
+static void tegra_bo_unpin(struct host1x_bo_mapping *map)
 {
-	if (sgt) {
-		sg_free_table(sgt);
-		kfree(sgt);
+	if (!map)
+		return;
+
+	if (map->attach) {
+		dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
+		dma_buf_detach(map->attach->dmabuf, map->attach);
+	} else {
+		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+		sg_free_table(map->sgt);
+		kfree(map->sgt);
 	}
+
+	host1x_bo_put(map->bo);
+	kfree(map);
 }
 
 static void *tegra_bo_mmap(struct host1x_bo *bo)
@@ -74,7 +74,7 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
 
 	for (i = 0; i < 3; i++) {
 		copy->iova[i] = DMA_MAPPING_ERROR;
-		copy->sgt[i] = NULL;
+		copy->map[i] = NULL;
 	}
 
 	return &copy->base;
@@ -138,55 +138,37 @@ const struct drm_plane_funcs tegra_plane_funcs = {
 
 static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev);
 	unsigned int i;
 	int err;
 
 	for (i = 0; i < state->base.fb->format->num_planes; i++) {
 		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
-		dma_addr_t phys_addr, *phys;
-		struct sg_table *sgt;
+		struct host1x_bo_mapping *map;
 
-		/*
-		 * If we're not attached to a domain, we already stored the
-		 * physical address when the buffer was allocated. If we're
-		 * part of a group that's shared between all display
-		 * controllers, we've also already mapped the framebuffer
-		 * through the SMMU. In both cases we can short-circuit the
-		 * code below and retrieve the stored IOV address.
-		 */
-		if (!domain || dc->client.group)
-			phys = &phys_addr;
-		else
-			phys = NULL;
-
-		sgt = host1x_bo_pin(dc->dev, &bo->base, phys);
-		if (IS_ERR(sgt)) {
-			err = PTR_ERR(sgt);
+		map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE);
+		if (IS_ERR(map)) {
+			err = PTR_ERR(map);
 			goto unpin;
 		}
 
-		if (sgt) {
-			err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
-			if (err)
-				goto unpin;
-
+		if (!dc->client.group) {
 			/*
 			 * The display controller needs contiguous memory, so
 			 * fail if the buffer is discontiguous and we fail to
 			 * map its SG table to a single contiguous chunk of
 			 * I/O virtual memory.
 			 */
-			if (sgt->nents > 1) {
+			if (map->chunks > 1) {
 				err = -EINVAL;
 				goto unpin;
 			}
 
-			state->iova[i] = sg_dma_address(sgt->sgl);
-			state->sgt[i] = sgt;
+			state->iova[i] = map->phys;
 		} else {
-			state->iova[i] = phys_addr;
+			state->iova[i] = bo->iova;
 		}
+
+		state->map[i] = map;
 	}
 
 	return 0;
@@ -195,15 +177,9 @@ unpin:
 	dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
 
 	while (i--) {
-		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
-		struct sg_table *sgt = state->sgt[i];
-
-		if (sgt)
-			dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
-
-		host1x_bo_unpin(dc->dev, &bo->base, sgt);
+		host1x_bo_unpin(state->map[i]);
 		state->iova[i] = DMA_MAPPING_ERROR;
-		state->sgt[i] = NULL;
+		state->map[i] = NULL;
 	}
 
 	return err;
@@ -214,15 +190,9 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
 	unsigned int i;
 
 	for (i = 0; i < state->base.fb->format->num_planes; i++) {
-		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
-		struct sg_table *sgt = state->sgt[i];
-
-		if (sgt)
-			dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
-
-		host1x_bo_unpin(dc->dev, &bo->base, sgt);
+		host1x_bo_unpin(state->map[i]);
 		state->iova[i] = DMA_MAPPING_ERROR;
-		state->sgt[i] = NULL;
+		state->map[i] = NULL;
 	}
 }
@@ -43,7 +43,7 @@ struct tegra_plane_legacy_blending_state {
 struct tegra_plane_state {
 	struct drm_plane_state base;
 
-	struct sg_table *sgt[3];
+	struct host1x_bo_mapping *map[3];
 	dma_addr_t iova[3];
 
 	struct tegra_bo_tiling tiling;
@@ -64,33 +64,61 @@ static void gather_bo_put(struct host1x_bo *host_bo)
 	kref_put(&bo->ref, gather_bo_release);
 }
 
-static struct sg_table *
-gather_bo_pin(struct device *dev, struct host1x_bo *host_bo, dma_addr_t *phys)
+static struct host1x_bo_mapping *
+gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
 {
-	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
-	struct sg_table *sgt;
+	struct gather_bo *gather = container_of(bo, struct gather_bo, base);
+	struct host1x_bo_mapping *map;
 	int err;
 
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt)
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
 		return ERR_PTR(-ENOMEM);
 
-	err = dma_get_sgtable(bo->dev, sgt, bo->gather_data, bo->gather_data_dma,
-			      bo->gather_data_words * 4);
-	if (err) {
-		kfree(sgt);
-		return ERR_PTR(err);
+	map->bo = host1x_bo_get(bo);
+	map->direction = direction;
+	map->dev = dev;
+
+	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+	if (!map->sgt) {
+		err = -ENOMEM;
+		goto free;
 	}
 
-	return sgt;
+	err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
+			      gather->gather_data_words * 4);
+	if (err)
+		goto free_sgt;
+
+	err = dma_map_sgtable(dev, map->sgt, direction, 0);
+	if (err)
+		goto free_sgt;
+
+	map->phys = sg_dma_address(map->sgt->sgl);
+	map->size = gather->gather_data_words * 4;
+	map->chunks = err;
+
+	return map;
+
+free_sgt:
+	sg_free_table(map->sgt);
+	kfree(map->sgt);
+free:
+	kfree(map);
+	return ERR_PTR(err);
 }
 
-static void gather_bo_unpin(struct device *dev, struct sg_table *sgt)
+static void gather_bo_unpin(struct host1x_bo_mapping *map)
 {
-	if (sgt) {
-		sg_free_table(sgt);
-		kfree(sgt);
-	}
+	if (!map)
+		return;
+
+	dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+	sg_free_table(map->sgt);
+	kfree(map->sgt);
+	host1x_bo_put(map->bo);
+
+	kfree(map);
 }
 
 static void *gather_bo_mmap(struct host1x_bo *host_bo)
@@ -17,11 +17,7 @@ static void tegra_drm_mapping_release(struct kref *ref)
 	struct tegra_drm_mapping *mapping =
 		container_of(ref, struct tegra_drm_mapping, ref);
 
-	if (mapping->sgt)
-		dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
-				  DMA_ATTR_SKIP_CPU_SYNC);
-
-	host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+	host1x_bo_unpin(mapping->map);
 	host1x_bo_put(mapping->bo);
 
 	kfree(mapping);
@@ -159,6 +155,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
 	struct drm_tegra_channel_map *args = data;
 	struct tegra_drm_mapping *mapping;
 	struct tegra_drm_context *context;
+	enum dma_data_direction direction;
 	int err = 0;
 
 	if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
@@ -180,68 +177,53 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
 
 	kref_init(&mapping->ref);
 
-	mapping->dev = context->client->base.dev;
 	mapping->bo = tegra_gem_lookup(file, args->handle);
 	if (!mapping->bo) {
 		err = -EINVAL;
-		goto unlock;
+		goto free;
 	}
 
-	if (context->client->base.group) {
-		/* IOMMU domain managed directly using IOMMU API */
-		host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova);
-	} else {
-		switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
-		case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
-			mapping->direction = DMA_BIDIRECTIONAL;
-			break;
+	switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
+	case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
+		direction = DMA_BIDIRECTIONAL;
+		break;
 
-		case DRM_TEGRA_CHANNEL_MAP_WRITE:
-			mapping->direction = DMA_FROM_DEVICE;
-			break;
+	case DRM_TEGRA_CHANNEL_MAP_WRITE:
+		direction = DMA_FROM_DEVICE;
+		break;
 
-		case DRM_TEGRA_CHANNEL_MAP_READ:
-			mapping->direction = DMA_TO_DEVICE;
-			break;
+	case DRM_TEGRA_CHANNEL_MAP_READ:
+		direction = DMA_TO_DEVICE;
+		break;
 
-		default:
-			return -EINVAL;
-		}
+	default:
+		err = -EINVAL;
+		goto put_gem;
+	}
 
-		mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL);
-		if (IS_ERR(mapping->sgt)) {
-			err = PTR_ERR(mapping->sgt);
-			goto put_gem;
-		}
-
-		err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction,
-				      DMA_ATTR_SKIP_CPU_SYNC);
-		if (err)
-			goto unpin;
-
-		mapping->iova = sg_dma_address(mapping->sgt->sgl);
+	mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction);
+	if (IS_ERR(mapping->map)) {
+		err = PTR_ERR(mapping->map);
+		goto put_gem;
 	}
 
+	mapping->iova = mapping->map->phys;
 	mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
 
 	err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
 		       GFP_KERNEL);
 	if (err < 0)
-		goto unmap;
+		goto unpin;
 
 	mutex_unlock(&fpriv->lock);
 
 	return 0;
 
-unmap:
-	if (mapping->sgt) {
-		dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction,
-				  DMA_ATTR_SKIP_CPU_SYNC);
-	}
 unpin:
-	host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt);
+	host1x_bo_unpin(mapping->map);
 put_gem:
 	host1x_bo_put(mapping->bo);
+free:
 	kfree(mapping);
 unlock:
 	mutex_unlock(&fpriv->lock);
@@ -27,10 +27,9 @@ struct tegra_drm_file {
 struct tegra_drm_mapping {
 	struct kref ref;
 
-	struct device *dev;
+	struct host1x_bo_mapping *map;
 	struct host1x_bo *bo;
-	struct sg_table *sgt;
-	enum dma_data_direction direction;
+
 	dma_addr_t iova;
 	dma_addr_t iova_end;
 };
@@ -134,20 +134,20 @@ EXPORT_SYMBOL(host1x_job_add_wait);
 
 static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 {
+	unsigned long mask = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
 	struct host1x_client *client = job->client;
 	struct device *dev = client->dev;
 	struct host1x_job_gather *g;
-	struct iommu_domain *domain;
-	struct sg_table *sgt;
 	unsigned int i;
 	int err;
 
-	domain = iommu_get_domain_for_dev(dev);
 	job->num_unpins = 0;
 
 	for (i = 0; i < job->num_relocs; i++) {
 		struct host1x_reloc *reloc = &job->relocs[i];
-		dma_addr_t phys_addr, *phys;
+		enum dma_data_direction direction;
+		struct host1x_bo_mapping *map;
+		struct host1x_bo *bo;
 
 		reloc->target.bo = host1x_bo_get(reloc->target.bo);
 		if (!reloc->target.bo) {
|
|||||||
goto unpin;
|
goto unpin;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
bo = reloc->target.bo;
|
||||||
* If the client device is not attached to an IOMMU, the
|
|
||||||
* physical address of the buffer object can be used.
|
|
||||||
*
|
|
||||||
* Similarly, when an IOMMU domain is shared between all
|
|
||||||
* host1x clients, the IOVA is already available, so no
|
|
||||||
* need to map the buffer object again.
|
|
||||||
*
|
|
||||||
* XXX Note that this isn't always safe to do because it
|
|
||||||
* relies on an assumption that no cache maintenance is
|
|
||||||
* needed on the buffer objects.
|
|
||||||
*/
|
|
||||||
if (!domain || client->group)
|
|
||||||
phys = &phys_addr;
|
|
||||||
else
|
|
||||||
phys = NULL;
|
|
||||||
|
|
||||||
sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
|
switch (reloc->flags & mask) {
|
||||||
if (IS_ERR(sgt)) {
|
case HOST1X_RELOC_READ:
|
||||||
err = PTR_ERR(sgt);
|
direction = DMA_TO_DEVICE;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case HOST1X_RELOC_WRITE:
|
||||||
|
direction = DMA_FROM_DEVICE;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
|
||||||
|
direction = DMA_BIDIRECTIONAL;
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
err = -EINVAL;
|
||||||
goto unpin;
|
goto unpin;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (sgt) {
|
map = host1x_bo_pin(dev, bo, direction);
|
||||||
unsigned long mask = HOST1X_RELOC_READ |
|
if (IS_ERR(map)) {
|
||||||
HOST1X_RELOC_WRITE;
|
err = PTR_ERR(map);
|
||||||
enum dma_data_direction dir;
|
goto unpin;
|
||||||
|
|
||||||
switch (reloc->flags & mask) {
|
|
||||||
case HOST1X_RELOC_READ:
|
|
||||||
dir = DMA_TO_DEVICE;
|
|
||||||
break;
|
|
||||||
|
|
||||||
case HOST1X_RELOC_WRITE:
|
|
||||||
dir = DMA_FROM_DEVICE;
|
|
||||||
break;
|
|
||||||
|
|
||||||
case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
|
|
||||||
dir = DMA_BIDIRECTIONAL;
|
|
||||||
break;
|
|
||||||
|
|
||||||
default:
|
|
||||||
err = -EINVAL;
|
|
||||||
goto unpin;
|
|
||||||
}
|
|
||||||
|
|
||||||
err = dma_map_sgtable(dev, sgt, dir, 0);
|
|
||||||
if (err)
|
|
||||||
goto unpin;
|
|
||||||
|
|
||||||
job->unpins[job->num_unpins].dev = dev;
|
|
||||||
job->unpins[job->num_unpins].dir = dir;
|
|
||||||
phys_addr = sg_dma_address(sgt->sgl);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
job->addr_phys[job->num_unpins] = phys_addr;
|
/*
|
||||||
job->unpins[job->num_unpins].bo = reloc->target.bo;
|
* host1x clients are generally not able to do scatter-gather themselves, so fail
|
||||||
job->unpins[job->num_unpins].sgt = sgt;
|
* if the buffer is discontiguous and we fail to map its SG table to a single
|
||||||
|
* contiguous chunk of I/O virtual memory.
|
||||||
|
*/
|
||||||
|
if (map->chunks > 1) {
|
||||||
|
err = -EINVAL;
|
||||||
|
goto unpin;
|
||||||
|
}
|
||||||
|
|
||||||
|
job->addr_phys[job->num_unpins] = map->phys;
|
||||||
|
job->unpins[job->num_unpins].map = map;
|
||||||
job->num_unpins++;
|
job->num_unpins++;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -224,12 +204,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 		return 0;
 
 	for (i = 0; i < job->num_cmds; i++) {
+		struct host1x_bo_mapping *map;
 		size_t gather_size = 0;
 		struct scatterlist *sg;
-		dma_addr_t phys_addr;
 		unsigned long shift;
 		struct iova *alloc;
-		dma_addr_t *phys;
 		unsigned int j;
 
 		if (job->cmds[i].is_wait)
@@ -243,25 +222,16 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 			goto unpin;
 		}
 
-		/**
-		 * If the host1x is not attached to an IOMMU, there is no need
-		 * to map the buffer object for the host1x, since the physical
-		 * address can simply be used.
-		 */
-		if (!iommu_get_domain_for_dev(host->dev))
-			phys = &phys_addr;
-		else
-			phys = NULL;
-
-		sgt = host1x_bo_pin(host->dev, g->bo, phys);
-		if (IS_ERR(sgt)) {
-			err = PTR_ERR(sgt);
-			goto put;
+		map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE);
+		if (IS_ERR(map)) {
+			err = PTR_ERR(map);
+			goto unpin;
 		}
 
 		if (host->domain) {
-			for_each_sgtable_sg(sgt, sg, j)
+			for_each_sgtable_sg(map->sgt, sg, j)
 				gather_size += sg->length;
 
 			gather_size = iova_align(&host->iova, gather_size);
 
 			shift = iova_shift(&host->iova);
@@ -272,33 +242,23 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 				goto put;
 			}
 
-			err = iommu_map_sgtable(host->domain,
-						iova_dma_addr(&host->iova, alloc),
-						sgt, IOMMU_READ);
+			err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc),
+						map->sgt, IOMMU_READ);
 			if (err == 0) {
 				__free_iova(&host->iova, alloc);
 				err = -EINVAL;
 				goto put;
 			}
 
-			job->unpins[job->num_unpins].size = gather_size;
-			phys_addr = iova_dma_addr(&host->iova, alloc);
-		} else if (sgt) {
-			err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
-			if (err)
-				goto put;
-
-			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
-			job->unpins[job->num_unpins].dev = host->dev;
-			phys_addr = sg_dma_address(sgt->sgl);
+			map->phys = iova_dma_addr(&host->iova, alloc);
+			map->size = gather_size;
 		}
 
-		job->addr_phys[job->num_unpins] = phys_addr;
-		job->gather_addr_phys[i] = phys_addr;
-
-		job->unpins[job->num_unpins].bo = g->bo;
-		job->unpins[job->num_unpins].sgt = sgt;
+		job->addr_phys[job->num_unpins] = map->phys;
+		job->unpins[job->num_unpins].map = map;
 		job->num_unpins++;
+
+		job->gather_addr_phys[i] = map->phys;
 	}
 
 	return 0;
@@ -690,22 +650,16 @@ void host1x_job_unpin(struct host1x_job *job)
 	unsigned int i;
 
 	for (i = 0; i < job->num_unpins; i++) {
-		struct host1x_job_unpin_data *unpin = &job->unpins[i];
-		struct device *dev = unpin->dev ?: host->dev;
-		struct sg_table *sgt = unpin->sgt;
+		struct host1x_bo_mapping *map = job->unpins[i].map;
+		struct host1x_bo *bo = map->bo;
 
-		if (!job->enable_firewall && unpin->size && host->domain) {
-			iommu_unmap(host->domain, job->addr_phys[i],
-				    unpin->size);
-			free_iova(&host->iova,
-				  iova_pfn(&host->iova, job->addr_phys[i]));
+		if (!job->enable_firewall && map->size && host->domain) {
+			iommu_unmap(host->domain, job->addr_phys[i], map->size);
+			free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i]));
 		}
 
-		if (unpin->dev && sgt)
-			dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);
-
-		host1x_bo_unpin(dev, unpin->bo, sgt);
-		host1x_bo_put(unpin->bo);
+		host1x_bo_unpin(map);
+		host1x_bo_put(bo);
 	}
 
 	job->num_unpins = 0;
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct host1x_job_unpin_data {
|
struct host1x_job_unpin_data {
|
||||||
struct host1x_bo *bo;
|
struct host1x_bo_mapping *map;
|
||||||
struct sg_table *sgt;
|
|
||||||
struct device *dev;
|
|
||||||
size_t size;
|
|
||||||
enum dma_data_direction dir;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -7,6 +7,7 @@
 #define __LINUX_HOST1X_H
 
 #include <linux/device.h>
+#include <linux/dma-direction.h>
 #include <linux/types.h>
 
 enum host1x_class {
@@ -82,12 +83,23 @@ struct host1x_client {
 struct host1x_bo;
 struct sg_table;
 
+struct host1x_bo_mapping {
+	struct dma_buf_attachment *attach;
+	enum dma_data_direction direction;
+	struct host1x_bo *bo;
+	struct sg_table *sgt;
+	unsigned int chunks;
+	struct device *dev;
+	dma_addr_t phys;
+	size_t size;
+};
+
 struct host1x_bo_ops {
 	struct host1x_bo *(*get)(struct host1x_bo *bo);
 	void (*put)(struct host1x_bo *bo);
-	struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
-				dma_addr_t *phys);
-	void (*unpin)(struct device *dev, struct sg_table *sgt);
+	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
+					 enum dma_data_direction dir);
+	void (*unpin)(struct host1x_bo_mapping *map);
 	void *(*mmap)(struct host1x_bo *bo);
 	void (*munmap)(struct host1x_bo *bo, void *addr);
 };
@@ -112,17 +124,15 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
 	bo->ops->put(bo);
 }
 
-static inline struct sg_table *host1x_bo_pin(struct device *dev,
-					     struct host1x_bo *bo,
-					     dma_addr_t *phys)
+static inline struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+						       enum dma_data_direction dir)
 {
-	return bo->ops->pin(dev, bo, phys);
+	return bo->ops->pin(dev, bo, dir);
}
 
-static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
-				   struct sg_table *sgt)
+static inline void host1x_bo_unpin(struct host1x_bo_mapping *map)
 {
-	bo->ops->unpin(dev, sgt);
+	map->bo->ops->unpin(map);
 }
 
 static inline void *host1x_bo_mmap(struct host1x_bo *bo)