drm/virtio: support mapping exported vram
Implement virtgpu specific map_dma_buf callback to support mapping exported vram object dma-bufs. The dma-buf callback is used directly, as vram objects don't have backing pages and thus can't implement the drm_gem_object_funcs.get_sg_table callback. Signed-off-by: David Stevens <stevensd@chromium.org> Link: http://patchwork.freedesktop.org/patch/msgid/20210813005441.608293-1-stevensd@chromium.org Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
This commit is contained in:
parent
f492283b15
commit
ea5ea3d8a1
@@ -26,6 +26,7 @@
|
||||
#ifndef VIRTIO_DRV_H
|
||||
#define VIRTIO_DRV_H
|
||||
|
||||
#include <linux/dma-direction.h>
|
||||
#include <linux/virtio.h>
|
||||
#include <linux/virtio_ids.h>
|
||||
#include <linux/virtio_config.h>
|
||||
@@ -459,4 +460,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
|
||||
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
|
||||
struct virtio_gpu_object_params *params,
|
||||
struct virtio_gpu_object **bo_ptr);
|
||||
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
|
||||
struct device *dev,
|
||||
enum dma_data_direction dir);
|
||||
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
|
||||
struct sg_table *sgt,
|
||||
enum dma_data_direction dir);
|
||||
|
||||
#endif
|
||||
|
@@ -43,13 +43,41 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct sg_table *
|
||||
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct drm_gem_object *obj = attach->dmabuf->priv;
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
|
||||
|
||||
if (virtio_gpu_is_vram(bo))
|
||||
return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
|
||||
|
||||
return drm_gem_map_dma_buf(attach, dir);
|
||||
}
|
||||
|
||||
static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
|
||||
struct sg_table *sgt,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct drm_gem_object *obj = attach->dmabuf->priv;
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
|
||||
|
||||
if (virtio_gpu_is_vram(bo)) {
|
||||
virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
|
||||
return;
|
||||
}
|
||||
|
||||
drm_gem_unmap_dma_buf(attach, sgt, dir);
|
||||
}
|
||||
|
||||
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
|
||||
.ops = {
|
||||
.cache_sgt_mapping = true,
|
||||
.attach = virtio_dma_buf_attach,
|
||||
.detach = drm_gem_map_detach,
|
||||
.map_dma_buf = drm_gem_map_dma_buf,
|
||||
.unmap_dma_buf = drm_gem_unmap_dma_buf,
|
||||
.map_dma_buf = virtgpu_gem_map_dma_buf,
|
||||
.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
|
||||
.release = drm_gem_dmabuf_release,
|
||||
.mmap = drm_gem_dmabuf_mmap,
|
||||
.vmap = drm_gem_dmabuf_vmap,
|
||||
|
@@ -1,6 +1,8 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include "virtgpu_drv.h"
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
|
||||
{
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
|
||||
@@ -64,6 +66,65 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * virtio_gpu_vram_map_dma_buf - map an exported vram object for an importer
 * @bo: vram-backed virtio-gpu object behind the dma-buf
 * @dev: device importing the buffer
 * @dir: DMA transfer direction
 *
 * Returns a single-entry sg_table covering the object's vram region, an
 * empty stub sg_table when the buffer is not host-mappable but the importer
 * can reach it via its resource UUID, or an ERR_PTR on failure.  The result
 * must be released with virtio_gpu_vram_unmap_dma_buf().
 */
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
					     struct device *dev,
					     enum dma_data_direction dir)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	struct sg_table *sgt;
	dma_addr_t addr;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
		// Virtio devices can access the dma-buf via its UUID. Return a stub
		// sg_table so the dma-buf API still works.
		if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
			ret = -EIO;
			goto out;
		}
		return sgt;
	}

	/* One contiguous vram region -> single-entry table. */
	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		goto out;

	addr = dma_map_resource(dev, vram->vram_node.start,
				vram->vram_node.size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(dev, addr);
	if (ret)
		goto out;

	/*
	 * vram has no struct page backing, so only the DMA address/length
	 * side of the scatterlist entry is meaningful (page is NULL).
	 */
	sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
	sg_dma_address(sgt->sgl) = addr;
	sg_dma_len(sgt->sgl) = vram->vram_node.size;

	return sgt;
out:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(ret);
}
|
||||
|
||||
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
|
||||
struct sg_table *sgt,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
if (sgt->nents) {
|
||||
dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
|
||||
sg_dma_len(sgt->sgl), dir,
|
||||
DMA_ATTR_SKIP_CPU_SYNC);
|
||||
}
|
||||
sg_free_table(sgt);
|
||||
kfree(sgt);
|
||||
}
|
||||
|
||||
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
|
||||
.open = virtio_gpu_gem_object_open,
|
||||
.close = virtio_gpu_gem_object_close,
|
||||
|
Loading…
x
Reference in New Issue
Block a user