Some helper functions to manage an array of gem objects.

v9: use dma_resv_lock_interruptible.
v6: - add ticket to struct virtio_gpu_object_array.
    - add virtio_gpu_array_{lock,unlock}_resv helpers.
    - add virtio_gpu_array_add_fence helper.
v5: some small optimizations (Chia-I Wu).
v4: make them virtio-private instead of generic helpers.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190829103301.3539-8-kraxel@redhat.com
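
For orientation, a minimal sketch of how a caller might use the new array helpers. Only the virtio_gpu_array_* functions and struct virtio_gpu_object_array below are real; the caller, queue_host_command(), and the surrounding flow are hypothetical stand-ins for the driver's actual submit path:

static int submit_with_objects(struct drm_file *file, u32 *handles, u32 nents,
			       struct dma_fence *fence)
{
	struct virtio_gpu_object_array *objs;
	int ret;

	objs = virtio_gpu_array_from_handles(file, handles, nents);
	if (!objs)
		return -ENOENT;

	/* Lock every reservation object (ww-acquire context for nents > 1). */
	ret = virtio_gpu_array_lock_resv(objs);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		return ret;
	}

	queue_host_command(objs);		/* hypothetical: hand objs to the command */
	virtio_gpu_array_add_fence(objs, fence);
	virtio_gpu_array_unlock_resv(objs);

	/*
	 * In this sketch the references are assumed to be dropped via
	 * virtio_gpu_array_put_free() once the host has finished with the
	 * command (not shown here).
	 */
	return 0;
}
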
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);

	if (obj)
		virtio_gpu_object_unref(&obj);
}

struct virtio_gpu_object*
virtio_gpu_alloc_object(struct drm_device *dev,
			struct virtio_gpu_object_params *params,
			struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;

	ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
	if (ret)
		return ERR_PTR(ret);

	return obj;
}

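/*
 * Create a buffer object and a GEM handle for it.  On success the handle
 * owns the only reference; the allocation reference is dropped before
 * returning.
 */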
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	obj = virtio_gpu_alloc_object(dev, params, NULL);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->gem_base);
		return ret;
	}

	*obj_p = &obj->gem_base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&obj->gem_base);

	*handle_p = handle;
	return 0;
}

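/*
 * Dumb buffers are always 32 bpp XRGB8888: pitch is width * 4 and the
 * buffer size is rounded up to a full page.
 */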
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		goto fail;

	args->pitch = pitch;
	return ret;

fail:
	return ret;
}

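/* Look up the handle and report the fake mmap offset of its buffer object. */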
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	obj = gem_to_virtio_gpu_obj(gobj);
	*offset_p = virtio_gpu_object_mmap_offset(obj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

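/*
 * Called whenever a DRM client obtains a handle to the object.  With virgl
 * 3D enabled the resource is attached to the client's rendering context on
 * the host.
 */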
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
	int r;

	if (!vgdev->has_virgl_3d)
		return 0;

	r = virtio_gpu_object_reserve(qobj);
	if (r)
		return r;

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       qobj->hw_res_handle);
	virtio_gpu_object_unreserve(qobj);
	return 0;
}

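/* Counterpart of open: detach the resource from the client's host context. */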
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
	int r;

	if (!vgdev->has_virgl_3d)
		return;

	r = virtio_gpu_object_reserve(qobj);
	if (r)
		return;

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       qobj->hw_res_handle);
	virtio_gpu_object_unreserve(qobj);
}

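/* Allocate an (empty) object array with room for @nents buffer objects. */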
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;
	size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;

	objs = kmalloc(size, GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}

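/*
 * Build an object array from an array of GEM handles.  Each lookup takes a
 * reference; if any lookup fails, all references taken so far are dropped
 * and NULL is returned.
 */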
struct virtio_gpu_object_array*
|
|
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
|
|
{
|
|
struct virtio_gpu_object_array *objs;
|
|
u32 i;
|
|
|
|
objs = virtio_gpu_array_alloc(nents);
|
|
if (!objs)
|
|
return NULL;
|
|
|
|
for (i = 0; i < nents; i++) {
|
|
objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
|
|
if (!objs->objs[i]) {
|
|
objs->nents = i;
|
|
virtio_gpu_array_put_free(objs);
|
|
return NULL;
|
|
}
|
|
}
|
|
objs->nents = i;
|
|
return objs;
|
|
}
|
|
|
|
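/* Append @obj to the array, taking a reference; WARNs once if the array is full. */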
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}

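/*
 * Lock the reservation objects of all buffers in the array.  A single
 * object is locked directly (interruptible); multiple objects go through
 * drm_gem_lock_reservations() with the embedded ww acquire ticket to avoid
 * deadlocks.
 */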
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	return ret;
}

void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}

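/* Attach @fence as the exclusive fence of every reservation in the array. */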
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	int i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
}

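/* Drop all object references held by the array and free the array itself. */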
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put_unlocked(objs->objs[i]);
	virtio_gpu_array_free(objs);
}