linux/drivers/gpu/drm/udl/udl_gem.c
Thomas Zimmermann 08b22f65b3 drm/udl: Switch to SHMEM
Udl's GEM code and the generic SHMEM helpers are almost identical.
Replace the former with SHMEM. The dma-buf support in udl is being
replaced with generic GEM PRIME functions.

The main difference is in the caching flags for mmap'ed pages. By
default, SHMEM maps pages with (uncached) write-combining. In udl's
memory-management code, only imported buffers use write-combining;
memory pages of locally created buffer objects are mmap'ed with
caching enabled. To keep this optimization, udl provides its own
mmap function for GEM objects that fixes up the mapping flags.
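
A minimal sketch of the fixup (this is what udl_gem_object_mmap()
below does after the generic drm_gem_shmem_mmap() has set up the
mapping):

	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); /* cached */
	if (obj->import_attach) /* imports keep write-combining */
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);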

v3:
	- restore udl vmap that enables caching
v2:
	- remove obsolete code in a separate patch

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191107094307.19870-4-tzimmermann@suse.de
2019-11-08 12:30:07 +01:00


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat
 */

#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_prime.h>

#include "udl_drv.h"

/*
 * GEM object funcs
 */

static void udl_gem_object_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	/* Fbdev emulation vmaps the buffer. Unmap it here for consistency
	 * with the original udl GEM code.
	 *
	 * TODO: Switch to generic fbdev emulation and release the
	 *       GEM object with drm_gem_shmem_free_object().
	 */
	if (shmem->vaddr)
		drm_gem_shmem_vunmap(obj, shmem->vaddr);

	drm_gem_shmem_free_object(obj);
}
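
/*
 * Like the generic SHMEM mmap, but with udl's caching policy: the page
 * protection is recomputed from the VMA flags so that locally created
 * objects are mapped cached; only imported dma-buf objects keep the
 * (uncached) write-combining that the SHMEM helpers apply by default.
 */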
static int udl_gem_object_mmap(struct drm_gem_object *obj,
			       struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_shmem_mmap(obj, vma);
	if (ret)
		return ret;

	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (obj->import_attach)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}
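
/*
 * Open-coded variant of drm_gem_shmem_vmap(): imported buffers are mapped
 * through dma_buf_vmap(); locally created buffers are vmap'ed with
 * PAGE_KERNEL, i.e. cached, rather than write-combined (see the v3 note
 * above: "restore udl vmap that enables caching").
 */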
static void *udl_gem_object_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);

	if (shmem->vmap_use_count++ > 0)
		goto out;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		goto err_zero_use;

	if (obj->import_attach)
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	else
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, PAGE_KERNEL);

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

out:
	mutex_unlock(&shmem->vmap_lock);
	return shmem->vaddr;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;
	mutex_unlock(&shmem->vmap_lock);
	return ERR_PTR(ret);
}

static const struct drm_gem_object_funcs udl_gem_object_funcs = {
	.free = udl_gem_object_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = udl_gem_object_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = udl_gem_object_mmap,
};

/*
 * Helpers for struct drm_driver
 */
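
/*
 * &drm_driver.gem_create_object hook: allocates the SHMEM object itself so
 * that udl can install its own &drm_gem_object_funcs on each GEM object;
 * callers complete the initialization with drm_gem_object_init().
 */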
struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
						    size_t size)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	obj = &shmem->base;
	obj->funcs = &udl_gem_object_funcs;

	return obj;
}

struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct drm_gem_object *obj;

	obj = dev->driver->gem_create_object(dev, size);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, obj, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return to_udl_bo(obj);
}
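
/*
 * Creates a GEM object of the given size (rounded up to full pages) and
 * returns a handle to it; the handle then holds the only reference, as
 * the local reference is dropped before returning.
 */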
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_put_unlocked(&obj->base);
	*handle_p = handle;
	return 0;
}
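
/*
 * DRM_IOCTL_MODE_CREATE_DUMB implementation: the pitch is the width times
 * the (rounded-up) number of bytes per pixel, and the buffer size is
 * pitch * height, e.g. 1024 x 768 at 32 bpp gives a pitch of 4096 bytes
 * and a 3 MiB buffer.
 */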
int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;

	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}
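
/*
 * mmap on the DRM fd: drm_gem_mmap() sets up a VM_PFNMAP mapping, which is
 * switched to VM_MIXEDMAP here so that udl_gem_fault() can insert regular
 * pages with vmf_insert_page(). As above, only imported buffers are mapped
 * write-combined.
 */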
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	obj = vma->vm_private_data;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (obj->import_attach)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return ret;
}
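
/*
 * Page-fault handler: looks up the backing page for the faulting address
 * and inserts it into the VMA; faults before udl_gem_get_pages() has run
 * raise SIGBUS.
 */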
vm_fault_t udl_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	return vmf_insert_page(vma, vmf->address, page);
}
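
/*
 * Pins the backing pages of a locally created object via
 * drm_gem_get_pages(). The matching udl_gem_put_pages() distinguishes
 * imports, whose page array was allocated separately by the PRIME import
 * path and is simply kvfree()'d, from local objects, whose pages are
 * returned with drm_gem_put_pages() (neither dirtied nor marked accessed).
 */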
int udl_gem_get_pages(struct udl_gem_object *obj)
{
	struct page **pages;

	if (obj->pages)
		return 0;

	pages = drm_gem_get_pages(&obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;

	return 0;
}

void udl_gem_put_pages(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		kvfree(obj->pages);
		obj->pages = NULL;
		return;
	}

	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}
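
/*
 * Kernel mapping of the buffer: imports go through dma_buf_vmap(); local
 * objects are pinned and vmap'ed with PAGE_KERNEL, i.e. with caching
 * enabled.
 */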
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}

	vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}
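
/*
 * Final teardown of a GEM object: drops the kernel mapping and the backing
 * pages; for imports, also releases the PRIME scatter/gather table and the
 * device reference taken when the buffer was imported.
 */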
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, obj->sg);
		put_device(gem_obj->dev->dev);
	}

	if (obj->pages)
		udl_gem_put_pages(obj);

	drm_gem_free_mmap_offset(gem_obj);
}

/* the dumb interface doesn't work with the GEM straight MMAP
   interface, it expects to do MMAP on the drm fd, like normal */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	struct udl_device *udl = to_udl(dev);
	int ret = 0;

	mutex_lock(&udl->gem_lock);
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
	drm_gem_object_put_unlocked(&gobj->base);
unlock:
	mutex_unlock(&udl->gem_lock);
	return ret;
}