Merge branch 'prime-merge' of ssh://people.freedesktop.org/~airlied/linux into drm-core-next

* 'prime-merge' of ssh://people.freedesktop.org/~airlied/linux:
  drm/radeon: add PRIME support (v2)
  i915: add dmabuf/prime buffer sharing support.
  nouveau: add PRIME support
  ttm: add prime sharing support to TTM (v2)
  udl: add prime fd->handle support.
  drm/prime: add exported buffers to current fprivs imported buffer list (v2)
  drm/prime: introduce sg->pages/addr arrays helper

commit 5b2ba70091
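The merge turns on DRIVER_PRIME and routes the generic PRIME ioctls into radeon, i915, nouveau and udl. For orientation, a minimal userspace sketch of the export side using DRM_IOCTL_PRIME_HANDLE_TO_FD, which the drivers below wire up via .prime_handle_to_fd; export_gem_handle() is an illustrative name, not part of this series:

/* Sketch: export a GEM handle as a dma-buf fd (userspace side).
 * Assumes `fd` is an open DRM device node and `handle` is a valid GEM
 * handle on it; error handling trimmed for brevity. */
#include <fcntl.h>      /* O_CLOEXEC, used by DRM_CLOEXEC */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int export_gem_handle(int fd, uint32_t handle)
{
	struct drm_prime_handle args = {
		.handle = handle,
		.flags  = DRM_CLOEXEC,
		.fd     = -1,
	};

	if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
		return -1;
	return args.fd; /* pass to another device, then PRIME_FD_TO_HANDLE */
}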
drivers/gpu/drm/drm_gem.c:

@@ -201,6 +201,19 @@ free:
 }
 EXPORT_SYMBOL(drm_gem_object_alloc);

+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+	if (obj->import_attach) {
+		drm_prime_remove_imported_buf_handle(&filp->prime,
+				obj->import_attach->dmabuf);
+	}
+	if (obj->export_dma_buf) {
+		drm_prime_remove_imported_buf_handle(&filp->prime,
+				obj->export_dma_buf);
+	}
+}
+
 /**
  * Removes the mapping from handle to filp for this object.
  */
@@ -233,9 +246,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);

-	if (obj->import_attach)
-		drm_prime_remove_imported_buf_handle(&filp->prime,
-				obj->import_attach->dmabuf);
+	drm_gem_remove_prime_handles(obj, filp);

 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, filp);
@@ -530,9 +541,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;

-	if (obj->import_attach)
-		drm_prime_remove_imported_buf_handle(&file_priv->prime,
-				obj->import_attach->dmabuf);
+	drm_gem_remove_prime_handles(obj, file_priv);

 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
drivers/gpu/drm/drm_prime.c:

@@ -68,6 +68,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 {
 	struct drm_gem_object *obj;
 	void *buf;
+	int ret;

 	obj = drm_gem_object_lookup(dev, file_priv, handle);
 	if (!obj)
@@ -100,6 +101,17 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 		obj->export_dma_buf = buf;
 		*prime_fd = dma_buf_fd(buf, flags);
 	}
+	/* if we've exported this buffer the cheat and add it to the import list
+	 * so we get the correct handle back
+	 */
+	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+			obj->export_dma_buf, handle);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		mutex_unlock(&file_priv->prime.lock);
+		return ret;
+	}
+
 	mutex_unlock(&file_priv->prime.lock);
 	return 0;
 }
@@ -227,6 +239,42 @@ out:
 }
 EXPORT_SYMBOL(drm_prime_pages_to_sg);

+/* export an sg table into an array of pages and addresses
+   this is currently required by the TTM driver in order to do correct fault
+   handling */
+int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+				     dma_addr_t *addrs, int max_pages)
+{
+	unsigned count;
+	struct scatterlist *sg;
+	struct page *page;
+	u32 len, offset;
+	int pg_index;
+	dma_addr_t addr;
+
+	pg_index = 0;
+	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+		len = sg->length;
+		offset = sg->offset;
+		page = sg_page(sg);
+		addr = sg_dma_address(sg);
+
+		while (len > 0) {
+			if (WARN_ON(pg_index >= max_pages))
+				return -1;
+			pages[pg_index] = page;
+			if (addrs)
+				addrs[pg_index] = addr;
+
+			page++;
+			addr += PAGE_SIZE;
+			len -= PAGE_SIZE;
+			pg_index++;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
 /* helper function to cleanup a GEM/prime object */
 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
 {
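A note on the new helper: drm_prime_sg_to_page_addr_arrays() walks each scatterlist entry one PAGE_SIZE step at a time, so a single 4-page sg entry fills four consecutive slots of pages[] (and of addrs[], when given). A minimal sketch of how a TTM backend consumes it when populating an imported "slave" tt, mirroring the nouveau/radeon ttm_tt_populate hunks later in this diff; the function name is illustrative, only the DRM/TTM symbols it calls come from this series:

/* Sketch, not part of this series: fill a ttm_tt's pages[] and DMA
 * addresses from an imported sg table.  TTM_PAGE_FLAG_SG and ttm->sg
 * are the fields this merge introduces. */
static int example_tt_populate_slave(struct ttm_tt *ttm, dma_addr_t *dma_address)
{
	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG) || !ttm->sg)
		return -EINVAL;	/* only valid for dma-buf backed objects */

	/* returns -1 (not an errno) if the sg covers more than num_pages */
	if (drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					     dma_address, ttm->num_pages))
		return -ENOMEM;

	ttm->state = tt_unbound;
	return 0;
}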
drivers/gpu/drm/i915/Makefile:

@@ -38,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \
 	  dvo_tfp410.o \
-	  dvo_sil164.o
+	  dvo_sil164.o \
+	  i915_gem_dmabuf.o

 i915-$(CONFIG_COMPAT) += i915_ioc32.o
drivers/gpu/drm/i915/i915_drv.c:

@@ -1039,7 +1039,7 @@ static struct drm_driver driver = {
 	 */
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
 	.open = i915_driver_open,
@@ -1062,6 +1062,12 @@ static struct drm_driver driver = {
 	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = i915_gem_prime_export,
+	.gem_prime_import = i915_gem_prime_import,
+
 	.dumb_create = i915_gem_dumb_create,
 	.dumb_map_offset = i915_gem_mmap_gtt,
 	.dumb_destroy = i915_gem_dumb_destroy,
drivers/gpu/drm/i915/i915_drv.h:

@@ -940,6 +940,8 @@ struct drm_i915_gem_object {
 	struct scatterlist *sg_list;
 	int num_sg;

+	/* prime dma-buf support */
+	struct sg_table *sg_table;
 	/**
 	 * Used for performing relocations during execbuffer insertion.
 	 */
@@ -1245,6 +1247,8 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);

+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+				  gfp_t gfpmask);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
@@ -1342,6 +1346,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);

+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				      struct drm_gem_object *gem_obj, int flags);
+
+
 /* i915_gem_gtt.c */
 int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
drivers/gpu/drm/i915/i915_gem.c:

@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
+#include <linux/dma-buf.h>

 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
@@ -538,6 +539,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}

+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trace_i915_gem_object_pread(obj, args->offset, args->size);

 	ret = i915_gem_shmem_pread(dev, obj, args, file);
@@ -880,6 +889,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}

+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

 	ret = -EFAULT;
@@ -1021,6 +1038,14 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;

+	/* prime objects have no backing filp to GEM mmap
+	 * pages from.
+	 */
+	if (!obj->filp) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EINVAL;
+	}
+
 	addr = vm_mmap(obj->filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
@@ -1302,8 +1327,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }

-
-static int
+int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 			      gfp_t gfpmask)
 {
@@ -1312,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 	struct inode *inode;
 	struct page *page;

+	if (obj->pages || obj->sg_table)
+		return 0;
+
 	/* Get the list of pages out of our struct file. They'll be pinned
 	 * at this point until we release them.
 	 */
@@ -1353,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int i;

+	if (!obj->pages)
+		return;
+
 	BUG_ON(obj->madv == __I915_MADV_PURGED);

 	if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -3327,6 +3357,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)

 	trace_i915_gem_object_destroy(obj);

+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg_table);
+
 	if (obj->phys_obj)
 		i915_gem_detach_phys_object(dev, obj);
drivers/gpu/drm/i915/i915_gem_dmabuf.c (new file, 171 lines):

@@ -0,0 +1,171 @@
/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include "drmP.h"
#include "i915_drv.h"
#include <linux/dma-buf.h>

struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
				      enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct drm_device *dev = obj->base.dev;
	int npages = obj->base.size / PAGE_SIZE;
	struct sg_table *sg = NULL;
	int ret;
	int nents;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (!obj->pages) {
		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
		if (ret)
			goto out;
	}

	/* link the pages into an SG then map the sg */
	sg = drm_prime_pages_to_sg(obj->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
out:
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
			    struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;

	if (obj->base.export_dma_buf == dma_buf) {
		/* drop the reference on the export fd holds */
		obj->base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&obj->base);
	}
}

static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = i915_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
};

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	return dma_buf_export(obj, &i915_dmabuf_ops,
			      obj->base.size, 0600);
}

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct drm_i915_gem_object *obj;
	int npages;
	int size;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf->priv;
		/* is it from our device? */
		if (obj->base.dev == dev) {
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	size = dma_buf->size;
	npages = size / PAGE_SIZE;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_unmap;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, size);
	if (ret) {
		kfree(obj);
		goto fail_unmap;
	}

	obj->sg_table = sg;
	obj->base.import_attach = attach;

	return &obj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
drivers/gpu/drm/i915/i915_gem_gtt.c:

@@ -267,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 		BUG();
 	}

-	if (dev_priv->mm.gtt->needs_dmar) {
+	if (obj->sg_table) {
+		i915_ppgtt_insert_sg_entries(ppgtt,
+					     obj->sg_table->sgl,
+					     obj->sg_table->nents,
+					     obj->gtt_space->start >> PAGE_SHIFT,
+					     pte_flags);
+	} else if (dev_priv->mm.gtt->needs_dmar) {
 		BUG_ON(!obj->sg_list);

 		i915_ppgtt_insert_sg_entries(ppgtt,
@@ -371,7 +377,12 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

-	if (dev_priv->mm.gtt->needs_dmar) {
+	if (obj->sg_table) {
+		intel_gtt_insert_sg_entries(obj->sg_table->sgl,
+					    obj->sg_table->nents,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    agp_type);
+	} else if (dev_priv->mm.gtt->needs_dmar) {
 		BUG_ON(!obj->sg_list);

 		intel_gtt_insert_sg_entries(obj->sg_list,
drivers/gpu/drm/nouveau/Makefile:

@@ -37,7 +37,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
 	     nv50_calc.o \
 	     nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
 	     nv50_vram.o nvc0_vram.o \
-	     nv50_vm.o nvc0_vm.o
+	     nv50_vm.o nvc0_vm.o nouveau_prime.o

 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
drivers/gpu/drm/nouveau/nouveau_bo.c:

@@ -89,12 +89,17 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 int
 nouveau_bo_new(struct drm_device *dev, int size, int align,
 	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+	       struct sg_table *sg,
 	       struct nouveau_bo **pnvbo)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
 	size_t acc_size;
 	int ret;
+	int type = ttm_bo_type_device;
+
+	if (sg)
+		type = ttm_bo_type_sg;

 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
@@ -120,8 +125,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 				sizeof(struct nouveau_bo));

 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
-			  ttm_bo_type_device, &nvbo->placement,
-			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+			  type, &nvbo->placement,
+			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
 			  nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -817,9 +822,14 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 	} else
 	if (new_mem && new_mem->mem_type == TTM_PL_TT &&
 	    nvbo->page_shift == vma->vm->spg_shift) {
-		nouveau_vm_map_sg(vma, 0, new_mem->
-				  num_pages << PAGE_SHIFT,
-				  new_mem->mm_node);
+		if (((struct nouveau_mem *)new_mem->mm_node)->sg)
+			nouveau_vm_map_sg_table(vma, 0, new_mem->
+					  num_pages << PAGE_SHIFT,
+					  new_mem->mm_node);
+		else
+			nouveau_vm_map_sg(vma, 0, new_mem->
+					  num_pages << PAGE_SHIFT,
+					  new_mem->mm_node);
 	} else {
 		nouveau_vm_unmap(vma);
 	}
@@ -1058,10 +1068,19 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	struct drm_device *dev;
 	unsigned i;
 	int r;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

 	if (ttm->state != tt_unpopulated)
 		return 0;

+	if (slave && ttm->sg) {
+		/* make userspace faulting work */
+		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+						 ttm_dma->dma_address, ttm->num_pages);
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
 	dev_priv = nouveau_bdev(ttm->bdev);
 	dev = dev_priv->dev;
@@ -1106,6 +1125,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	struct drm_nouveau_private *dev_priv;
 	struct drm_device *dev;
 	unsigned i;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (slave)
+		return;

 	dev_priv = nouveau_bdev(ttm->bdev);
 	dev = dev_priv->dev;
@@ -1181,9 +1204,12 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,

 	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-	else
-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-		nouveau_vm_map_sg(vma, 0, size, node);
+	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+		if (node->sg)
+			nouveau_vm_map_sg_table(vma, 0, size, node);
+		else
+			nouveau_vm_map_sg(vma, 0, size, node);
+	}

 	list_add_tail(&vma->head, &nvbo->vma_list);
 	vma->refcount = 1;
drivers/gpu/drm/nouveau/nouveau_channel.c:

@@ -38,7 +38,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
 	int ret;

 	/* allocate buffer object */
-	ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
+	ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
 	if (ret)
 		goto out;
drivers/gpu/drm/nouveau/nouveau_drv.c:

@@ -408,7 +408,7 @@ static struct drm_driver driver = {
 	.driver_features =
 		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
 		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
-		DRIVER_MODESET,
+		DRIVER_MODESET | DRIVER_PRIME,
 	.load = nouveau_load,
 	.firstopen = nouveau_firstopen,
 	.lastclose = nouveau_lastclose,
@@ -430,6 +430,12 @@ static struct drm_driver driver = {
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.ioctls = nouveau_ioctls,
 	.fops = &nouveau_driver_fops,

+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = nouveau_gem_prime_export,
+	.gem_prime_import = nouveau_gem_prime_import,
+
 	.gem_init_object = nouveau_gem_object_new,
 	.gem_free_object = nouveau_gem_object_del,
 	.gem_open_object = nouveau_gem_object_open,
drivers/gpu/drm/nouveau/nouveau_drv.h:

@@ -86,6 +86,7 @@ struct nouveau_mem {
 	u32 memtype;
 	u64 offset;
 	u64 size;
+	struct sg_table *sg;
 };

 struct nouveau_tile_reg {
@@ -1416,7 +1417,9 @@ extern int nv04_crtc_create(struct drm_device *, int index);
 extern struct ttm_bo_driver nouveau_bo_driver;
 extern int nouveau_bo_new(struct drm_device *, int size, int align,
 			  uint32_t flags, uint32_t tile_mode,
-			  uint32_t tile_flags, struct nouveau_bo **);
+			  uint32_t tile_flags,
+			  struct sg_table *sg,
+			  struct nouveau_bo **);
 extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
 extern int nouveau_bo_unpin(struct nouveau_bo *);
 extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1501,6 +1504,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
 extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
 				  struct drm_file *);

+extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+				struct drm_gem_object *obj, int flags);
+extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf);
+
 /* nouveau_display.c */
 int nouveau_display_create(struct drm_device *dev);
 void nouveau_display_destroy(struct drm_device *dev);
drivers/gpu/drm/nouveau/nouveau_fence.c:

@@ -573,7 +573,7 @@ nouveau_fence_init(struct drm_device *dev)
 	/* Create a shared VRAM heap for cross-channel sync. */
 	if (USE_SEMA(dev)) {
 		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
-				     0, 0, &dev_priv->fence.bo);
+				     0, 0, NULL, &dev_priv->fence.bo);
 		if (ret)
 			return ret;
drivers/gpu/drm/nouveau/nouveau_gem.c:

@@ -23,6 +23,7 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
+#include <linux/dma-buf.h>
 #include "drmP.h"
 #include "drm.h"

@@ -53,6 +54,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 		nouveau_bo_unpin(nvbo);
 	}

+	if (gem->import_attach)
+		drm_prime_gem_destroy(gem, nvbo->bo.sg);
+
 	ttm_bo_unref(&bo);

 	drm_gem_object_release(gem);
@@ -139,7 +143,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 		flags |= TTM_PL_FLAG_SYSTEM;

 	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
-			     tile_flags, pnvbo);
+			     tile_flags, NULL, pnvbo);
 	if (ret)
 		return ret;
 	nvbo = *pnvbo;
drivers/gpu/drm/nouveau/nouveau_mem.c:

@@ -416,7 +416,7 @@ nouveau_mem_vram_init(struct drm_device *dev)

 	if (dev_priv->card_type < NV_50) {
 		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
-				     0, 0, &dev_priv->vga_ram);
+				     0, 0, NULL, &dev_priv->vga_ram);
 		if (ret == 0)
 			ret = nouveau_bo_pin(dev_priv->vga_ram,
 					     TTM_PL_FLAG_VRAM);
drivers/gpu/drm/nouveau/nouveau_prime.c (new file, 163 lines):

@@ -0,0 +1,163 @@

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#include <linux/dma-buf.h>

static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction dir)
{
	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int npages = nvbo->bo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;

	if (nvbo->gem->export_dma_buf == dma_buf) {
		nvbo->gem->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

struct dma_buf_ops nouveau_dmabuf_ops = {
	.map_dma_buf = nouveau_gem_map_dma_buf,
	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
	.release = nouveau_gem_dmabuf_release,
	.kmap = nouveau_gem_kmap,
	.kmap_atomic = nouveau_gem_kmap_atomic,
	.kunmap = nouveau_gem_kunmap,
	.kunmap_atomic = nouveau_gem_kunmap_atomic,
};

static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time. not possibly on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}

struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(-EINVAL);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}

struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem) {
			if (nvbo->gem->dev == dev) {
				drm_gem_object_reference(nvbo->gem);
				return nvbo->gem;
			}
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
drivers/gpu/drm/nouveau/nouveau_sgdma.c:

@@ -290,7 +290,10 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	struct nouveau_mem *node = mem->mm_node;

 	/* noop: bound in move_notify() */
-	node->pages = nvbe->ttm.dma_address;
+	if (ttm->sg) {
+		node->sg = ttm->sg;
+	} else
+		node->pages = nvbe->ttm.dma_address;
 	return 0;
 }
drivers/gpu/drm/nouveau/nouveau_vm.c:

@@ -76,6 +76,63 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
 	nouveau_vm_map_at(vma, 0, node);
 }

+void
+nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+			struct nouveau_mem *mem)
+{
+	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num = length >> vma->node->type;
+	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max = 1 << (vm->pgt_bits - bits);
+	unsigned m, sglen;
+	u32 end, len;
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+		sglen = sg_dma_len(sg) >> PAGE_SHIFT;
+
+		end = pte + sglen;
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		for (m = 0; m < len; m++) {
+			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+			vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+			num--;
+			pte++;
+
+			if (num == 0)
+				goto finish;
+		}
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+		if (m < sglen) {
+			for (; m < sglen; m++) {
+				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+				vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+				num--;
+				pte++;
+				if (num == 0)
+					goto finish;
+			}
+		}
+
+	}
+finish:
+	vm->flush(vm);
+}
+
 void
 nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		  struct nouveau_mem *mem)
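The pte/pde arithmetic in nouveau_vm_map_sg_table() above is the subtle part: a single scatterlist entry may cross a page-table boundary, in which case the first inner loop fills what remains of the current page table and the m < sglen loop continues after pde is advanced. A worked example with illustrative numbers, not taken from any real chip:

/* Illustrative walk of the indexing above, assuming
 * vm->pgt_bits - bits = 10, so max = 1024 PTEs per page table.
 * Suppose pte = 1022 and the current sg entry covers sglen = 4 pages:
 *
 *   first loop:  end clamps to max, len = 1024 - 1022 = 2
 *                -> maps PTEs 1022 and 1023 of the current page table
 *   end >= max:  pde++, pte = 0
 *   second loop: m = 2 .. 3
 *                -> continues at pte 0 after pde has advanced
 *
 * so the 4-page sg entry is split 2 + 2 across the boundary. */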
drivers/gpu/drm/nouveau/nouveau_vm.h:

@@ -72,6 +72,9 @@ struct nouveau_vm {
 		       u64 phys, u64 delta);
 	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
 		       struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
+
+	void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		       struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
 	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
 	void (*flush)(struct nouveau_vm *);
 };
@@ -90,7 +93,8 @@ void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
 void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
 		       struct nouveau_mem *);
-
+void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+			     struct nouveau_mem *mem);
 /* nv50_vm.c */
 void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 		     struct nouveau_gpuobj *pgt[2]);
drivers/gpu/drm/nouveau/nv04_crtc.c:

@@ -1047,7 +1047,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);

 	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
drivers/gpu/drm/nouveau/nv50_crtc.c:

@@ -769,7 +769,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	nv_crtc->lut.depth = 0;

 	ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &nv_crtc->lut.nvbo);
+			     0, 0x0000, NULL, &nv_crtc->lut.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
@@ -795,7 +795,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);

 	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
drivers/gpu/drm/nouveau/nv50_evo.c:

@@ -117,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
 	evo->user_get = 4;
 	evo->user_put = 0;

-	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
 			     &evo->pushbuf_bo);
 	if (ret == 0)
 		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -333,7 +333,7 @@ nv50_evo_create(struct drm_device *dev)
 		goto err;

 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &dispc->sem.bo);
+			     0, 0x0000, NULL, &dispc->sem.bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
 		if (!ret)
drivers/gpu/drm/nouveau/nvd0_display.c:

@@ -882,7 +882,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
 	drm_mode_crtc_set_gamma_size(crtc, 256);

 	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
@@ -895,7 +895,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
 		goto out;

 	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &nv_crtc->lut.nvbo);
+			     0, 0x0000, NULL, &nv_crtc->lut.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret)
@@ -2030,7 +2030,7 @@ nvd0_display_create(struct drm_device *dev)

 	/* small shared memory area we use for notifiers and semaphores */
 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, &disp->sync);
+			     0, 0x0000, NULL, &disp->sync);
 	if (!ret) {
 		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
 		if (!ret)
drivers/gpu/drm/radeon/Makefile:

@@ -72,7 +72,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
 	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
 	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
-	si_blit_shaders.o
+	si_blit_shaders.o radeon_prime.o

 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
drivers/gpu/drm/radeon/evergreen_blit_kms.c:

@@ -668,7 +668,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	obj_size = ALIGN(obj_size, 256);

 	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-			     &rdev->r600_blit.shader_obj);
+			     NULL, &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("evergreen failed to allocate shader\n");
 		return r;
drivers/gpu/drm/radeon/r600.c:

@@ -1231,7 +1231,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
 	if (rdev->vram_scratch.robj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     &rdev->vram_scratch.robj);
+				     NULL, &rdev->vram_scratch.robj);
 		if (r) {
 			return r;
 		}
@@ -2769,7 +2769,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
 		r = radeon_bo_create(rdev, rdev->ih.ring_size,
 				     PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT,
-				     &rdev->ih.ring_obj);
+				     NULL, &rdev->ih.ring_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
 			return r;
drivers/gpu/drm/radeon/r600_blit_kms.c:

@@ -552,7 +552,7 @@ int r600_blit_init(struct radeon_device *rdev)
 	obj_size = ALIGN(obj_size, 256);

 	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-			     &rdev->r600_blit.shader_obj);
+			     NULL, &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("r600 failed to allocate shader\n");
 		return r;
drivers/gpu/drm/radeon/radeon_benchmark.c:

@@ -103,7 +103,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 	int time;

 	n = RADEON_BENCHMARK_ITERATIONS;
-	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -115,7 +115,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
drivers/gpu/drm/radeon/radeon_device.c:

@@ -193,7 +193,7 @@ int radeon_wb_init(struct radeon_device *rdev)

 	if (rdev->wb.wb_obj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
+				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 			return r;
drivers/gpu/drm/radeon/radeon_drv.c:

@@ -105,6 +105,11 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
 int radeon_mode_dumb_destroy(struct drm_file *file_priv,
 			     struct drm_device *dev,
 			     uint32_t handle);
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+					struct drm_gem_object *obj,
+					int flags);
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+					       struct dma_buf *dma_buf);

 #if defined(CONFIG_DEBUG_FS)
 int radeon_debugfs_init(struct drm_minor *minor);
@@ -333,7 +338,8 @@ static const struct file_operations radeon_driver_kms_fops = {
 static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
+	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+	    DRIVER_PRIME,
 	.dev_priv_size = 0,
 	.load = radeon_driver_load_kms,
 	.firstopen = radeon_driver_firstopen_kms,
@@ -368,6 +374,12 @@ static struct drm_driver kms_driver = {
 	.dumb_map_offset = radeon_mode_dumb_mmap,
 	.dumb_destroy = radeon_mode_dumb_destroy,
 	.fops = &radeon_driver_kms_fops,

+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = radeon_gem_prime_export,
+	.gem_prime_import = radeon_gem_prime_import,
+
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
drivers/gpu/drm/radeon/radeon_gart.c:

@@ -80,7 +80,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
 	if (rdev->gart.robj == NULL) {
 		r = radeon_bo_create(rdev, rdev->gart.table_size,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     &rdev->gart.robj);
+				     NULL, &rdev->gart.robj);
 		if (r) {
 			return r;
 		}
drivers/gpu/drm/radeon/radeon_gem.c:

@@ -42,6 +42,8 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

 	if (robj) {
+		if (robj->gem_base.import_attach)
+			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		radeon_bo_unref(&robj);
 	}
 }
@@ -59,7 +61,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
-	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
 	if (r) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
drivers/gpu/drm/radeon/radeon_object.c:

@@ -104,7 +104,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)

 int radeon_bo_create(struct radeon_device *rdev,
 		     unsigned long size, int byte_align, bool kernel, u32 domain,
-		     struct radeon_bo **bo_ptr)
+		     struct sg_table *sg, struct radeon_bo **bo_ptr)
 {
 	struct radeon_bo *bo;
 	enum ttm_bo_type type;
@@ -120,6 +120,8 @@ int radeon_bo_create(struct radeon_device *rdev,
 	}
 	if (kernel) {
 		type = ttm_bo_type_kernel;
+	} else if (sg) {
+		type = ttm_bo_type_sg;
 	} else {
 		type = ttm_bo_type_device;
 	}
@@ -155,7 +157,7 @@ retry:
 	mutex_lock(&rdev->vram_mutex);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, 0, !kernel, NULL,
-			acc_size, &radeon_ttm_bo_destroy);
+			acc_size, sg, &radeon_ttm_bo_destroy);
 	mutex_unlock(&rdev->vram_mutex);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS) {
drivers/gpu/drm/radeon/radeon_object.h:

@@ -111,9 +111,10 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
 			  bool no_wait);

 extern int radeon_bo_create(struct radeon_device *rdev,
-			    unsigned long size, int byte_align,
-			    bool kernel, u32 domain,
-			    struct radeon_bo **bo_ptr);
+			    unsigned long size, int byte_align,
+			    bool kernel, u32 domain,
+			    struct sg_table *sg,
+			    struct radeon_bo **bo_ptr);
 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
 extern void radeon_bo_kunmap(struct radeon_bo *bo);
 extern void radeon_bo_unref(struct radeon_bo **bo);
drivers/gpu/drm/radeon/radeon_prime.c (new file, 176 lines):

@@ -0,0 +1,176 @@
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */
#include "drmP.h"
#include "drm.h"

#include "radeon.h"
#include "radeon_drm.h"

#include <linux/dma-buf.h>

static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					       enum dma_data_direction dir)
{
	struct radeon_bo *bo = attachment->dmabuf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int npages = bo->tbo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				     struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;

	if (bo->gem_base.export_dma_buf == dma_buf) {
		DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
		bo->gem_base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}

static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

struct dma_buf_ops radeon_dmabuf_ops = {
	.map_dma_buf = radeon_gem_map_dma_buf,
	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
	.release = radeon_gem_dmabuf_release,
	.kmap = radeon_gem_kmap,
	.kmap_atomic = radeon_gem_kmap_atomic,
	.kunmap = radeon_gem_kunmap,
	.kunmap_atomic = radeon_gem_kunmap_atomic,
};

static int radeon_prime_create(struct drm_device *dev,
			       size_t size,
			       struct sg_table *sg,
			       struct radeon_bo **pbo)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, sg, pbo);
	if (ret)
		return ret;
	bo = *pbo;
	bo->gem_base.driver_private = bo;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *obj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (ret)
		return ERR_PTR(ret);

	return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}

struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
drivers/gpu/drm/radeon/radeon_ring.c:

@@ -390,8 +390,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
 	/* Allocate ring buffer */
 	if (ring->ring_obj == NULL) {
 		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT,
-				     &ring->ring_obj);
+				     RADEON_GEM_DOMAIN_GTT,
+				     NULL, &ring->ring_obj);
 		if (r) {
 			dev_err(rdev->dev, "(%d) ring create failed\n", r);
 			return r;
drivers/gpu/drm/radeon/radeon_sa.c:

@@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
 	}

 	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
+			     RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
 	if (r) {
 		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
 		return r;
drivers/gpu/drm/radeon/radeon_test.c:

@@ -59,7 +59,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 	}

 	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-			     &vram_obj);
+			     NULL, &vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
 		goto out_cleanup;
@@ -78,7 +78,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 		void **vram_start, **vram_end;

 		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
+				     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_cleanup;
drivers/gpu/drm/radeon/radeon_ttm.c:

@@ -611,10 +611,18 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
 	int r;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

 	if (ttm->state != tt_unpopulated)
 		return 0;

+	if (slave && ttm->sg) {
+		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+						 gtt->ttm.dma_address, ttm->num_pages);
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
 	rdev = radeon_get_rdev(ttm->bdev);
 #if __OS_HAS_AGP
 	if (rdev->flags & RADEON_IS_AGP) {
@@ -655,6 +663,10 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	struct radeon_device *rdev;
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (slave)
+		return;

 	rdev = radeon_get_rdev(ttm->bdev);
 #if __OS_HAS_AGP
@@ -726,8 +738,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		return r;
 	}
 	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM,
-			     &rdev->stollen_vga_memory);
+			     RADEON_GEM_DOMAIN_VRAM,
+			     NULL, &rdev->stollen_vga_memory);
 	if (r) {
 		return r;
 	}
drivers/gpu/drm/radeon/si.c:

@@ -2976,7 +2976,8 @@ int si_rlc_init(struct radeon_device *rdev)
 	/* save restore block */
 	if (rdev->rlc.save_restore_obj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.save_restore_obj);
+				     RADEON_GEM_DOMAIN_VRAM, NULL,
+				     &rdev->rlc.save_restore_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
 			return r;
@@ -3000,7 +3001,8 @@ int si_rlc_init(struct radeon_device *rdev)
 	/* clear state block */
 	if (rdev->rlc.clear_state_obj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.clear_state_obj);
+				     RADEON_GEM_DOMAIN_VRAM, NULL,
+				     &rdev->rlc.clear_state_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
 			si_rlc_fini(rdev);
drivers/gpu/drm/ttm/ttm_bo.c:

@@ -343,6 +343,16 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
+	case ttm_bo_type_sg:
+		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+						      page_flags | TTM_PAGE_FLAG_SG,
+						      glob->dummy_read_page);
+		if (unlikely(bo->ttm == NULL)) {
+			ret = -ENOMEM;
+			break;
+		}
+		bo->ttm->sg = bo->sg;
+		break;
 	default:
 		pr_err("Illegal buffer object type\n");
 		ret = -EINVAL;
@@ -1169,6 +1179,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		bool interruptible,
 		struct file *persistent_swap_storage,
 		size_t acc_size,
+		struct sg_table *sg,
 		void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
@@ -1223,6 +1234,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->seq_valid = false;
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
+	bo->sg = sg;
 	atomic_inc(&bo->glob->bo_count);

 	ret = ttm_bo_check_placement(bo, placement);
@@ -1233,7 +1245,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-	if (bo->type == ttm_bo_type_device) {
+	if (bo->type == ttm_bo_type_device ||
+	    bo->type == ttm_bo_type_sg) {
 		ret = ttm_bo_setup_vm(bo);
 		if (ret)
 			goto out_err;
@@ -1312,7 +1325,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,

 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
 			  buffer_start, interruptible,
-			  persistent_swap_storage, acc_size, NULL);
+			  persistent_swap_storage, acc_size, NULL, NULL);
 	if (likely(ret == 0))
 		*p_bo = bo;
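With the sg parameter threaded through ttm_bo_init() and the new ttm_bo_type_sg type, a driver creates a BO backed by an imported dma-buf roughly as follows. This is a hedged sketch modelled on the nouveau/radeon hunks in this merge; every name outside ttm_bo_init()'s signature is illustrative:

/* Sketch: create a TTM BO backed by an imported sg table.
 * Assumes the caller already did dma_buf_attach() +
 * dma_buf_map_attachment() to obtain `sg`. */
int example_bo_from_sg(struct ttm_bo_device *bdev,
		       struct ttm_buffer_object *bo,
		       unsigned long size, struct sg_table *sg,
		       struct ttm_placement *placement, size_t acc_size)
{
	enum ttm_bo_type type = sg ? ttm_bo_type_sg : ttm_bo_type_device;

	/* the new sg argument sits just before the destroy callback */
	return ttm_bo_init(bdev, bo, size, type, placement,
			   0, 0, false, NULL, acc_size, sg, NULL);
}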
drivers/gpu/drm/udl/udl_drv.c:

@@ -57,7 +57,7 @@ static const struct file_operations udl_driver_fops = {
 };

 static struct drm_driver driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM,
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
 	.load = udl_driver_load,
 	.unload = udl_driver_unload,

@@ -70,6 +70,10 @@ static struct drm_driver driver = {
 	.dumb_map_offset = udl_gem_mmap,
 	.dumb_destroy = udl_dumb_destroy,
 	.fops = &udl_driver_fops,

+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = udl_gem_prime_import,
+
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
drivers/gpu/drm/udl/udl_drv.h:

@@ -66,6 +66,7 @@ struct udl_gem_object {
 	struct drm_gem_object base;
 	struct page **pages;
 	void *vmapping;
+	struct sg_table *sg;
 };

 #define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -118,6 +119,8 @@ int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size);
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf);

 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
drivers/gpu/drm/udl/udl_fb.c:

@@ -593,11 +593,20 @@ udl_fb_user_fb_create(struct drm_device *dev,
 	struct drm_gem_object *obj;
 	struct udl_framebuffer *ufb;
 	int ret;
+	uint32_t size;

 	obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
 	if (obj == NULL)
 		return ERR_PTR(-ENOENT);

+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (size > obj->size) {
+		DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
 	if (ufb == NULL)
 		return ERR_PTR(-ENOMEM);
drivers/gpu/drm/udl/udl_gem.c:

@@ -9,6 +9,7 @@
 #include "drmP.h"
 #include "udl_drv.h"
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>

 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size)
@@ -161,6 +162,12 @@ static void udl_gem_put_pages(struct udl_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int i;

+	if (obj->base.import_attach) {
+		drm_free_large(obj->pages);
+		obj->pages = NULL;
+		return;
+	}
+
 	for (i = 0; i < page_count; i++)
 		page_cache_release(obj->pages[i]);

@@ -195,6 +202,9 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct udl_gem_object *obj = to_udl_bo(gem_obj);

+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg);
+
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);

@@ -239,3 +249,68 @@ unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
+
+static int udl_prime_create(struct drm_device *dev,
+			    size_t size,
+			    struct sg_table *sg,
+			    struct udl_gem_object **obj_p)
+{
+	struct udl_gem_object *obj;
+	int npages;
+	int i;
+	struct scatterlist *iter;
+
+	npages = size / PAGE_SIZE;
+
+	*obj_p = NULL;
+	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+	if (!obj)
+		return -ENOMEM;
+
+	obj->sg = sg;
+	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (obj->pages == NULL) {
+		DRM_ERROR("obj pages is NULL %d\n", npages);
+		return -ENOMEM;
+	}
+
+	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+	*obj_p = obj;
+	return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct udl_gem_object *uobj;
+	int ret;
+
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_PTR(PTR_ERR(attach));
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+	if (ret) {
+		goto fail_unmap;
+	}
+
+	uobj->base.import_attach = attach;
+
+	return &uobj->base;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	return ERR_PTR(ret);
+}
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c:

@@ -1567,7 +1567,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
 			  0, 0, interruptible,
-			  NULL, acc_size, bo_free);
+			  NULL, acc_size, NULL, bo_free);
 	return ret;
 }
include/drm/drmP.h:

@@ -1558,6 +1558,8 @@ extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
 					struct drm_file *file_priv);
 extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 					struct drm_file *file_priv);

+extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+					    dma_addr_t *addrs, int max_pages);
 extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
 extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
include/drm/ttm/ttm_bo_api.h:

@@ -124,11 +124,15 @@ struct ttm_mem_reg {
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
+ *
+ * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
+ * driver.
 */

 enum ttm_bo_type {
 	ttm_bo_type_device,
-	ttm_bo_type_kernel
+	ttm_bo_type_kernel,
+	ttm_bo_type_sg
 };

 struct ttm_tt;
@@ -271,6 +275,8 @@ struct ttm_buffer_object {

 	unsigned long offset;
 	uint32_t cur_placement;
+
+	struct sg_table *sg;
 };

 /**
@@ -503,6 +509,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
 			bool interrubtible,
 			struct file *persistent_swap_storage,
 			size_t acc_size,
+			struct sg_table *sg,
 			void (*destroy) (struct ttm_buffer_object *));

 /**
include/drm/ttm/ttm_bo_driver.h:

@@ -81,6 +81,7 @@ struct ttm_backend_func {
 #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
 #define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
 #define TTM_PAGE_FLAG_DMA32           (1 << 7)
+#define TTM_PAGE_FLAG_SG              (1 << 8)

 enum ttm_caching_state {
 	tt_uncached,
@@ -116,6 +117,7 @@ struct ttm_tt {
 	struct page **pages;
 	uint32_t page_flags;
 	unsigned long num_pages;
+	struct sg_table *sg; /* for SG objects via dma-buf */
 	struct ttm_bo_global *glob;
 	struct ttm_backend *be;
 	struct file *swap_storage;