/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}
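
/*
 * Export path: build a scatterlist for the attaching device. We copy the
 * object's sg_table rather than handing out the original so that each
 * attachment gets an independent DMA mapping, and we pin the backing
 * pages so the shmem backing store cannot be reclaimed while the
 * importer holds the mapping.
 */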
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}
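
/*
 * Tear down what i915_gem_map_dma_buf() created: unmap the copied
 * scatterlist, free it, and drop the pin on the object's backing pages.
 */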
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}
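
/*
 * The vmap is shared and refcounted: the first caller builds a
 * contiguous kernel mapping of the backing pages, later callers just
 * bump vmapping_count and get the cached address back.
 */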
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}
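
/* Drop a vmap reference; the mapping is torn down when the last user goes. */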
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return;

	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}
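
/*
 * Per-page kmap access and userspace mmap of the dma-buf are left
 * unimplemented here; importers that need CPU access to the buffer
 * contents are expected to go through vmap above.
 */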
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
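
/*
 * Move the object to the CPU domain before CPU access. The direction
 * describes the upcoming CPU access: DMA_TO_DEVICE and DMA_BIDIRECTIONAL
 * mean the CPU will write, so the object must be flushed into the CPU
 * write domain.
 */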
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};
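
/*
 * Wrap a GEM object in a dma-buf for export; release of the dma-buf is
 * handled by the common drm_gem_dmabuf_release() helper in the ops
 * table above.
 */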
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}
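
/*
 * Import path: an imported object's backing pages come from the
 * exporter via the dma-buf attachment, not from shmem, so these objects
 * get their own get_pages/put_pages implementation that simply maps and
 * unmaps the attachment.
 */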
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	obj->has_dma_mapping = true;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
	obj->has_dma_mapping = false;
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};
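
/*
 * Importing first checks for self-import: a buffer that we exported
 * ourselves is resolved straight back to the underlying GEM object,
 * avoiding a pointless attach/map cycle.
 */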
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}