/* exynos_drm_dmabuf.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

#include <linux/dma-buf.h>
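
/*
 * Per-attachment state: a private copy of the exporter's scatter-gather
 * table plus the direction it was last mapped for, so a repeated
 * map_dma_buf() call with the same direction can reuse the mapping.
 */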
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};
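
/*
 * attach callback: allocate the per-attachment bookkeeping; dir stays
 * DMA_NONE until the first map_dma_buf() call.
 */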
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
					struct device *dev,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach;

	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
	if (!exynos_attach)
		return -ENOMEM;

	exynos_attach->dir = DMA_NONE;
	attach->priv = exynos_attach;

	return 0;
}
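
/*
 * detach callback: undo any cached DMA mapping, then free the sg_table
 * copy and the attachment state allocated in the attach callback.
 */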
static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct sg_table *sgt;

	if (!exynos_attach)
		return;

	sgt = &exynos_attach->sgt;

	if (exynos_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				exynos_attach->dir);

	sg_free_table(sgt);
	kfree(exynos_attach);
	attach->priv = NULL;
}
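
/*
 * map_dma_buf callback: hand the importer a copy of the GEM buffer's
 * scatter-gather table, mapped for DMA in the requested direction. The
 * result is cached in the attachment and reused on later calls.
 */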
static struct sg_table *
	exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
				enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* just return the current sgt if already requested. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);
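
	/* duplicate the buffer's scatterlist into the per-attachment table. */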
	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}
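
	/* map the new table for the attaching device unless dir is DMA_NONE. */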
	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
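
/*
 * The mapping created above is cached for the attachment's lifetime and
 * torn down in exynos_gem_detach_dma_buf(), so unmap has nothing to undo.
 */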
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/* Nothing to do. */
}

static void exynos_dmabuf_release(struct dma_buf *dmabuf)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/*
	 * exynos_dmabuf_release() is called once the dma-buf file object's
	 * f_count has dropped to zero, so drop the gem object reference
	 * that drm_prime_handle_to_fd() took at export time.
	 */
	if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
		exynos_gem_obj->base.export_dma_buf = NULL;

		/*
		 * drop this gem object refcount to release the allocated
		 * buffer and resources.
		 */
		drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
	}
}
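
/*
 * CPU access to the buffer contents is not implemented yet; the kmap
 * hooks below are stubs.
 */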
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}

static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}

static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
					struct vm_area_struct *vma)
{
	return -ENOTTY;
}

static struct dma_buf_ops exynos_dmabuf_ops = {
	.attach			= exynos_gem_attach_dma_buf,
	.detach			= exynos_gem_detach_dma_buf,
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
	.mmap			= exynos_gem_dmabuf_mmap,
	.release		= exynos_dmabuf_release,
};
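
/* wrap a GEM object in a new dma-buf that uses the ops table above. */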
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
				struct drm_gem_object *obj, int flags)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

	return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
				exynos_gem_obj->base.size, flags);
}
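
/*
 * Import a dma-buf as a GEM object. A buffer exported from our own GEM
 * is short-circuited to the original object; a foreign buffer is
 * attached, mapped, and wrapped in a new GEM buffer object.
 */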
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

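	/* guess the memory type from the shape of the mapped scatterlist. */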
	if (sgt->nents == 1) {
		/* always physically continuous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
				buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);

	return ERR_PTR(ret);
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");