/*
 * udl_dmabuf.c
 *
 * Copyright (c) 2014 The Chromium OS Authors
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <drm/drmP.h>
#include "udl_drv.h"

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
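
/*
 * Per-attachment state: each importer attached to a dma-buf exported by
 * udl gets its own copy of the GEM object's scatter/gather table, along
 * with the DMA direction it was last mapped in, so that a repeated map
 * request in the same direction can return the cached table.
 */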
struct udl_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};
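
/*
 * dma_buf_ops.attach: set up the per-attachment state for a new importer.
 * Nothing is mapped yet; dir stays DMA_NONE until the first map call.
 */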
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
			attach->dmabuf->size);

	udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
	if (!udl_attach)
		return -ENOMEM;

	udl_attach->dir = DMA_NONE;
	attach->priv = udl_attach;

	return 0;
}
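
/*
 * dma_buf_ops.detach: undo udl_attach_dma_buf(). DMA-unmap the
 * scatterlist if it was ever mapped, then free the sg_table copy and the
 * per-attachment state.
 */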
static void udl_detach_dma_buf(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attach)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct sg_table *sgt;

	if (!udl_attach)
		return;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
			attach->dmabuf->size);

	sgt = &udl_attach->sgt;

	if (udl_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
			     udl_attach->dir);

	sg_free_table(sgt);
	kfree(udl_attach);
	attach->priv = NULL;
}
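
/*
 * dma_buf_ops.map_dma_buf: hand the importer a scatter/gather table for
 * the object's backing pages. The pages are pinned on first use, turned
 * into an sg_table, copied into the attachment's private table and
 * DMA-mapped for the importing device. A repeated request with the same
 * direction returns the cached mapping.
 */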
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
	struct drm_device *dev = obj->base.dev;
	struct udl_device *udl = dev->dev_private;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int page_count;
	int nents, ret;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);

	/* just return current sgt if already requested. */
	if (udl_attach->dir == dir && udl_attach->is_mapped)
		return &udl_attach->sgt;

	if (!obj->pages) {
		ret = udl_gem_get_pages(obj);
		if (ret) {
			DRM_ERROR("failed to map pages.\n");
			return ERR_PTR(ret);
		}
	}

	page_count = obj->base.size / PAGE_SIZE;
	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
	if (IS_ERR(obj->sg)) {
		DRM_ERROR("failed to allocate sgt.\n");
		return ERR_CAST(obj->sg);
	}

	sgt = &udl_attach->sgt;

	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&udl->gem_lock);

	rd = obj->sg->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	udl_attach->is_mapped = true;
	udl_attach->dir = dir;
	attach->priv = udl_attach;

err_unlock:
	mutex_unlock(&udl->gem_lock);
	return sgt;
}
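
/*
 * dma_buf_ops.unmap_dma_buf: intentionally a no-op; the actual unmap is
 * deferred until the importer detaches (see udl_detach_dma_buf()).
 */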
static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
			      struct sg_table *sgt,
			      enum dma_data_direction dir)
{
	/* Nothing to do. */
	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);
}
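
/* kmap, kunmap and mmap of the dma-buf are not implemented yet. */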
static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
			      unsigned long page_num, void *addr)
{
	/* TODO */
}

static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
			   struct vm_area_struct *vma)
{
	/* TODO */

	return -EINVAL;
}
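
/*
 * The vtable handed to the dma-buf core for buffers exported by udl;
 * release is covered by the generic DRM GEM helper.
 */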
static const struct dma_buf_ops udl_dmabuf_ops = {
	.attach		= udl_attach_dma_buf,
	.detach		= udl_detach_dma_buf,
	.map_dma_buf	= udl_map_dma_buf,
	.unmap_dma_buf	= udl_unmap_dma_buf,
	.map		= udl_dmabuf_kmap,
	.unmap		= udl_dmabuf_kunmap,
	.mmap		= udl_dmabuf_mmap,
	.release	= drm_gem_dmabuf_release,
};
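
/*
 * Driver hook for DRM PRIME export: wrap a udl GEM object in a dma-buf
 * using the ops above. A minimal userspace sketch of the path that ends
 * up here (drm_fd and handle are assumed to be a valid device fd and GEM
 * handle):
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd now refers to the exported dma-buf
 */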
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &udl_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}
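
/*
 * Wrap an imported sg_table in a new udl GEM object: allocate the object,
 * keep a pointer to the table and flatten it into the page array that the
 * rest of the driver works with.
 */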
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}
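
/*
 * Driver hook for DRM PRIME import: attach to the foreign dma-buf, map it
 * for bidirectional DMA and build a udl GEM object around the resulting
 * sg_table. The references taken on the device and on the dma-buf itself
 * are dropped again on every error path.
 */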
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	get_device(dev->dev);
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		put_device(dev->dev);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;
	uobj->flags = UDL_BO_WC;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
	put_device(dev->dev);
	return ERR_PTR(ret);
}