/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ##arg);	\
	} while (0)

struct vb2_dma_sg_conf {
	struct device		*dev;
};

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct vm_area_struct		*vma;

	struct dma_buf_attachment	*db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);
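
/*
 * Allocate the buf->num_pages pages backing an MMAP buffer. The allocator
 * starts from the highest page order that still fits in the remaining size,
 * splits each successful higher-order allocation into individual pages and
 * falls back to smaller orders when an allocation fails. If even an order-0
 * page cannot be obtained, everything allocated so far is freed and -ENOMEM
 * is returned.
 */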
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}
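
/*
 * alloc memop: create an MMAP buffer of @size bytes. The pages are allocated
 * with vb2_dma_sg_alloc_compacted(), collected into buf->sg_table and mapped
 * for DMA on the context's device. DMA_ATTR_SKIP_CPU_SYNC is used because
 * the cache sync is deferred until the prepare() memop runs.
 */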
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}
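
/*
 * put memop: drop one reference to an MMAP buffer. When the last reference
 * goes away, the scatterlist is DMA-unmapped, any kernel mapping created by
 * vb2_dma_sg_vaddr() is torn down, and the sg table, the pages and the
 * device reference taken at allocation time are all released.
 */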
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}
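
/*
 * prepare/finish memops: synchronize the buffer for device respectively CPU
 * access around a hardware operation. Buffers imported through DMABUF are
 * skipped, as the exporter is responsible for cache maintenance there.
 */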
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}
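
/*
 * VM_IO/VM_PFNMAP mappings have no struct page behind them, so the USERPTR
 * code below resolves them with follow_pfn() instead of get_user_pages().
 */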
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
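
/*
 * get_userptr memop: wrap a userspace buffer of @size bytes starting at
 * @vaddr. The covering vma is looked up and a copy is taken with
 * vb2_get_vma(), the backing pages are collected (via follow_pfn() for
 * IO/PFNMAP vmas, get_user_pages() otherwise), and the resulting sg table
 * is DMA-mapped for the context's device.
 */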
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);
	up_read(&current->mm->mmap_sem);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	down_read(&current->mm->mmap_sem);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	up_read(&current->mm->mmap_sem);
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	down_read(&current->mm->mmap_sem);
	vb2_put_vma(buf->vma);
	up_read(&current->mm->mmap_sem);
	kfree(buf);
}
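
/*
 * vaddr memop: return a kernel virtual address for the buffer, creating the
 * mapping lazily with dma_buf_vmap() for DMABUF-imported buffers or
 * vm_map_ram() otherwise. The userptr offset is added so the address points
 * at the start of the payload even for non page-aligned user pointers.
 */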
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
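
/*
 * mmap memop: map the buffer into the calling process page by page with
 * vm_insert_page() and hook up vb2_common_vm_ops so the mapping holds a
 * reference on the buffer for as long as it exists.
 */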
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
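
/*
 * map_dma_buf op: hand the importer a DMA-mapped copy of the scatter list.
 * The per-attachment sg table created in the attach op is mapped in the
 * requested direction; an existing mapping with the same direction is
 * reused, otherwise the old mapping is released first.
 */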
static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};
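
/*
 * get_dmabuf memop: export the buffer as a dma-buf using the ops above. The
 * exported dma-buf takes its own reference on the vb2 buffer, which is
 * dropped again in the release op.
 */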
static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
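
/*
 * map_dmabuf memop: pin the attached dma-buf for DMA. The sg table provided
 * by the exporter is stored in buf->dma_sgt and any stale kernel mapping
 * pointer is cleared.
 */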
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}
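
/*
 * attach_dmabuf memop: import a dma-buf of at least @size bytes by creating
 * an attachment for the device stored in the allocation context. The buffer
 * is only pinned later, when the map_dmabuf memop is called.
 */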
static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
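
/*
 * cookie memop: expose the sg_table pointer as the allocator cookie so
 * drivers can get at the scatter list for this buffer.
 */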
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
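
/*
 * vb2_dma_sg_init_ctx() allocates a vb2_dma_sg_conf recording the struct
 * device used for DMA mapping; the returned pointer is passed to the memops
 * above as the allocation context and released with vb2_dma_sg_cleanup_ctx().
 */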
void *vb2_dma_sg_init_ctx(struct device *dev)
{
	struct vb2_dma_sg_conf *conf;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");