/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
# include <linux/module.h>
# include <linux/mm.h>
# include <linux/scatterlist.h>
# include <linux/sched.h>
# include <linux/slab.h>
# include <linux/vmalloc.h>
# include <media/videobuf2-core.h>
# include <media/videobuf2-memops.h>
# include <media/videobuf2-dma-sg.h>
/* Module-level debug verbosity; writable at runtime via sysfs (mode 0644). */
static int debug;
module_param(debug, int, 0644);

/* Log to the kernel ring buffer when the configured level is high enough. */
#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ##arg);	\
	} while (0)
struct vb2_dma_sg_buf {
	void				*vaddr;		/* kernel mapping from vm_map_ram(); NULL until first vaddr() call */
	struct page			**pages;	/* array of num_pages page pointers backing the buffer */
	int				offset;		/* data offset inside the first page (non-zero for unaligned userptr) */
	enum dma_data_direction		dma_dir;	/* DMA direction given at alloc/get_userptr time */
	struct sg_table			sg_table;	/* scatterlist built over @pages */
	size_t				size;		/* total buffer size in bytes */
	unsigned int			num_pages;	/* number of entries in @pages */
	atomic_t			refcount;	/* buffer users; mmap'ed vmas hold references via @handler */
	struct vb2_vmarea_handler	handler;	/* common vma open/close refcounting helper */
	struct vm_area_struct		*vma;		/* copied vma, USERPTR buffers only (NULL for MMAP bufs) */
};

static void vb2_dma_sg_put(void *buf_priv);
2013-08-02 10:19:59 -03:00
static int vb2_dma_sg_alloc_compacted ( struct vb2_dma_sg_buf * buf ,
gfp_t gfp_flags )
{
unsigned int last_page = 0 ;
2013-08-02 10:20:00 -03:00
int size = buf - > size ;
2013-08-02 10:19:59 -03:00
while ( size > 0 ) {
struct page * pages ;
int order ;
int i ;
order = get_order ( size ) ;
/* Dont over allocate*/
if ( ( PAGE_SIZE < < order ) > size )
order - - ;
pages = NULL ;
while ( ! pages ) {
pages = alloc_pages ( GFP_KERNEL | __GFP_ZERO |
__GFP_NOWARN | gfp_flags , order ) ;
if ( pages )
break ;
if ( order = = 0 ) {
while ( last_page - - )
__free_page ( buf - > pages [ last_page ] ) ;
return - ENOMEM ;
}
order - - ;
}
split_page ( pages , order ) ;
2013-08-02 10:20:00 -03:00
for ( i = 0 ; i < ( 1 < < order ) ; i + + )
buf - > pages [ last_page + + ] = & pages [ i ] ;
2013-08-02 10:19:59 -03:00
size - = PAGE_SIZE < < order ;
}
return 0 ;
}
2014-11-18 09:50:59 -03:00
static void * vb2_dma_sg_alloc ( void * alloc_ctx , unsigned long size ,
enum dma_data_direction dma_dir , gfp_t gfp_flags )
2010-11-29 11:53:34 -03:00
{
struct vb2_dma_sg_buf * buf ;
2013-08-02 10:19:59 -03:00
int ret ;
2013-08-02 10:20:00 -03:00
int num_pages ;
2010-11-29 11:53:34 -03:00
buf = kzalloc ( sizeof * buf , GFP_KERNEL ) ;
if ( ! buf )
return NULL ;
buf - > vaddr = NULL ;
2014-11-18 09:50:59 -03:00
buf - > dma_dir = dma_dir ;
2010-11-29 11:53:34 -03:00
buf - > offset = 0 ;
2013-08-02 10:20:00 -03:00
buf - > size = size ;
2013-04-19 07:18:01 -03:00
/* size is already page aligned */
2013-08-02 10:20:00 -03:00
buf - > num_pages = size > > PAGE_SHIFT ;
2010-11-29 11:53:34 -03:00
2013-08-02 10:20:00 -03:00
buf - > pages = kzalloc ( buf - > num_pages * sizeof ( struct page * ) ,
2010-11-29 11:53:34 -03:00
GFP_KERNEL ) ;
if ( ! buf - > pages )
goto fail_pages_array_alloc ;
2013-08-02 10:19:59 -03:00
ret = vb2_dma_sg_alloc_compacted ( buf , gfp_flags ) ;
if ( ret )
goto fail_pages_alloc ;
2010-11-29 11:53:34 -03:00
2013-08-02 10:20:00 -03:00
ret = sg_alloc_table_from_pages ( & buf - > sg_table , buf - > pages ,
2014-08-01 09:18:50 -03:00
buf - > num_pages , 0 , size , GFP_KERNEL ) ;
2013-08-02 10:20:00 -03:00
if ( ret )
goto fail_table_alloc ;
2010-11-29 11:53:34 -03:00
buf - > handler . refcount = & buf - > refcount ;
buf - > handler . put = vb2_dma_sg_put ;
buf - > handler . arg = buf ;
atomic_inc ( & buf - > refcount ) ;
2013-03-02 05:12:08 -03:00
dprintk ( 1 , " %s: Allocated buffer of %d pages \n " ,
2013-08-02 10:20:00 -03:00
__func__ , buf - > num_pages ) ;
2010-11-29 11:53:34 -03:00
return buf ;
2013-08-02 10:20:00 -03:00
fail_table_alloc :
num_pages = buf - > num_pages ;
while ( num_pages - - )
__free_page ( buf - > pages [ num_pages ] ) ;
2010-11-29 11:53:34 -03:00
fail_pages_alloc :
2011-01-28 09:42:51 -03:00
kfree ( buf - > pages ) ;
2010-11-29 11:53:34 -03:00
fail_pages_array_alloc :
kfree ( buf ) ;
return NULL ;
}
static void vb2_dma_sg_put ( void * buf_priv )
{
struct vb2_dma_sg_buf * buf = buf_priv ;
2013-08-02 10:20:00 -03:00
int i = buf - > num_pages ;
2010-11-29 11:53:34 -03:00
if ( atomic_dec_and_test ( & buf - > refcount ) ) {
2013-03-02 05:12:08 -03:00
dprintk ( 1 , " %s: Freeing buffer of %d pages \n " , __func__ ,
2013-08-02 10:20:00 -03:00
buf - > num_pages ) ;
2010-11-29 11:53:34 -03:00
if ( buf - > vaddr )
2013-08-02 10:20:00 -03:00
vm_unmap_ram ( buf - > vaddr , buf - > num_pages ) ;
sg_free_table ( & buf - > sg_table ) ;
2010-11-29 11:53:34 -03:00
while ( - - i > = 0 )
__free_page ( buf - > pages [ i ] ) ;
kfree ( buf - > pages ) ;
kfree ( buf ) ;
}
}
2013-11-26 09:58:44 -03:00
static inline int vma_is_io ( struct vm_area_struct * vma )
{
return ! ! ( vma - > vm_flags & ( VM_IO | VM_PFNMAP ) ) ;
}
/*
 * USERPTR-mode: wrap @size bytes of user memory at @vaddr for DMA.
 * For ordinary memory the pages are pinned with get_user_pages();
 * for VM_IO/VM_PFNMAP vmas the pfns are resolved via follow_pfn()
 * without pinning.  Returns the opaque buffer handle or NULL.
 */
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	/* Remember the sub-page offset: userptr need not be page aligned. */
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	/* Count the pages the [vaddr, vaddr+size) span actually touches. */
	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	/*
	 * NOTE(review): find_vma() is called without mmap_sem held here —
	 * presumably the caller serializes; confirm against the vb2 core.
	 */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	/* Take a long-lived copy of the vma so it survives this syscall. */
	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		/* No struct pages to pin: translate each address to a pfn. */
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		/* Pin the pages; write access iff the device writes to them. */
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	/*
	 * NOTE(review): gfp argument is 0 here, while the MMAP path passes
	 * GFP_KERNEL to sg_alloc_table_from_pages() — confirm intentional.
	 */
	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
		buf->num_pages, num_pages_from_user);
	/* Pinned pages only exist on the non-IO path; IO pfns need no put. */
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	/* Tear down the kernel mapping created lazily by vb2_dma_sg_vaddr(). */
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	while (--i >= 0) {
		/* The device may have written into the pages: mark dirty. */
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		/* Unpin only pages that get_user_pages() actually pinned. */
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}
static void * vb2_dma_sg_vaddr ( void * buf_priv )
{
struct vb2_dma_sg_buf * buf = buf_priv ;
BUG_ON ( ! buf ) ;
if ( ! buf - > vaddr )
buf - > vaddr = vm_map_ram ( buf - > pages ,
2013-08-02 10:20:00 -03:00
buf - > num_pages ,
2010-11-29 11:53:34 -03:00
- 1 ,
PAGE_KERNEL ) ;
/* add offset in case userptr is not page-aligned */
return buf - > vaddr + buf - > offset ;
}
/* Report how many users currently reference the buffer. */
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
static int vb2_dma_sg_mmap ( void * buf_priv , struct vm_area_struct * vma )
{
struct vb2_dma_sg_buf * buf = buf_priv ;
unsigned long uaddr = vma - > vm_start ;
unsigned long usize = vma - > vm_end - vma - > vm_start ;
int i = 0 ;
if ( ! buf ) {
printk ( KERN_ERR " No memory to map \n " ) ;
return - EINVAL ;
}
do {
int ret ;
ret = vm_insert_page ( vma , uaddr , buf - > pages [ i + + ] ) ;
if ( ret ) {
printk ( KERN_ERR " Remapping memory, error: %d \n " , ret ) ;
return ret ;
}
uaddr + = PAGE_SIZE ;
usize - = PAGE_SIZE ;
} while ( usize > 0 ) ;
/*
* Use common vm_area operations to track buffer refcount .
*/
vma - > vm_private_data = & buf - > handler ;
vma - > vm_ops = & vb2_common_vm_ops ;
vma - > vm_ops - > open ( vma ) ;
return 0 ;
}
static void * vb2_dma_sg_cookie ( void * buf_priv )
{
struct vb2_dma_sg_buf * buf = buf_priv ;
2013-08-02 10:20:00 -03:00
return & buf - > sg_table ;
2010-11-29 11:53:34 -03:00
}
/* Memory-ops vtable plugged into the videobuf2 core for dma-sg buffers. */
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");