/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

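/*
 * Per-buffer state. Depending on how the buffer was obtained, @vec holds
 * the pinned userptr pages, @dbuf the attached dma-buf, and @handler the
 * refcounting hooks used by the common vm_ops for mmap()ed buffers.
 */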
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

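/*
 * MMAP mode: allocate the buffer with vmalloc_user() so it can later be
 * remapped to userspace with remap_vmalloc_range().
 */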
static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
			       enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return NULL;
	}

	atomic_inc(&buf->refcount);
	return buf;
}

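/* Drop one reference; free the buffer once the last user is gone. */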
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

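/*
 * USERPTR mode: pin the user pages behind @vaddr and map them into the
 * kernel. If the pages have no struct page (e.g. a raw PFN range), fall
 * back to an uncached ioremap of the physically contiguous range.
 */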
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		goto fail_pfnvec_create;
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);
	return NULL;
}

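/*
 * Undo vb2_vmalloc_get_userptr(): tear down the kernel mapping, mark
 * pages dirty if the device wrote to them, and unpin them.
 */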
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

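/* Kernel virtual address of the buffer, or NULL if it was never mapped. */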
static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

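/* Number of users, i.e. the current reference count of the buffer. */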
static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

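/*
 * Map an MMAP buffer into a userspace VMA and hook up the common vm_ops
 * so that the mapping holds a reference on the buffer.
 */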
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

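/*
 * Build a scatterlist covering the buffer, one entry per page, by
 * resolving each vmalloc page with vmalloc_to_page(). The table is
 * cached in the attachment and mapped lazily in the map callback.
 */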
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

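/* Unmap (if mapped) and free the scatterlist cached in the attachment. */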
static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

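/*
 * Map the cached scatterlist for the importing device. A previous
 * mapping is reused if the direction matches, otherwise it is torn down
 * and redone; the dmabuf lock serializes concurrent map/unmap calls.
 */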
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

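/* The cached mapping is reused across map calls and released in detach. */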
static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

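/*
 * Export the buffer as a dma-buf. The dma-buf holds its own reference
 * on the vb2 buffer, dropped again in the release callback.
 */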
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

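/*
 * Set up importer-side state for an attached dma-buf. The dma-buf must
 * be at least @size bytes; the actual vmap happens in map_dmabuf.
 */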
static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

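/*
 * A minimal usage sketch (not part of this file, surrounding queue setup
 * elided): a driver selects these ops on its vb2_queue before calling
 * vb2_queue_init():
 *
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */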
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");