/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
2012-02-16 12:19:08 -03:00
# include <linux/io.h>
2010-10-11 10:58:53 -03:00
# include <linux/module.h>
# include <linux/mm.h>
2017-03-06 11:21:00 -03:00
# include <linux/refcount.h>
2011-10-13 07:30:51 -03:00
# include <linux/sched.h>
2010-10-11 10:58:53 -03:00
# include <linux/slab.h>
# include <linux/vmalloc.h>
2015-09-22 10:30:29 -03:00
# include <media/videobuf2-v4l2.h>
2012-08-03 07:23:54 -03:00
# include <media/videobuf2-vmalloc.h>
2010-10-11 10:58:53 -03:00
# include <media/videobuf2-memops.h>
/*
 * Per-buffer private state for the vmalloc allocator.  One instance is
 * created per plane by alloc/get_userptr/attach_dmabuf and passed back to
 * every other callback as the opaque buf_priv pointer.
 */
struct vb2_vmalloc_buf {
	void				*vaddr;		/* kernel virtual mapping of the buffer */
	struct frame_vector		*vec;		/* pinned user pages (USERPTR buffers only) */
	enum dma_data_direction		dma_dir;	/* direction requested at setup time */
	unsigned long			size;		/* buffer size in bytes */
	refcount_t			refcount;	/* live users: mmap()ed VMAs + exported dmabufs */
	struct vb2_vmarea_handler	handler;	/* refcount glue for vb2_common_vm_ops */
	struct dma_buf			*dbuf;		/* imported dmabuf (attach_dmabuf only) */
};

/* Forward declaration: used as the handler.put callback in alloc(). */
static void vb2_vmalloc_put(void *buf_priv);
2016-08-03 13:46:00 -07:00
static void * vb2_vmalloc_alloc ( struct device * dev , unsigned long attrs ,
2016-04-15 09:15:05 -03:00
unsigned long size , enum dma_data_direction dma_dir ,
gfp_t gfp_flags )
2010-10-11 10:58:53 -03:00
{
struct vb2_vmalloc_buf * buf ;
2013-03-01 15:44:20 -03:00
buf = kzalloc ( sizeof ( * buf ) , GFP_KERNEL | gfp_flags ) ;
2010-10-11 10:58:53 -03:00
if ( ! buf )
2016-07-21 09:14:02 -03:00
return ERR_PTR ( - ENOMEM ) ;
2010-10-11 10:58:53 -03:00
buf - > size = size ;
buf - > vaddr = vmalloc_user ( buf - > size ) ;
2014-11-18 09:50:59 -03:00
buf - > dma_dir = dma_dir ;
2010-10-11 10:58:53 -03:00
buf - > handler . refcount = & buf - > refcount ;
buf - > handler . put = vb2_vmalloc_put ;
buf - > handler . arg = buf ;
if ( ! buf - > vaddr ) {
2011-10-13 07:30:51 -03:00
pr_debug ( " vmalloc of size %ld failed \n " , buf - > size ) ;
2010-10-11 10:58:53 -03:00
kfree ( buf ) ;
2016-07-21 09:14:02 -03:00
return ERR_PTR ( - ENOMEM ) ;
2010-10-11 10:58:53 -03:00
}
2017-03-06 11:21:00 -03:00
refcount_set ( & buf - > refcount , 1 ) ;
2010-10-11 10:58:53 -03:00
return buf ;
}
static void vb2_vmalloc_put ( void * buf_priv )
{
struct vb2_vmalloc_buf * buf = buf_priv ;
2017-03-06 11:21:00 -03:00
if ( refcount_dec_and_test ( & buf - > refcount ) ) {
2010-10-11 10:58:53 -03:00
vfree ( buf - > vaddr ) ;
kfree ( buf ) ;
}
}
/*
 * Wrap a userspace buffer at @vaddr/@size for USERPTR I/O.
 *
 * Pins the user pages via vb2_create_framevec() (writably when the device
 * writes into the buffer, i.e. DMA_FROM_DEVICE/DMA_BIDIRECTIONAL) and maps
 * them into kernel space, either with vm_map_ram() when struct pages exist
 * or with ioremap_nocache() for raw, physically contiguous pfn ranges
 * (e.g. reserved memory without struct pages).
 *
 * Returns the new vb2_vmalloc_buf or an ERR_PTR on failure.
 */
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	/* User pointer need not be page aligned; remember the sub-page offset. */
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	/* Pin pages; request write access only if the device will write. */
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	/* Point vaddr at the first byte of user data, not the page start. */
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);
	return ERR_PTR(ret);
}
/*
 * Release a USERPTR buffer created by vb2_vmalloc_get_userptr():
 * undo the kernel mapping, mark pages dirty when the device may have
 * written to them, then unpin the pages and free the state.
 */
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	/* Mask off the sub-page offset added in get_userptr(). */
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		/* Mapped with vm_map_ram() over real struct pages. */
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		/* Device may have written: keep page cache coherent. */
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		/* Raw pfns were mapped with ioremap_nocache(). */
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
2010-10-11 10:58:53 -03:00
2011-10-13 07:30:51 -03:00
static void * vb2_vmalloc_vaddr ( void * buf_priv )
{
struct vb2_vmalloc_buf * buf = buf_priv ;
2010-10-11 10:58:53 -03:00
if ( ! buf - > vaddr ) {
[media] v4l2-core: don't break long lines
Due to the 80-cols restrictions, and latter due to checkpatch
warnings, several strings were broken into multiple lines. This
is not considered a good practice anymore, as it makes harder
to grep for strings at the source code.
As we're right now fixing other drivers due to KERN_CONT, we need
to be able to identify what printk strings don't end with a "\n".
It is a way easier to detect those if we don't break long lines.
So, join those continuation lines.
The patch was generated via the script below, and manually
adjusted if needed.
</script>
use Text::Tabs;
while (<>) {
if ($next ne "") {
$c=$_;
if ($c =~ /^\s+\"(.*)/) {
$c2=$1;
$next =~ s/\"\n$//;
$n = expand($next);
$funpos = index($n, '(');
$pos = index($c2, '",');
if ($funpos && $pos > 0) {
$s1 = substr $c2, 0, $pos + 2;
$s2 = ' ' x ($funpos + 1) . substr $c2, $pos + 2;
$s2 =~ s/^\s+//;
$s2 = ' ' x ($funpos + 1) . $s2 if ($s2 ne "");
print unexpand("$next$s1\n");
print unexpand("$s2\n") if ($s2 ne "");
} else {
print "$next$c2\n";
}
$next="";
next;
} else {
print $next;
}
$next="";
} else {
if (m/\"$/) {
if (!m/\\n\"$/) {
$next=$_;
next;
}
}
}
print $_;
}
</script>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2016-10-18 17:44:22 -02:00
pr_err ( " Address of an unallocated plane requested or cannot map user pointer \n " ) ;
2010-10-11 10:58:53 -03:00
return NULL ;
}
return buf - > vaddr ;
}
/* Report the current number of users (mmap()s plus exported dmabufs). */
static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}
static int vb2_vmalloc_mmap ( void * buf_priv , struct vm_area_struct * vma )
{
struct vb2_vmalloc_buf * buf = buf_priv ;
int ret ;
if ( ! buf ) {
2011-10-13 07:30:51 -03:00
pr_err ( " No memory to map \n " ) ;
2010-10-11 10:58:53 -03:00
return - EINVAL ;
}
ret = remap_vmalloc_range ( vma , buf - > vaddr , 0 ) ;
if ( ret ) {
2011-10-13 07:30:51 -03:00
pr_err ( " Remapping vmalloc memory, error: %d \n " , ret ) ;
2010-10-11 10:58:53 -03:00
return ret ;
}
/*
* Make sure that vm_areas for 2 buffers won ' t be merged together
*/
vma - > vm_flags | = VM_DONTEXPAND ;
/*
* Use common vm_area operations to track buffer refcount .
*/
vma - > vm_private_data = & buf - > handler ;
vma - > vm_ops = & vb2_common_vm_ops ;
vma - > vm_ops - > open ( vma ) ;
return 0 ;
}
#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

/*
 * Per-attachment state for an exported dmabuf: a scatterlist over the
 * buffer's pages plus the direction it is currently mapped in.
 */
struct vb2_vmalloc_attachment {
	struct sg_table sgt;		/* one entry per page of the buffer */
	enum dma_data_direction dma_dir; /* DMA_NONE while unmapped */
};
static int vb2_vmalloc_dmabuf_ops_attach ( struct dma_buf * dbuf , struct device * dev ,
struct dma_buf_attachment * dbuf_attach )
{
struct vb2_vmalloc_attachment * attach ;
struct vb2_vmalloc_buf * buf = dbuf - > priv ;
int num_pages = PAGE_ALIGN ( buf - > size ) / PAGE_SIZE ;
struct sg_table * sgt ;
struct scatterlist * sg ;
void * vaddr = buf - > vaddr ;
int ret ;
int i ;
attach = kzalloc ( sizeof ( * attach ) , GFP_KERNEL ) ;
if ( ! attach )
return - ENOMEM ;
sgt = & attach - > sgt ;
ret = sg_alloc_table ( sgt , num_pages , GFP_KERNEL ) ;
if ( ret ) {
kfree ( attach ) ;
return ret ;
}
for_each_sg ( sgt - > sgl , sg , sgt - > nents , i ) {
struct page * page = vmalloc_to_page ( vaddr ) ;
if ( ! page ) {
sg_free_table ( sgt ) ;
kfree ( attach ) ;
return - ENOMEM ;
}
sg_set_page ( sg , page , PAGE_SIZE , 0 ) ;
vaddr + = PAGE_SIZE ;
}
attach - > dma_dir = DMA_NONE ;
dbuf_attach - > priv = attach ;
return 0 ;
}
/*
 * dmabuf detach: unmap the scatterlist if it is still mapped, then free
 * the table and the attachment state.
 */
static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
/*
 * dmabuf map: DMA-map the cached scatterlist for @db_attach's device in
 * @dma_dir.  A previous mapping in the same direction is reused; one in a
 * different direction is torn down first.  Returns the mapped sg_table or
 * ERR_PTR(-EIO) if dma_map_sg() fails.
 */
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}
/*
 * dmabuf unmap: intentionally empty — the mapping is cached in the
 * attachment and released in ops_detach instead.
 */
static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}
/* dmabuf release: drop the buffer reference taken by get_dmabuf(). */
static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}
/* dmabuf kmap: pages are already kernel-mapped, just offset into vaddr. */
static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}
/* dmabuf vmap: the whole buffer is permanently mapped; return it. */
static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}
/* dmabuf mmap: reuse the regular vb2 mmap path on the same buffer. */
static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}
/* dma_buf exporter operations backed by the handlers above. */
static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.map = vb2_vmalloc_dmabuf_ops_kmap,
	.map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};
static struct dma_buf * vb2_vmalloc_get_dmabuf ( void * buf_priv , unsigned long flags )
{
struct vb2_vmalloc_buf * buf = buf_priv ;
struct dma_buf * dbuf ;
2015-01-23 12:53:43 +05:30
DEFINE_DMA_BUF_EXPORT_INFO ( exp_info ) ;
exp_info . ops = & vb2_vmalloc_dmabuf_ops ;
exp_info . size = buf - > size ;
exp_info . flags = flags ;
exp_info . priv = buf ;
2014-11-18 09:51:05 -03:00
if ( WARN_ON ( ! buf - > vaddr ) )
return NULL ;
2015-01-23 12:53:43 +05:30
dbuf = dma_buf_export ( & exp_info ) ;
2014-11-18 09:51:05 -03:00
if ( IS_ERR ( dbuf ) )
return NULL ;
/* dmabuf keeps reference to vb2 buffer */
2017-03-06 11:21:00 -03:00
refcount_inc ( & buf - > refcount ) ;
2014-11-18 09:51:05 -03:00
return dbuf ;
}
2014-12-15 10:40:28 -03:00
# endif /* CONFIG_HAS_DMA */
2014-11-18 09:51:05 -03:00
2012-06-14 10:37:46 -03:00
/*********************************************/
/* callbacks for DMABUF buffers */
/*********************************************/
static int vb2_vmalloc_map_dmabuf ( void * mem_priv )
{
struct vb2_vmalloc_buf * buf = mem_priv ;
buf - > vaddr = dma_buf_vmap ( buf - > dbuf ) ;
return buf - > vaddr ? 0 : - EFAULT ;
}
/* Undo vb2_vmalloc_map_dmabuf(): drop the kernel mapping. */
static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}
/*
 * Release an imported dmabuf buffer: unmap it if it is still mapped and
 * free the wrapper state.  (The dmabuf itself is put by the vb2 core.)
 */
static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}
/*
 * Wrap an imported dmabuf of at least @size bytes.  No mapping is done
 * here; that happens lazily in map_dmabuf.  Returns the wrapper or
 * ERR_PTR(-EFAULT) if the dmabuf is too small / ERR_PTR(-ENOMEM) on OOM.
 */
static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}
/*
 * Memory operations table registered with the videobuf2 core.
 * dmabuf export is only provided when the kernel supports DMA.
 */
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc = vb2_vmalloc_alloc,
	.put = vb2_vmalloc_put,
	.get_userptr = vb2_vmalloc_get_userptr,
	.put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf = vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf = vb2_vmalloc_detach_dmabuf,
	.vaddr = vb2_vmalloc_vaddr,
	.mmap = vb2_vmalloc_mmap,
	.num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");