/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/* scatterlist table functions */
/*********************************************/
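/*
 * Call @cb on every page covered by @sgt. The walk uses orig_nents (the
 * CPU-side view of the table), so each page backing the buffer is
 * visited exactly once even if the DMA mapping merged entries.
 */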
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

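/*
 * Return the length of the initial DMA-contiguous run of a mapped sg
 * table; the walk stops at the first gap in bus addresses. E.g. (with
 * illustrative addresses) entries covering 0x1000+0x1000 and
 * 0x3000+0x1000 yield 0x1000, while 0x1000+0x1000 followed by
 * 0x2000+0x1000 yield 0x2000.
 */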
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/* callbacks for all buffers */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

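/*
 * prepare/finish bracket each hardware transfer: prepare hands cache
 * ownership of the pages over to the device before DMA, finish hands it
 * back to the CPU afterwards. Coherent MMAP allocations have no dma_sgt
 * and need no syncing; for DMABUF imports the exporter takes care of it.
 */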
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/* callbacks for MMAP buffers */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/* DMABUF ops for exporters */
/*********************************************/
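/*
 * Each attachment gets a private copy of the exporter's scatterlist,
 * since a single sg table cannot be mapped for several importers at
 * once; @dir caches the current mapping direction (DMA_NONE while the
 * table is unmapped).
 */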
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}

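/*
 * The mapping created in vb2_dc_dmabuf_ops_map() stays cached in the
 * attachment, so unmap is a no-op; the actual dma_unmap_sg() happens on
 * a direction change in a later map call, or in the detach callback.
 */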
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/* callbacks for USERPTR buffers */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

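/*
 * For VM_IO/VM_PFNMAP areas there may be no struct page to pin. As a
 * fallback, verify that the n_pages starting at @start map to
 * consecutive pfns and report the first one, so the memory can still be
 * used for contiguous DMA.
 */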
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
	struct vm_area_struct *vma, unsigned long *res)
{
	unsigned long pfn, start_pfn, prev_pfn;
	unsigned int i;
	int ret;

	if (!vma_is_io(vma))
		return -EFAULT;

	ret = follow_pfn(vma, start, &pfn);
	if (ret)
		return ret;

	start_pfn = pfn;
	start += PAGE_SIZE;

	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
		prev_pfn = pfn;
		ret = follow_pfn(vma, start, &pfn);

		if (ret) {
			pr_err("no page for address %lu\n", start);
			return ret;
		}
		if (pfn != prev_pfn + 1)
			return -EINVAL;
	}

	*res = start_pfn;
	return 0;
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			/*
			 * check ret before pfn_valid(): follow_pfn() leaves
			 * pfn uninitialized on failure
			 */
			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			if (!pfn_valid(pfn))
				return -EINVAL;

			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (sgt) {
		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
		if (!vma_is_io(buf->vma))
			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_put_vma(buf->vma);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address, or at the last resort just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

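/*
 * Worked example of the fallback above: with 4 KiB pages on a platform
 * where bus addresses equal physical addresses, pfn 0x12345 yields the
 * dma address 0x12345 << PAGE_SHIFT == 0x12345000.
 */
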
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		unsigned long pfn;
		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
			buf->size = size;
			kfree(pages);
			return buf;
		}

		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/* callbacks for DMABUF buffers */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/* DMA CONTIG exported functions */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

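/*
 * Typical driver usage, as a sketch (the "ctx", "q" and "pdev" names are
 * illustrative, not part of this API):
 *
 *	struct vb2_queue *q = ...;
 *	void *ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	...
 *	vb2_dma_contig_cleanup_ctx(ctx);
 */
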
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");