/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
        struct device *dev;
        void *vaddr;
        unsigned long size;
        void *cookie;
        dma_addr_t dma_addr;
        unsigned long attrs;
        enum dma_data_direction dma_dir;
        struct sg_table *dma_sgt;
        struct frame_vector *vec;

        /* MMAP related */
        struct vb2_vmarea_handler handler;
        refcount_t refcount;
        struct sg_table *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

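/*
 * vb2_dc_get_contiguous_size() - return how many bytes, counted from the
 * first segment, the mapped scatterlist covers as one contiguous block
 * in DMA address space. Callers compare the result against the buffer
 * size to check that the whole buffer is DMA-contiguous.
 */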
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
                               buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!refcount_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
                       buf->attrs);
        put_device(buf->dev);
        kfree(buf);
}
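
/*
 * vb2_dc_alloc() - allocate a buffer of @size bytes via dma_alloc_attrs()
 * and initialize the refcounted vm_area handler that keeps the buffer
 * alive while userspace mappings exist.
 */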
static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
                          unsigned long size, enum dma_data_direction dma_dir,
                          gfp_t gfp_flags)
{
        struct vb2_dc_buf *buf;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        if (attrs)
                buf->attrs = attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
                                      GFP_KERNEL | gfp_flags, buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_attrs of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
                buf->vaddr = buf->cookie;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        refcount_set(&buf->refcount, 1);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
         * map the whole buffer.
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
                buf->dma_addr, buf->size, buf->attrs);
        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                 buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

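/*
 * Each attachment carries its own copy of the exporter's scatterlist (and
 * the direction it is currently mapped in), since one sg_table cannot be
 * mapped for several importing devices at the same time.
 */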
struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .map = vb2_dc_dmabuf_ops_kmap,
        .map_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};
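
/*
 * Build the "base" sg_table describing the pages backing an MMAP buffer.
 * It is created once, on first export, and then copied into each new
 * attachment by vb2_dc_dmabuf_ops_attach().
 */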
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
                buf->size, buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                for (i = 0; i < frame_vector_count(buf->vec); i++)
                        set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address, or at the last resort just assume that
 * dma address == physical address (like it has been assumed in earlier
 * versions of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
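
/*
 * vb2_dc_get_userptr() - pin a user memory range for DMA. The range must
 * be backed by physically contiguous memory: either regular pages whose
 * mapping is verified via vb2_dc_get_contiguous_size(), or pfn-mapped
 * memory with consecutive pfns, converted by vb2_dc_pfn_to_dma() above.
 */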
static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned long offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        buf->dma_dir = dma_dir;

        offset = vaddr & ~PAGE_MASK;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
                                               dma_dir == DMA_BIDIRECTIONAL);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check the memory is physically
                 * contiguous and use direct mapping
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                           buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc = vb2_dc_alloc,
        .put = vb2_dc_put,
        .get_dmabuf = vb2_dc_get_dmabuf,
        .cookie = vb2_dc_cookie,
        .vaddr = vb2_dc_vaddr,
        .mmap = vb2_dc_mmap,
        .get_userptr = vb2_dc_get_userptr,
        .put_userptr = vb2_dc_put_userptr,
        .prepare = vb2_dc_prepare,
        .finish = vb2_dc_finish,
        .map_dmabuf = vb2_dc_map_dmabuf,
        .unmap_dmabuf = vb2_dc_unmap_dmabuf,
        .attach_dmabuf = vb2_dc_attach_dmabuf,
        .detach_dmabuf = vb2_dc_detach_dmabuf,
        .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
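
/*
 * Usage sketch (the queue setup below is illustrative, not part of this
 * file): a V4L2 driver selects this allocator by pointing its vb2_queue
 * at vb2_dma_contig_memops before calling vb2_queue_init():
 *
 *        q->mem_ops = &vb2_dma_contig_memops;
 *        q->dev = &pdev->dev;
 *        ret = vb2_queue_init(q);
 */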

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev: device for configuring DMA parameters
 * @size: size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called by drivers that are known to operate
 * on platforms with an IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (!dev->dma_parms) {
                dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
                if (!dev->dma_parms)
                        return -ENOMEM;
        }
        if (dma_get_max_seg_size(dev) < size)
                return dma_set_max_seg_size(dev, size);

        return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev: device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
        kfree(dev->dma_parms);
        dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
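
/*
 * Example pairing of the two helpers above (a sketch; the foo_* names and
 * the platform bus are illustrative assumptions, not part of this file):
 *
 *        static int foo_probe(struct platform_device *pdev)
 *        {
 *                int ret;
 *
 *                ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *                                                      DMA_BIT_MASK(32));
 *                if (ret)
 *                        return ret;
 *                ...
 *        }
 *
 *        static int foo_remove(struct platform_device *pdev)
 *        {
 *                ...
 *                vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *                return 0;
 *        }
 */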

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");