/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;

        struct list_head        job_queue;
        spinlock_t              job_spinlock;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                  enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);

        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (NULL != m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                             struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:    m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction; see the sketch after this function.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags_job, flags_out, flags_cap;

        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("On job queue already\n");
                return;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
                                        flags_cap);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Driver not ready\n");
                return;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        v4l2_m2m_try_run(m2m_dev);
}

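/*
 * Purely illustrative sketch of such a custom job_ready() callback, for a
 * driver that needs two source buffers per transaction. "struct my_ctx" and
 * its m2m_ctx/num_src_bufs_needed fields are hypothetical driver-side names,
 * and the v4l2_m2m_num_src_bufs_ready() helper is assumed to come from
 * <media/v4l2-mem2mem.h>:
 *
 *      static int my_job_ready(void *priv)
 *      {
 *              struct my_ctx *ctx = priv;
 *
 *              // Ready only once enough source buffers have been queued.
 *              return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >=
 *                      ctx->num_src_bufs_needed;
 *      }
 */
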
/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
                wait_event(m2m_ctx->finished,
                                !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        v4l2_m2m_try_schedule(m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

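/*
 * Usage sketch only, not taken from any particular driver: an interrupt
 * handler typically removes the processed buffers from the ready lists, marks
 * them done and then yields the device. "my_device_irq", "struct my_dev" and
 * dev->curr_ctx are hypothetical names; the v4l2_m2m_src_buf_remove() and
 * v4l2_m2m_dst_buf_remove() helpers are assumed from <media/v4l2-mem2mem.h>:
 *
 *      static irqreturn_t my_device_irq(int irq, void *priv)
 *      {
 *              struct my_dev *dev = priv;
 *              struct vb2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_src_buf_remove(dev->curr_ctx->m2m_ctx);
 *              dst = v4l2_m2m_dst_buf_remove(dev->curr_ctx->m2m_ctx);
 *              vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *              vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *              // Let the framework pick and run the next queued job.
 *              v4l2_m2m_job_finish(dev->m2m_dev, dev->curr_ctx->m2m_ctx);
 *              return IRQ_HANDLED;
 *      }
 */
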
/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_qbuf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                    struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        unsigned long req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        unsigned int rc = 0;
        unsigned long flags;

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                if (v4l2_event_pending(fh))
                        rc = POLLPRI;
                else if (req_events & POLLPRI)
                        poll_wait(file, &fh->wait, wait);
                if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
                        return rc;
        }

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc |= POLLERR;
                goto end;
        }

        if (m2m_ctx->m2m_dev->m2m_ops->unlock)
                m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

        if (list_empty(&src_q->done_list))
                poll_wait(file, &src_q->done_wq, wait);
        if (list_empty(&dst_q->done_list))
                poll_wait(file, &dst_q->done_wq, wait);

        if (m2m_ctx->m2m_dev->m2m_ops->lock)
                m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLOUT | POLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuffer, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
                        WARN_ON(!m2m_ops->job_abort))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

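/*
 * Illustrative probe-time sketch only; "my_*" names and "struct my_dev *dev"
 * are hypothetical. device_run and job_abort are mandatory (as checked above),
 * while job_ready, lock and unlock are optional:
 *
 *      static const struct v4l2_m2m_ops my_m2m_ops = {
 *              .device_run     = my_device_run,
 *              .job_ready      = my_job_ready,
 *              .job_abort      = my_job_abort,
 *      };
 *
 *      dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *      if (IS_ERR(dev->m2m_dev))
 *              return PTR_ERR(dev->m2m_dev);
 */
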
/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev - a previously initialized m2m_dev struct
 * @drv_priv - driver's instance private data
 * @queue_init - a callback for queue type-specific initialization function to
 * be used for initializing the vb2 queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
        if (ret)
                goto err;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

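/*
 * A minimal open()-time sketch, with hypothetical "ctx", "dev" and
 * my_queue_init() names:
 *
 *      ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *      if (IS_ERR(ctx->m2m_ctx))
 *              return PTR_ERR(ctx->m2m_ctx);
 *
 * where my_queue_init() fills in both struct vb2_queue instances (type,
 * io_modes, ops, mem_ops, buf_struct_size, ...) and calls vb2_queue_init() on
 * each; buf_struct_size must cover struct v4l2_m2m_buffer, since
 * v4l2_m2m_buf_queue() below embeds each buffer in one.
 */
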
/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from buf_queue(), the vb2_ops callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
        struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

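/*
 * Driver-side usage sketch (hypothetical my_buf_queue and struct my_ctx): the
 * vb2 buf_queue callback simply hands every queued buffer over to the ready
 * list kept here:
 *
 *      static void my_buf_queue(struct vb2_buffer *vb)
 *      {
 *              struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *              v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *      }
 */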