/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ##arg); \
        } while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)
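
/*
 * Worked example of the offset scheme (values are made up): a CAPTURE
 * buffer whose vb2 mem offset is 0x1000 is reported to userspace by
 * VIDIOC_QUERYBUF as 0x1000 + DST_QUEUE_OFF_BASE, and v4l2_m2m_mmap()
 * subtracts the base again to select the capture queue, while offsets
 * below DST_QUEUE_OFF_BASE map to the output queue.
 */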

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;

        struct list_head        job_queue;
        spinlock_t              job_spinlock;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                  enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
                                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
        struct v4l2_m2m_buffer *b, *tmp;
        struct vb2_v4l2_buffer *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
                if (b->vb.vb2_buf.index == idx) {
                        list_del(&b->list);
                        q_ctx->num_rdy--;
                        ret = &b->vb;
                        break;
                }
        }
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (NULL != m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                             struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags_job, flags_out, flags_cap;

        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Aborted context\n");
                return;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("On job queue already\n");
                return;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
                                        flags_cap);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Driver not ready\n");
                return;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;

        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
                wait_event(m2m_ctx->finished,
                                !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        v4l2_m2m_try_schedule(m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
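
/*
 * Typical call site (an illustrative sketch, not part of this file; the
 * mydrv_* names and the embedded struct v4l2_fh fh member are hypothetical):
 * a mem2mem driver calls v4l2_m2m_job_finish() from its interrupt handler
 * once both buffers of the finished transaction have been marked done.
 *
 *      static irqreturn_t mydrv_irq(int irq, void *priv)
 *      {
 *              struct mydrv_dev *dev = priv;
 *              struct mydrv_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *              struct vb2_v4l2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *              vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *              vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *              v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *
 *              return IRQ_HANDLED;
 *      }
 */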

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        ret = vb2_reqbufs(vq, reqbufs);
        /* If count == 0, then the owner has released all buffers and he
           is no longer owner of the queue. Otherwise we have an owner. */
        if (ret == 0)
                vq->owner = reqbufs->count ? file->private_data : NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_qbuf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_prepare_buf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                    struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        __poll_t req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        __poll_t rc = 0;
        unsigned long flags;

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                if (v4l2_event_pending(fh))
                        rc = POLLPRI;
                else if (req_events & POLLPRI)
                        poll_wait(file, &fh->wait, wait);
                if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
                        return rc;
        }

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc |= POLLERR;
                goto end;
        }

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (list_empty(&src_q->done_list))
                poll_wait(file, &src_q->done_wq, wait);
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (list_empty(&dst_q->done_list)) {
                /*
                 * If the last buffer was dequeued from the capture queue,
                 * return immediately. DQBUF will return -EPIPE.
                 */
                if (dst_q->last_buffer_dequeued) {
                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
                        return rc | POLLIN | POLLRDNORM;
                }

                poll_wait(file, &dst_q->done_wq, wait);
        }
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLOUT | POLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
                        WARN_ON(!m2m_ops->job_abort))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
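
/*
 * Typical setup (an illustrative sketch; the mydrv_* names are
 * hypothetical): the driver supplies its scheduling callbacks through
 * struct v4l2_m2m_ops and creates the per-device context at probe time.
 * device_run and job_abort are mandatory (see the WARN_ON checks above),
 * job_ready is optional.
 *
 *      static const struct v4l2_m2m_ops mydrv_m2m_ops = {
 *              .device_run     = mydrv_device_run,
 *              .job_ready      = mydrv_job_ready,
 *              .job_abort      = mydrv_job_abort,
 *      };
 *
 *      dev->m2m_dev = v4l2_m2m_init(&mydrv_m2m_ops);
 *      if (IS_ERR(dev->m2m_dev))
 *              return PTR_ERR(dev->m2m_dev);
 */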

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
        if (ret)
                goto err;
        /*
         * If both queues use same mutex assign it as the common buffer
         * queues lock to the m2m context. This lock is used in the
         * v4l2_m2m_ioctl_* helpers.
         */
        if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
                m2m_ctx->q_lock = out_q_ctx->q.lock;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
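
/*
 * Typical usage (an illustrative sketch; the mydrv_* names and the embedded
 * struct v4l2_fh fh member are hypothetical): the per-file-handle context is
 * created from the driver's open() and the queue_init callback initializes
 * both vb2 queues.
 *
 *      static int mydrv_queue_init(void *priv, struct vb2_queue *src_vq,
 *                                  struct vb2_queue *dst_vq)
 *      {
 *              int ret;
 *
 *              (set up type, io_modes, ops, mem_ops, lock and drv_priv
 *               for both queues here)
 *
 *              ret = vb2_queue_init(src_vq);
 *              if (ret)
 *                      return ret;
 *              return vb2_queue_init(dst_vq);
 *      }
 *
 *      ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *                                          mydrv_queue_init);
 *      if (IS_ERR(ctx->fh.m2m_ctx))
 *              return PTR_ERR(ctx->fh.m2m_ctx);
 */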

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b = container_of(vbuf,
                                struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
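
/*
 * Typical call site (an illustrative sketch; the mydrv_* names and the
 * embedded struct v4l2_fh fh member are hypothetical): the driver's vb2
 * .buf_queue operation hands every queued buffer to the framework's ready
 * list instead of programming the hardware directly.
 *
 *      static void mydrv_buf_queue(struct vb2_buffer *vb)
 *      {
 *              struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *              struct mydrv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *              v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *      }
 */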

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                                struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                                struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
                               struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                                struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
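
/*
 * Illustrative sketch (the mydrv_* name is hypothetical): drivers that keep
 * no extra per-ioctl state can plug these helpers straight into their
 * v4l2_ioctl_ops, provided struct v4l2_fh is their file->private_data and
 * fh->m2m_ctx is set.
 *
 *      static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
 *              ...
 *              .vidioc_reqbufs         = v4l2_m2m_ioctl_reqbufs,
 *              .vidioc_create_bufs     = v4l2_m2m_ioctl_create_bufs,
 *              .vidioc_querybuf        = v4l2_m2m_ioctl_querybuf,
 *              .vidioc_qbuf            = v4l2_m2m_ioctl_qbuf,
 *              .vidioc_dqbuf           = v4l2_m2m_ioctl_dqbuf,
 *              .vidioc_prepare_buf     = v4l2_m2m_ioctl_prepare_buf,
 *              .vidioc_expbuf          = v4l2_m2m_ioctl_expbuf,
 *              .vidioc_streamon        = v4l2_m2m_ioctl_streamon,
 *              .vidioc_streamoff       = v4l2_m2m_ioctl_streamoff,
 *      };
 */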

/*
 * v4l2_file_operations helpers. It is assumed here same lock is used
 * for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        __poll_t ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
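
/*
 * Illustrative sketch (the mydrv_* names are hypothetical): together with a
 * driver-specific open()/release() pair, the two helpers above cover the
 * buffer-related file operations.
 *
 *      static const struct v4l2_file_operations mydrv_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = mydrv_open,
 *              .release        = mydrv_release,
 *              .poll           = v4l2_m2m_fop_poll,
 *              .unlocked_ioctl = video_ioctl2,
 *              .mmap           = v4l2_m2m_fop_mmap,
 *      };
 */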