/*
 * uvc_queue.h -- Buffer queue definitions for the USB Video Class (UVC)
 * gadget driver.
 */
# ifndef _UVC_QUEUE_H_
# define _UVC_QUEUE_H_
# ifdef __KERNEL__
# include <linux/kernel.h>
# include <linux/poll.h>
# include <linux/videodev2.h>
#include <media/videobuf2-core.h>
/* Maximum frame size in bytes, for sanity checking. */
# define UVC_MAX_FRAME_SIZE (16*1024*1024)
/* Maximum number of video buffers. */
# define UVC_MAX_VIDEO_BUFFERS 32
/* ------------------------------------------------------------------------
 * Structures
 */
/*
 * Driver-side lifecycle states of a uvc_buffer (tracked in
 * uvc_buffer::state, alongside the vb2 buffer state).
 */
enum uvc_buffer_state {
	UVC_BUF_STATE_IDLE	= 0,
	UVC_BUF_STATE_QUEUED	= 1,
	UVC_BUF_STATE_ACTIVE	= 2,
	UVC_BUF_STATE_DONE	= 3,
	UVC_BUF_STATE_ERROR	= 4,
};
struct uvc_buffer {
2013-03-28 15:11:52 +05:30
struct vb2_buffer buf ;
2010-05-02 20:57:41 +02:00
struct list_head queue ;
2013-03-28 15:11:52 +05:30
2010-05-02 20:57:41 +02:00
enum uvc_buffer_state state ;
2013-03-28 15:11:52 +05:30
void * mem ;
unsigned int length ;
unsigned int bytesused ;
2010-05-02 20:57:41 +02:00
} ;
/* Bits for uvc_video_queue::flags. */
#define UVC_QUEUE_DISCONNECTED (1 << 0)
#define UVC_QUEUE_DROP_INCOMPLETE (1 << 1)
#define UVC_QUEUE_PAUSED (1 << 2)
struct uvc_video_queue {
2013-03-28 15:11:52 +05:30
struct vb2_queue queue ;
2010-05-02 20:57:41 +02:00
unsigned int flags ;
__u32 sequence ;
unsigned int buf_used ;
2013-03-28 15:11:52 +05:30
spinlock_t irqlock ; /* Protects flags and irqqueue */
2010-05-02 20:57:41 +02:00
struct list_head irqqueue ;
} ;
static inline int uvc_queue_streaming ( struct uvc_video_queue * queue )
{
2013-03-28 15:11:52 +05:30
return vb2_is_streaming ( & queue - > queue ) ;
2010-05-02 20:57:41 +02:00
}
/*
 * Queue API, implemented in uvc_queue.c.
 *
 * Fix: stray VCS blame timestamps interleaved with the declarations have
 * been removed; they made the header invalid C.
 */
int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
		    struct mutex *lock);

void uvcg_free_buffers(struct uvc_video_queue *queue);

int uvcg_alloc_buffers(struct uvc_video_queue *queue,
		       struct v4l2_requestbuffers *rb);

int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);

int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);

int uvcg_dequeue_buffer(struct uvc_video_queue *queue,
			struct v4l2_buffer *buf, int nonblocking);

unsigned int uvcg_queue_poll(struct uvc_video_queue *queue,
			     struct file *file, poll_table *wait);

int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma);

#ifndef CONFIG_MMU
unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue,
					   unsigned long pgoff);
#endif /* CONFIG_MMU */

void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect);

int uvcg_queue_enable(struct uvc_video_queue *queue, int enable);

struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
					  struct uvc_buffer *buf);

struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue);
#endif /* __KERNEL__ */

#endif /* _UVC_QUEUE_H_ */