2019-05-27 08:55:05 +02:00
// SPDX-License-Identifier: GPL-2.0-or-later
2005-04-16 15:20:36 -07:00
/*
 * ALSA sequencer FIFO
 * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 */
#include <sound/core.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include "seq_fifo.h"
#include "seq_lock.h"
/* FIFO */
/* create new fifo */
2005-11-17 14:04:02 +01:00
struct snd_seq_fifo * snd_seq_fifo_new ( int poolsize )
2005-04-16 15:20:36 -07:00
{
2005-11-17 14:04:02 +01:00
struct snd_seq_fifo * f ;
2005-04-16 15:20:36 -07:00
2005-09-09 14:20:49 +02:00
f = kzalloc ( sizeof ( * f ) , GFP_KERNEL ) ;
2015-03-10 15:41:18 +01:00
if ( ! f )
2005-04-16 15:20:36 -07:00
return NULL ;
f - > pool = snd_seq_pool_new ( poolsize ) ;
if ( f - > pool = = NULL ) {
kfree ( f ) ;
return NULL ;
}
if ( snd_seq_pool_init ( f - > pool ) < 0 ) {
snd_seq_pool_delete ( & f - > pool ) ;
kfree ( f ) ;
return NULL ;
}
spin_lock_init ( & f - > lock ) ;
snd_use_lock_init ( & f - > use_lock ) ;
init_waitqueue_head ( & f - > input_sleep ) ;
atomic_set ( & f - > overflow , 0 ) ;
f - > head = NULL ;
f - > tail = NULL ;
f - > cells = 0 ;
return f ;
}
2005-11-17 14:04:02 +01:00
void snd_seq_fifo_delete ( struct snd_seq_fifo * * fifo )
2005-04-16 15:20:36 -07:00
{
2005-11-17 14:04:02 +01:00
struct snd_seq_fifo * f ;
2005-04-16 15:20:36 -07:00
2008-08-08 17:09:09 +02:00
if ( snd_BUG_ON ( ! fifo ) )
return ;
2005-04-16 15:20:36 -07:00
f = * fifo ;
2008-08-08 17:09:09 +02:00
if ( snd_BUG_ON ( ! f ) )
return ;
2005-04-16 15:20:36 -07:00
* fifo = NULL ;
2017-03-21 13:56:04 +01:00
if ( f - > pool )
snd_seq_pool_mark_closing ( f - > pool ) ;
2005-04-16 15:20:36 -07:00
snd_seq_fifo_clear ( f ) ;
/* wake up clients if any */
if ( waitqueue_active ( & f - > input_sleep ) )
wake_up ( & f - > input_sleep ) ;
/* release resources...*/
/*....................*/
if ( f - > pool ) {
snd_seq_pool_done ( f - > pool ) ;
snd_seq_pool_delete ( & f - > pool ) ;
}
kfree ( f ) ;
}
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);
2005-04-16 15:20:36 -07:00
/* clear queue */
2005-11-17 14:04:02 +01:00
void snd_seq_fifo_clear ( struct snd_seq_fifo * f )
2005-04-16 15:20:36 -07:00
{
2005-11-17 14:04:02 +01:00
struct snd_seq_event_cell * cell ;
2005-04-16 15:20:36 -07:00
/* clear overflow flag */
atomic_set ( & f - > overflow , 0 ) ;
snd_use_lock_sync ( & f - > use_lock ) ;
2019-03-28 16:21:01 +01:00
spin_lock_irq ( & f - > lock ) ;
2005-04-16 15:20:36 -07:00
/* drain the fifo */
while ( ( cell = fifo_cell_out ( f ) ) ! = NULL ) {
snd_seq_cell_free ( cell ) ;
}
2019-03-28 16:21:01 +01:00
spin_unlock_irq ( & f - > lock ) ;
2005-04-16 15:20:36 -07:00
}
/* enqueue event to fifo */
2005-11-17 14:04:02 +01:00
int snd_seq_fifo_event_in ( struct snd_seq_fifo * f ,
struct snd_seq_event * event )
2005-04-16 15:20:36 -07:00
{
2005-11-17 14:04:02 +01:00
struct snd_seq_event_cell * cell ;
2005-04-16 15:20:36 -07:00
unsigned long flags ;
int err ;
2008-08-08 17:09:09 +02:00
if ( snd_BUG_ON ( ! f ) )
return - EINVAL ;
2005-04-16 15:20:36 -07:00
snd_use_lock_use ( & f - > use_lock ) ;
2018-03-05 22:06:09 +01:00
err = snd_seq_event_dup ( f - > pool , event , & cell , 1 , NULL , NULL ) ; /* always non-blocking */
2005-04-16 15:20:36 -07:00
if ( err < 0 ) {
2014-06-04 01:02:51 -04:00
if ( ( err = = - ENOMEM ) | | ( err = = - EAGAIN ) )
2005-04-16 15:20:36 -07:00
atomic_inc ( & f - > overflow ) ;
snd_use_lock_free ( & f - > use_lock ) ;
return err ;
}
/* append new cells to fifo */
spin_lock_irqsave ( & f - > lock , flags ) ;
if ( f - > tail ! = NULL )
f - > tail - > next = cell ;
f - > tail = cell ;
if ( f - > head = = NULL )
f - > head = cell ;
2017-02-28 22:15:51 +01:00
cell - > next = NULL ;
2005-04-16 15:20:36 -07:00
f - > cells + + ;
spin_unlock_irqrestore ( & f - > lock , flags ) ;
/* wakeup client */
if ( waitqueue_active ( & f - > input_sleep ) )
wake_up ( & f - > input_sleep ) ;
snd_use_lock_free ( & f - > use_lock ) ;
return 0 ; /* success */
}
/* dequeue cell from fifo */
2005-11-17 14:04:02 +01:00
static struct snd_seq_event_cell * fifo_cell_out ( struct snd_seq_fifo * f )
2005-04-16 15:20:36 -07:00
{
2005-11-17 14:04:02 +01:00
struct snd_seq_event_cell * cell ;
2005-04-16 15:20:36 -07:00
2021-06-08 16:05:30 +02:00
cell = f - > head ;
if ( cell ) {
2005-04-16 15:20:36 -07:00
f - > head = cell - > next ;
/* reset tail if this was the last element */
if ( f - > tail = = cell )
f - > tail = NULL ;
cell - > next = NULL ;
f - > cells - - ;
}
return cell ;
}
/* dequeue cell from fifo and copy on user space */

/*
 * Dequeue the head cell of @f into *cellp.  If the FIFO is empty and
 * @nonblock is zero, sleep interruptibly until a cell is enqueued.
 *
 * Returns 0 on success, -EAGAIN if empty in non-blocking mode,
 * -ERESTARTSYS if interrupted by a signal, -EINVAL on a NULL fifo.
 */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_entry_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
		/* register on the wait queue before dropping the lock so a
		 * concurrent enqueue cannot slip in a lost wakeup */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irqrestore(&f->lock, flags);
		schedule();
		spin_lock_irqsave(&f->lock, flags);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}
2005-11-17 14:04:02 +01:00
void snd_seq_fifo_cell_putback ( struct snd_seq_fifo * f ,
struct snd_seq_event_cell * cell )
2005-04-16 15:20:36 -07:00
{
unsigned long flags ;
if ( cell ) {
spin_lock_irqsave ( & f - > lock , flags ) ;
cell - > next = f - > head ;
f - > head = cell ;
2017-02-28 22:15:51 +01:00
if ( ! f - > tail )
f - > tail = cell ;
2005-04-16 15:20:36 -07:00
f - > cells + + ;
spin_unlock_irqrestore ( & f - > lock , flags ) ;
}
}
/* polling; return non-zero if queue is available */
2005-11-17 14:04:02 +01:00
int snd_seq_fifo_poll_wait ( struct snd_seq_fifo * f , struct file * file ,
poll_table * wait )
2005-04-16 15:20:36 -07:00
{
poll_wait ( file , & f - > input_sleep , wait ) ;
return ( f - > cells > 0 ) ;
}
/* change the size of pool; all old events are removed */

/*
 * Replace the FIFO's backing pool with a fresh one of @poolsize cells.
 * All currently queued events are discarded.  Returns 0 on success,
 * -ENOMEM if the new pool cannot be created, -EINVAL on bad arguments.
 */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	spin_lock_irq(&f->lock);
	/* remember old pool */
	oldpool = f->pool;
	oldhead = f->head;
	/* exchange pools */
	f->pool = newpool;
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;
	/* NOTE: overflow flag is not cleared */
	spin_unlock_irq(&f->lock);

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}
2019-08-25 09:21:44 +02:00
/* get the number of unused cells safely */
int snd_seq_fifo_unused_cells ( struct snd_seq_fifo * f )
{
unsigned long flags ;
int cells ;
if ( ! f )
return 0 ;
snd_use_lock_use ( & f - > use_lock ) ;
spin_lock_irqsave ( & f - > lock , flags ) ;
cells = snd_seq_unused_cells ( f - > pool ) ;
spin_unlock_irqrestore ( & f - > lock , flags ) ;
snd_use_lock_free ( & f - > use_lock ) ;
return cells ;
}