2008-10-13 10:36:58 +01:00
/*
* Tty buffer allocation management
*/
# include <linux/types.h>
# include <linux/errno.h>
# include <linux/tty.h>
# include <linux/tty_driver.h>
# include <linux/tty_flip.h>
# include <linux/timer.h>
# include <linux/string.h>
# include <linux/slab.h>
# include <linux/sched.h>
# include <linux/init.h>
# include <linux/wait.h>
# include <linux/bitops.h>
# include <linux/delay.h>
# include <linux/module.h>
2013-02-12 02:00:43 -05:00
# include <linux/ratelimit.h>
2008-10-13 10:36:58 +01:00
2013-06-15 09:36:02 -04:00
# define MIN_TTYB_SIZE 256
# define TTYB_ALIGN_MASK 255
2013-06-15 09:36:08 -04:00
/*
 * Byte threshold to limit memory consumption for flip buffers.
 * The actual memory limit is > 2x this amount.
 */
# define TTYB_MEM_LIMIT 65536
2013-06-15 09:36:16 -04:00
/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_alloc() for the allocation
 * logic this must match.
 */
# define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
2013-06-15 09:36:08 -04:00
2013-06-15 09:36:15 -04:00
/**
* tty_buffer_lock_exclusive - gain exclusive access to buffer
* tty_buffer_unlock_exclusive - release exclusive access
*
* @ port - tty_port owning the flip buffer
*
* Guarantees safe use of the line discipline ' s receive_buf ( ) method by
* excluding the buffer work and any pending flush from using the flip
* buffer . Data can continue to be added concurrently to the flip buffer
* from the driver side .
*
* On release , the buffer work is restarted if there is data in the
* flip buffer
*/
void tty_buffer_lock_exclusive ( struct tty_port * port )
{
struct tty_bufhead * buf = & port - > buf ;
atomic_inc ( & buf - > priority ) ;
mutex_lock ( & buf - > lock ) ;
}
void tty_buffer_unlock_exclusive ( struct tty_port * port )
{
struct tty_bufhead * buf = & port - > buf ;
int restart ;
restart = buf - > head - > commit ! = buf - > head - > read ;
atomic_dec ( & buf - > priority ) ;
mutex_unlock ( & buf - > lock ) ;
if ( restart )
queue_work ( system_unbound_wq , & buf - > work ) ;
}
2013-06-15 09:36:08 -04:00
/**
* tty_buffer_space_avail - return unused buffer space
* @ port - tty_port owning the flip buffer
*
* Returns the # of bytes which can be written by the driver without
* reaching the buffer limit .
*
* Note : this does not guarantee that memory is available to write
* the returned # of bytes ( use tty_prepare_flip_string_xxx ( ) to
* pre - allocate if memory guarantee is required ) .
*/
int tty_buffer_space_avail ( struct tty_port * port )
{
int space = TTYB_MEM_LIMIT - atomic_read ( & port - > buf . memory_used ) ;
return max ( space , 0 ) ;
}
2013-06-15 09:36:03 -04:00
/* Reset @b to an empty state, ready for reuse with @size data bytes. */
static void tty_buffer_reset(struct tty_buffer *b, size_t size)
{
	b->used = 0;
	b->size = size;
	b->next = NULL;
	b->commit = 0;
	b->read = 0;
}
2008-10-13 10:36:58 +01:00
/**
* tty_buffer_free_all - free buffers used by a tty
* @ tty : tty to free from
*
* Remove all the buffers pending on a tty whether queued with data
* or in the free ring . Must be called when the tty is no longer in use
*/
2012-10-18 22:26:47 +02:00
void tty_buffer_free_all ( struct tty_port * port )
2008-10-13 10:36:58 +01:00
{
2012-10-18 22:26:47 +02:00
struct tty_bufhead * buf = & port - > buf ;
2013-06-15 09:36:06 -04:00
struct tty_buffer * p , * next ;
struct llist_node * llist ;
2012-10-18 22:26:45 +02:00
2013-06-15 09:36:05 -04:00
while ( ( p = buf - > head ) ! = NULL ) {
buf - > head = p - > next ;
2013-06-15 09:36:07 -04:00
if ( p - > size > 0 )
kfree ( p ) ;
2008-10-13 10:36:58 +01:00
}
2013-06-15 09:36:06 -04:00
llist = llist_del_all ( & buf - > free ) ;
llist_for_each_entry_safe ( p , next , llist , free )
2013-06-15 09:36:05 -04:00
kfree ( p ) ;
2013-06-15 09:36:06 -04:00
2013-06-15 09:36:07 -04:00
tty_buffer_reset ( & buf - > sentinel , 0 ) ;
buf - > head = & buf - > sentinel ;
buf - > tail = & buf - > sentinel ;
2013-06-15 09:36:08 -04:00
atomic_set ( & buf - > memory_used , 0 ) ;
2008-10-13 10:36:58 +01:00
}
/**
* tty_buffer_alloc - allocate a tty buffer
* @ tty : tty device
* @ size : desired size ( characters )
*
* Allocate a new tty buffer to hold the desired number of characters .
2013-06-15 09:36:04 -04:00
* We round our buffers off in 256 character chunks to get better
* allocation behaviour .
2008-10-13 10:36:58 +01:00
* Return NULL if out of memory or the allocation would exceed the
* per device queue
*/
2012-10-18 22:26:47 +02:00
static struct tty_buffer * tty_buffer_alloc ( struct tty_port * port , size_t size )
2008-10-13 10:36:58 +01:00
{
2013-06-15 09:36:06 -04:00
struct llist_node * free ;
2008-10-13 10:36:58 +01:00
struct tty_buffer * p ;
2013-06-15 09:36:04 -04:00
/* Round the buffer size out */
size = __ALIGN_MASK ( size , TTYB_ALIGN_MASK ) ;
if ( size < = MIN_TTYB_SIZE ) {
2013-06-15 09:36:06 -04:00
free = llist_del_first ( & port - > buf . free ) ;
if ( free ) {
p = llist_entry ( free , struct tty_buffer , free ) ;
2013-06-15 09:36:04 -04:00
goto found ;
}
}
/* Should possibly check if this fails for the largest buffer we
have queued and recycle that ? */
2013-06-15 09:36:08 -04:00
if ( atomic_read ( & port - > buf . memory_used ) > TTYB_MEM_LIMIT )
2008-10-13 10:36:58 +01:00
return NULL ;
p = kmalloc ( sizeof ( struct tty_buffer ) + 2 * size , GFP_ATOMIC ) ;
if ( p = = NULL )
return NULL ;
2013-06-15 09:36:03 -04:00
2013-06-15 09:36:04 -04:00
found :
2013-06-15 09:36:03 -04:00
tty_buffer_reset ( p , size ) ;
2013-06-15 09:36:08 -04:00
atomic_add ( size , & port - > buf . memory_used ) ;
2008-10-13 10:36:58 +01:00
return p ;
}
/**
* tty_buffer_free - free a tty buffer
* @ tty : tty owning the buffer
* @ b : the buffer to free
*
* Free a tty buffer , or add it to the free list according to our
* internal strategy
*/
2012-10-18 22:26:47 +02:00
static void tty_buffer_free ( struct tty_port * port , struct tty_buffer * b )
2008-10-13 10:36:58 +01:00
{
2012-10-18 22:26:47 +02:00
struct tty_bufhead * buf = & port - > buf ;
2012-10-18 22:26:45 +02:00
2008-10-13 10:36:58 +01:00
/* Dumb strategy for now - should keep some stats */
2013-06-15 09:36:08 -04:00
WARN_ON ( atomic_sub_return ( b - > size , & buf - > memory_used ) < 0 ) ;
2008-10-13 10:36:58 +01:00
2013-06-15 09:36:02 -04:00
if ( b - > size > MIN_TTYB_SIZE )
2008-10-13 10:36:58 +01:00
kfree ( b ) ;
2013-06-15 09:36:07 -04:00
else if ( b - > size > 0 )
2013-06-15 09:36:06 -04:00
llist_add ( & b - > free , & buf - > free ) ;
2008-10-13 10:36:58 +01:00
}
/**
* tty_buffer_flush - flush full tty buffers
* @ tty : tty to flush
*
* flush all the buffers containing receive data . If the buffer is
* being processed by flush_to_ldisc then we defer the processing
* to that function
*
2013-06-15 09:36:15 -04:00
* Locking : takes buffer lock to ensure single - threaded flip buffer
2013-06-15 09:36:10 -04:00
* ' consumer '
2008-10-13 10:36:58 +01:00
*/
void tty_buffer_flush ( struct tty_struct * tty )
{
2012-10-18 22:26:44 +02:00
struct tty_port * port = tty - > port ;
2012-10-18 22:26:47 +02:00
struct tty_bufhead * buf = & port - > buf ;
2013-06-15 09:36:14 -04:00
struct tty_buffer * next ;
2008-10-13 10:36:58 +01:00
2013-06-15 09:36:15 -04:00
atomic_inc ( & buf - > priority ) ;
2013-06-15 09:36:10 -04:00
2013-06-15 09:36:15 -04:00
mutex_lock ( & buf - > lock ) ;
2013-06-15 09:36:14 -04:00
while ( ( next = buf - > head - > next ) ! = NULL ) {
tty_buffer_free ( port , buf - > head ) ;
buf - > head = next ;
}
buf - > head - > read = buf - > head - > commit ;
2013-06-15 09:36:15 -04:00
atomic_dec ( & buf - > priority ) ;
mutex_unlock ( & buf - > lock ) ;
2008-10-13 10:36:58 +01:00
}
/**
2013-01-19 18:16:20 +04:00
* tty_buffer_request_room - grow tty buffer if needed
2008-10-13 10:36:58 +01:00
* @ tty : tty structure
* @ size : size desired
*
* Make at least size bytes of linear space available for the tty
* buffer . If we fail return the size we managed to find .
*/
2013-01-19 18:16:20 +04:00
int tty_buffer_request_room ( struct tty_port * port , size_t size )
2008-10-13 10:36:58 +01:00
{
2012-10-18 22:26:47 +02:00
struct tty_bufhead * buf = & port - > buf ;
2008-10-13 10:36:58 +01:00
struct tty_buffer * b , * n ;
int left ;
2013-06-15 09:36:09 -04:00
2012-10-18 22:26:45 +02:00
b = buf - > tail ;
2013-06-15 09:36:07 -04:00
left = b - > size - b - > used ;
2008-10-13 10:36:58 +01:00
if ( left < size ) {
/* This is the slow path - looking for new buffers to use */
2013-06-15 09:36:04 -04:00
if ( ( n = tty_buffer_alloc ( port , size ) ) ! = NULL ) {
2012-10-18 22:26:45 +02:00
buf - > tail = n ;
2013-06-15 09:36:09 -04:00
b - > commit = b - > used ;
smp_mb ( ) ;
b - > next = n ;
2008-10-13 10:36:58 +01:00
} else
size = left ;
}
return size ;
}
EXPORT_SYMBOL_GPL ( tty_buffer_request_room ) ;
/**
2010-02-18 16:43:54 +00:00
* tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
2013-01-03 15:53:02 +01:00
* @ port : tty port
2008-10-13 10:36:58 +01:00
* @ chars : characters
2010-02-18 16:43:54 +00:00
* @ flag : flag value for each character
2008-10-13 10:36:58 +01:00
* @ size : size
*
* Queue a series of bytes to the tty buffering . All the characters
2010-05-07 19:58:32 +02:00
* passed are marked with the supplied flag . Returns the number added .
2008-10-13 10:36:58 +01:00
*/
2013-01-03 15:53:02 +01:00
int tty_insert_flip_string_fixed_flag ( struct tty_port * port ,
2010-02-18 16:43:54 +00:00
const unsigned char * chars , char flag , size_t size )
2008-10-13 10:36:58 +01:00
{
int copied = 0 ;
do {
2010-03-09 18:54:28 +08:00
int goal = min_t ( size_t , size - copied , TTY_BUFFER_PAGE ) ;
2013-01-19 18:16:20 +04:00
int space = tty_buffer_request_room ( port , goal ) ;
struct tty_buffer * tb = port - > buf . tail ;
2013-06-15 09:36:07 -04:00
if ( unlikely ( space = = 0 ) )
2008-10-13 10:36:58 +01:00
break ;
2013-06-15 09:36:01 -04:00
memcpy ( char_buf_ptr ( tb , tb - > used ) , chars , space ) ;
memset ( flag_buf_ptr ( tb , tb - > used ) , flag , space ) ;
2008-10-13 10:36:58 +01:00
tb - > used + = space ;
copied + = space ;
chars + = space ;
/* There is a small chance that we need to split the data over
several buffers . If this is the case we must loop */
} while ( unlikely ( size > copied ) ) ;
return copied ;
}
2010-02-18 16:43:54 +00:00
EXPORT_SYMBOL ( tty_insert_flip_string_fixed_flag ) ;
2008-10-13 10:36:58 +01:00
/**
* tty_insert_flip_string_flags - Add characters to the tty buffer
2013-01-03 15:53:02 +01:00
* @ port : tty port
2008-10-13 10:36:58 +01:00
* @ chars : characters
* @ flags : flag bytes
* @ size : size
*
* Queue a series of bytes to the tty buffering . For each character
* the flags array indicates the status of the character . Returns the
* number added .
*/
2013-01-03 15:53:02 +01:00
int tty_insert_flip_string_flags ( struct tty_port * port ,
2008-10-13 10:36:58 +01:00
const unsigned char * chars , const char * flags , size_t size )
{
int copied = 0 ;
do {
2010-03-09 18:54:28 +08:00
int goal = min_t ( size_t , size - copied , TTY_BUFFER_PAGE ) ;
2013-01-19 18:16:20 +04:00
int space = tty_buffer_request_room ( port , goal ) ;
struct tty_buffer * tb = port - > buf . tail ;
2013-06-15 09:36:07 -04:00
if ( unlikely ( space = = 0 ) )
2008-10-13 10:36:58 +01:00
break ;
2013-06-15 09:36:01 -04:00
memcpy ( char_buf_ptr ( tb , tb - > used ) , chars , space ) ;
memcpy ( flag_buf_ptr ( tb , tb - > used ) , flags , space ) ;
2008-10-13 10:36:58 +01:00
tb - > used + = space ;
copied + = space ;
chars + = space ;
flags + = space ;
/* There is a small chance that we need to split the data over
several buffers . If this is the case we must loop */
} while ( unlikely ( size > copied ) ) ;
return copied ;
}
EXPORT_SYMBOL ( tty_insert_flip_string_flags ) ;
/**
* tty_schedule_flip - push characters to ldisc
2013-01-03 15:53:07 +01:00
* @ port : tty port to push from
2008-10-13 10:36:58 +01:00
*
* Takes any pending buffers and transfers their ownership to the
* ldisc side of the queue . It then schedules those characters for
* processing by the line discipline .
2012-09-27 14:02:05 +02:00
* Note that this function can only be used when the low_latency flag
* is unset . Otherwise the workqueue won ' t be flushed .
2008-10-13 10:36:58 +01:00
*/
2013-01-03 15:53:07 +01:00
void tty_schedule_flip ( struct tty_port * port )
2008-10-13 10:36:58 +01:00
{
2013-01-03 15:53:07 +01:00
struct tty_bufhead * buf = & port - > buf ;
WARN_ON ( port - > low_latency ) ;
2012-10-18 22:26:45 +02:00
2013-06-15 09:36:07 -04:00
buf - > tail - > commit = buf - > tail - > used ;
2012-10-18 22:26:45 +02:00
schedule_work ( & buf - > work ) ;
2008-10-13 10:36:58 +01:00
}
EXPORT_SYMBOL ( tty_schedule_flip ) ;
/**
* tty_prepare_flip_string - make room for characters
2013-01-03 15:53:02 +01:00
* @ port : tty port
2008-10-13 10:36:58 +01:00
* @ chars : return pointer for character write area
* @ size : desired size
*
* Prepare a block of space in the buffer for data . Returns the length
* available and buffer pointer to the space which is now allocated and
* accounted for as ready for normal characters . This is used for drivers
* that need their own block copy routines into the buffer . There is no
* guarantee the buffer is a DMA target !
*/
2013-01-03 15:53:02 +01:00
int tty_prepare_flip_string ( struct tty_port * port , unsigned char * * chars ,
2012-10-18 22:26:47 +02:00
size_t size )
2008-10-13 10:36:58 +01:00
{
2013-01-19 18:16:20 +04:00
int space = tty_buffer_request_room ( port , size ) ;
2008-10-13 10:36:58 +01:00
if ( likely ( space ) ) {
2013-01-19 18:16:20 +04:00
struct tty_buffer * tb = port - > buf . tail ;
2013-06-15 09:36:01 -04:00
* chars = char_buf_ptr ( tb , tb - > used ) ;
memset ( flag_buf_ptr ( tb , tb - > used ) , TTY_NORMAL , space ) ;
2008-10-13 10:36:58 +01:00
tb - > used + = space ;
}
return space ;
}
EXPORT_SYMBOL_GPL ( tty_prepare_flip_string ) ;
/**
* tty_prepare_flip_string_flags - make room for characters
2013-01-03 15:53:02 +01:00
* @ port : tty port
2008-10-13 10:36:58 +01:00
* @ chars : return pointer for character write area
* @ flags : return pointer for status flag write area
* @ size : desired size
*
* Prepare a block of space in the buffer for data . Returns the length
* available and buffer pointer to the space which is now allocated and
* accounted for as ready for characters . This is used for drivers
* that need their own block copy routines into the buffer . There is no
* guarantee the buffer is a DMA target !
*/
2013-01-03 15:53:02 +01:00
int tty_prepare_flip_string_flags ( struct tty_port * port ,
2008-10-13 10:36:58 +01:00
unsigned char * * chars , char * * flags , size_t size )
{
2013-01-19 18:16:20 +04:00
int space = tty_buffer_request_room ( port , size ) ;
2008-10-13 10:36:58 +01:00
if ( likely ( space ) ) {
2013-01-19 18:16:20 +04:00
struct tty_buffer * tb = port - > buf . tail ;
2013-06-15 09:36:01 -04:00
* chars = char_buf_ptr ( tb , tb - > used ) ;
* flags = flag_buf_ptr ( tb , tb - > used ) ;
2008-10-13 10:36:58 +01:00
tb - > used + = space ;
}
return space ;
}
EXPORT_SYMBOL_GPL ( tty_prepare_flip_string_flags ) ;
2013-06-15 09:14:14 -04:00
static int
receive_buf ( struct tty_struct * tty , struct tty_buffer * head , int count )
{
struct tty_ldisc * disc = tty - > ldisc ;
2013-06-15 09:36:01 -04:00
unsigned char * p = char_buf_ptr ( head , head - > read ) ;
char * f = flag_buf_ptr ( head , head - > read ) ;
2013-06-15 09:14:14 -04:00
2013-06-15 09:14:15 -04:00
if ( disc - > ops - > receive_buf2 )
count = disc - > ops - > receive_buf2 ( tty , p , f , count ) ;
else {
count = min_t ( int , count , tty - > receive_room ) ;
if ( count )
disc - > ops - > receive_buf ( tty , p , f , count ) ;
}
2013-06-15 09:14:14 -04:00
head - > read + = count ;
return count ;
}
2008-10-13 10:36:58 +01:00
/**
* flush_to_ldisc
* @ work : tty structure passed from work queue .
*
* This routine is called out of the software interrupt to flush data
* from the buffer chain to the line discipline .
*
2013-06-15 09:36:10 -04:00
* The receive_buf method is single threaded for each tty instance .
*
2013-06-15 09:36:15 -04:00
* Locking : takes buffer lock to ensure single - threaded flip buffer
2013-06-15 09:36:10 -04:00
* ' consumer '
2008-10-13 10:36:58 +01:00
*/
static void flush_to_ldisc ( struct work_struct * work )
{
2012-10-18 22:26:47 +02:00
struct tty_port * port = container_of ( work , struct tty_port , buf . work ) ;
struct tty_bufhead * buf = & port - > buf ;
struct tty_struct * tty ;
2008-10-13 10:36:58 +01:00
struct tty_ldisc * disc ;
2012-10-18 22:26:47 +02:00
tty = port - > itty ;
2013-02-27 22:30:24 +01:00
if ( tty = = NULL )
2012-10-18 22:26:47 +02:00
return ;
2008-10-13 10:36:58 +01:00
disc = tty_ldisc_ref ( tty ) ;
2013-06-15 07:04:48 -04:00
if ( disc = = NULL )
2008-10-13 10:36:58 +01:00
return ;
2013-06-15 09:36:15 -04:00
mutex_lock ( & buf - > lock ) ;
2009-10-14 08:59:49 -07:00
2013-06-15 09:36:11 -04:00
while ( 1 ) {
struct tty_buffer * head = buf - > head ;
int count ;
2013-06-15 09:36:15 -04:00
/* Ldisc or user is trying to gain exclusive access */
if ( atomic_read ( & buf - > priority ) )
2013-06-15 09:36:11 -04:00
break ;
count = head - > commit - head - > read ;
if ( ! count ) {
if ( head - > next = = NULL )
2013-06-15 09:14:14 -04:00
break ;
2013-06-15 09:36:11 -04:00
buf - > head = head - > next ;
tty_buffer_free ( port , head ) ;
continue ;
2008-10-13 10:36:58 +01:00
}
2013-06-15 09:36:11 -04:00
count = receive_buf ( tty , head , count ) ;
if ( ! count )
break ;
2008-10-13 10:36:58 +01:00
}
2009-10-14 08:59:49 -07:00
2013-06-15 09:36:15 -04:00
mutex_unlock ( & buf - > lock ) ;
2008-10-13 10:36:58 +01:00
tty_ldisc_deref ( disc ) ;
}
2009-07-29 12:15:56 -07:00
/**
* tty_flush_to_ldisc
* @ tty : tty to push
*
* Push the terminal flip buffers to the line discipline .
*
* Must not be called from IRQ context .
*/
void tty_flush_to_ldisc ( struct tty_struct * tty )
{
2013-01-03 15:53:05 +01:00
if ( ! tty - > port - > low_latency )
2012-10-18 22:26:47 +02:00
flush_work ( & tty - > port - > buf . work ) ;
2009-07-29 12:15:56 -07:00
}
2008-10-13 10:36:58 +01:00
/**
* tty_flip_buffer_push - terminal
2013-01-03 15:53:06 +01:00
* @ port : tty port to push
2008-10-13 10:36:58 +01:00
*
* Queue a push of the terminal flip buffers to the line discipline . This
2013-01-03 15:53:05 +01:00
* function must not be called from IRQ context if port - > low_latency is
* set .
2008-10-13 10:36:58 +01:00
*
* In the event of the queue being busy for flipping the work will be
* held off and retried later .
*/
2013-01-03 15:53:06 +01:00
void tty_flip_buffer_push ( struct tty_port * port )
2008-10-13 10:36:58 +01:00
{
2013-01-03 15:53:06 +01:00
struct tty_bufhead * buf = & port - > buf ;
2012-10-18 22:26:45 +02:00
2013-06-15 09:36:07 -04:00
buf - > tail - > commit = buf - > tail - > used ;
2008-10-13 10:36:58 +01:00
2013-01-03 15:53:06 +01:00
if ( port - > low_latency )
2012-10-18 22:26:45 +02:00
flush_to_ldisc ( & buf - > work ) ;
2008-10-13 10:36:58 +01:00
else
2012-10-18 22:26:45 +02:00
schedule_work ( & buf - > work ) ;
2008-10-13 10:36:58 +01:00
}
EXPORT_SYMBOL ( tty_flip_buffer_push ) ;
/**
* tty_buffer_init - prepare a tty buffer structure
* @ tty : tty to initialise
*
* Set up the initial state of the buffer management for a tty device .
* Must be called before the other tty buffer functions are used .
*/
2012-10-18 22:26:47 +02:00
void tty_buffer_init ( struct tty_port * port )
2008-10-13 10:36:58 +01:00
{
2012-10-18 22:26:47 +02:00
struct tty_bufhead * buf = & port - > buf ;
2012-10-18 22:26:45 +02:00
2013-06-15 09:36:15 -04:00
mutex_init ( & buf - > lock ) ;
2013-06-15 09:36:07 -04:00
tty_buffer_reset ( & buf - > sentinel , 0 ) ;
buf - > head = & buf - > sentinel ;
buf - > tail = & buf - > sentinel ;
2013-06-15 09:36:06 -04:00
init_llist_head ( & buf - > free ) ;
2013-06-15 09:36:08 -04:00
atomic_set ( & buf - > memory_used , 0 ) ;
2013-06-15 09:36:15 -04:00
atomic_set ( & buf - > priority , 0 ) ;
2012-10-18 22:26:45 +02:00
INIT_WORK ( & buf - > work , flush_to_ldisc ) ;
2008-10-13 10:36:58 +01:00
}