2017-11-06 20:11:51 +03:00
// SPDX-License-Identifier: GPL-2.0
2008-10-13 13:36:58 +04:00
/*
* Tty buffer allocation management
*/
# include <linux/types.h>
# include <linux/errno.h>
# include <linux/tty.h>
# include <linux/tty_driver.h>
# include <linux/tty_flip.h>
# include <linux/timer.h>
# include <linux/string.h>
# include <linux/slab.h>
# include <linux/sched.h>
# include <linux/wait.h>
# include <linux/bitops.h>
# include <linux/delay.h>
# include <linux/module.h>
2013-02-12 11:00:43 +04:00
# include <linux/ratelimit.h>
2021-04-08 15:51:30 +03:00
# include "tty.h"
2013-06-15 17:36:02 +04:00
# define MIN_TTYB_SIZE 256
# define TTYB_ALIGN_MASK 255
2013-06-15 17:36:08 +04:00
/*
 * Byte threshold to limit memory consumption for flip buffers.
 * The actual memory limit is > 2x this amount.
 */
2019-01-28 21:01:10 +03:00
# define TTYB_DEFAULT_MEM_LIMIT (640 * 1024UL)
2013-06-15 17:36:08 +04:00
2013-06-15 17:36:16 +04:00
/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_find for the allocation
 * logic this must match.
 */
# define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
2013-06-15 17:36:15 +04:00
/**
2021-11-26 11:16:02 +03:00
* tty_buffer_lock_exclusive - gain exclusive access to buffer
* @ port : tty port owning the flip buffer
2013-06-15 17:36:15 +04:00
*
2021-11-26 11:16:02 +03:00
* Guarantees safe use of the & tty_ldisc_ops . receive_buf ( ) method by excluding
* the buffer work and any pending flush from using the flip buffer . Data can
* continue to be added concurrently to the flip buffer from the driver side .
2013-06-15 17:36:15 +04:00
*
2021-11-26 11:16:02 +03:00
* See also tty_buffer_unlock_exclusive ( ) .
2013-06-15 17:36:15 +04:00
*/
void tty_buffer_lock_exclusive ( struct tty_port * port )
{
struct tty_bufhead * buf = & port - > buf ;
atomic_inc ( & buf - > priority ) ;
mutex_lock ( & buf - > lock ) ;
}
2014-05-19 04:03:06 +04:00
EXPORT_SYMBOL_GPL ( tty_buffer_lock_exclusive ) ;
2013-06-15 17:36:15 +04:00
2021-11-26 11:16:02 +03:00
/**
* tty_buffer_unlock_exclusive - release exclusive access
* @ port : tty port owning the flip buffer
*
* The buffer work is restarted if there is data in the flip buffer .
*
* See also tty_buffer_lock_exclusive ( ) .
*/
2013-06-15 17:36:15 +04:00
void tty_buffer_unlock_exclusive ( struct tty_port * port )
{
struct tty_bufhead * buf = & port - > buf ;
int restart ;
restart = buf - > head - > commit ! = buf - > head - > read ;
atomic_dec ( & buf - > priority ) ;
mutex_unlock ( & buf - > lock ) ;
if ( restart )
queue_work ( system_unbound_wq , & buf - > work ) ;
}
2014-05-19 04:03:06 +04:00
EXPORT_SYMBOL_GPL ( tty_buffer_unlock_exclusive ) ;
2013-06-15 17:36:15 +04:00
2013-06-15 17:36:08 +04:00
/**
2021-11-26 11:16:02 +03:00
* tty_buffer_space_avail - return unused buffer space
* @ port : tty port owning the flip buffer
2013-06-15 17:36:08 +04:00
*
2021-11-26 11:16:02 +03:00
* Returns : the # of bytes which can be written by the driver without reaching
* the buffer limit .
2013-06-15 17:36:08 +04:00
*
2021-11-26 11:16:02 +03:00
* Note : this does not guarantee that memory is available to write the returned
* # of bytes ( use tty_prepare_flip_string ( ) to pre - allocate if memory
* guarantee is required ) .
2013-06-15 17:36:08 +04:00
*/
2021-05-05 12:19:17 +03:00
unsigned int tty_buffer_space_avail ( struct tty_port * port )
2013-06-15 17:36:08 +04:00
{
2013-11-22 21:09:56 +04:00
int space = port - > buf . mem_limit - atomic_read ( & port - > buf . mem_used ) ;
2021-05-12 12:26:11 +03:00
2013-06-15 17:36:08 +04:00
return max ( space , 0 ) ;
}
2013-11-22 22:06:08 +04:00
EXPORT_SYMBOL_GPL ( tty_buffer_space_avail ) ;
2013-06-15 17:36:08 +04:00
2013-06-15 17:36:03 +04:00
static void tty_buffer_reset ( struct tty_buffer * p , size_t size )
{
p - > used = 0 ;
p - > size = size ;
p - > next = NULL ;
p - > commit = 0 ;
p - > read = 0 ;
2013-12-09 18:23:52 +04:00
p - > flags = 0 ;
2013-06-15 17:36:03 +04:00
}
2008-10-13 13:36:58 +04:00
/**
2021-11-26 11:16:02 +03:00
* tty_buffer_free_all - free buffers used by a tty
* @ port : tty port to free from
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* Remove all the buffers pending on a tty whether queued with data or in the
* free ring . Must be called when the tty is no longer in use .
2008-10-13 13:36:58 +04:00
*/
2012-10-19 00:26:47 +04:00
void tty_buffer_free_all ( struct tty_port * port )
2008-10-13 13:36:58 +04:00
{
2012-10-19 00:26:47 +04:00
struct tty_bufhead * buf = & port - > buf ;
2013-06-15 17:36:06 +04:00
struct tty_buffer * p , * next ;
struct llist_node * llist ;
2018-09-07 16:19:06 +03:00
unsigned int freed = 0 ;
int still_used ;
2012-10-19 00:26:45 +04:00
2013-06-15 17:36:05 +04:00
while ( ( p = buf - > head ) ! = NULL ) {
buf - > head = p - > next ;
2018-09-07 16:19:06 +03:00
freed + = p - > size ;
2013-06-15 17:36:07 +04:00
if ( p - > size > 0 )
kfree ( p ) ;
2008-10-13 13:36:58 +04:00
}
2013-06-15 17:36:06 +04:00
llist = llist_del_all ( & buf - > free ) ;
llist_for_each_entry_safe ( p , next , llist , free )
2013-06-15 17:36:05 +04:00
kfree ( p ) ;
2013-06-15 17:36:06 +04:00
2013-06-15 17:36:07 +04:00
tty_buffer_reset ( & buf - > sentinel , 0 ) ;
buf - > head = & buf - > sentinel ;
buf - > tail = & buf - > sentinel ;
2013-06-15 17:36:08 +04:00
2018-09-07 16:19:06 +03:00
still_used = atomic_xchg ( & buf - > mem_used , 0 ) ;
WARN ( still_used ! = freed , " we still have not freed %d bytes! " ,
still_used - freed ) ;
2008-10-13 13:36:58 +04:00
}
/**
2021-11-26 11:16:02 +03:00
* tty_buffer_alloc - allocate a tty buffer
* @ port : tty port
* @ size : desired size ( characters )
*
* Allocate a new tty buffer to hold the desired number of characters . We
* round our buffers off in 256 character chunks to get better allocation
* behaviour .
*
* Returns : % NULL if out of memory or the allocation would exceed the per
* device queue .
2008-10-13 13:36:58 +04:00
*/
2012-10-19 00:26:47 +04:00
static struct tty_buffer * tty_buffer_alloc ( struct tty_port * port , size_t size )
2008-10-13 13:36:58 +04:00
{
2013-06-15 17:36:06 +04:00
struct llist_node * free ;
2008-10-13 13:36:58 +04:00
struct tty_buffer * p ;
2013-06-15 17:36:04 +04:00
/* Round the buffer size out */
size = __ALIGN_MASK ( size , TTYB_ALIGN_MASK ) ;
if ( size < = MIN_TTYB_SIZE ) {
2013-06-15 17:36:06 +04:00
free = llist_del_first ( & port - > buf . free ) ;
if ( free ) {
p = llist_entry ( free , struct tty_buffer , free ) ;
2013-06-15 17:36:04 +04:00
goto found ;
}
}
/* Should possibly check if this fails for the largest buffer we
2021-05-12 12:26:13 +03:00
* have queued and recycle that ?
*/
2013-11-22 21:09:56 +04:00
if ( atomic_read ( & port - > buf . mem_used ) > port - > buf . mem_limit )
2008-10-13 13:36:58 +04:00
return NULL ;
p = kmalloc ( sizeof ( struct tty_buffer ) + 2 * size , GFP_ATOMIC ) ;
if ( p = = NULL )
return NULL ;
2013-06-15 17:36:03 +04:00
2013-06-15 17:36:04 +04:00
found :
2013-06-15 17:36:03 +04:00
tty_buffer_reset ( p , size ) ;
2013-11-22 21:09:56 +04:00
atomic_add ( size , & port - > buf . mem_used ) ;
2008-10-13 13:36:58 +04:00
return p ;
}
/**
2021-11-26 11:16:02 +03:00
* tty_buffer_free - free a tty buffer
* @ port : tty port owning the buffer
* @ b : the buffer to free
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* Free a tty buffer , or add it to the free list according to our internal
* strategy .
2008-10-13 13:36:58 +04:00
*/
2012-10-19 00:26:47 +04:00
static void tty_buffer_free ( struct tty_port * port , struct tty_buffer * b )
2008-10-13 13:36:58 +04:00
{
2012-10-19 00:26:47 +04:00
struct tty_bufhead * buf = & port - > buf ;
2012-10-19 00:26:45 +04:00
2008-10-13 13:36:58 +04:00
/* Dumb strategy for now - should keep some stats */
2013-11-22 21:09:56 +04:00
WARN_ON ( atomic_sub_return ( b - > size , & buf - > mem_used ) < 0 ) ;
2008-10-13 13:36:58 +04:00
2013-06-15 17:36:02 +04:00
if ( b - > size > MIN_TTYB_SIZE )
2008-10-13 13:36:58 +04:00
kfree ( b ) ;
2013-06-15 17:36:07 +04:00
else if ( b - > size > 0 )
2013-06-15 17:36:06 +04:00
llist_add ( & b - > free , & buf - > free ) ;
2008-10-13 13:36:58 +04:00
}
/**
2021-11-26 11:16:02 +03:00
* tty_buffer_flush - flush full tty buffers
* @ tty : tty to flush
* @ ld : optional ldisc ptr ( must be referenced )
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* Flush all the buffers containing receive data . If @ ld ! = % NULL , flush the
* ldisc input buffer .
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* Locking : takes buffer lock to ensure single - threaded flip buffer ' consumer ' .
2008-10-13 13:36:58 +04:00
*/
2014-11-05 20:13:09 +03:00
void tty_buffer_flush ( struct tty_struct * tty , struct tty_ldisc * ld )
2008-10-13 13:36:58 +04:00
{
2012-10-19 00:26:44 +04:00
struct tty_port * port = tty - > port ;
2012-10-19 00:26:47 +04:00
struct tty_bufhead * buf = & port - > buf ;
2013-06-15 17:36:14 +04:00
struct tty_buffer * next ;
2008-10-13 13:36:58 +04:00
2013-06-15 17:36:15 +04:00
atomic_inc ( & buf - > priority ) ;
2013-06-15 17:36:10 +04:00
2013-06-15 17:36:15 +04:00
mutex_lock ( & buf - > lock ) ;
2015-09-17 18:17:09 +03:00
/* paired w/ release in __tty_buffer_request_room; ensures there are
* no pending memory accesses to the freed buffer
*/
while ( ( next = smp_load_acquire ( & buf - > head - > next ) ) ! = NULL ) {
2013-06-15 17:36:14 +04:00
tty_buffer_free ( port , buf - > head ) ;
buf - > head = next ;
}
buf - > head - > read = buf - > head - > commit ;
2014-11-05 20:13:09 +03:00
if ( ld & & ld - > ops - > flush_buffer )
ld - > ops - > flush_buffer ( tty ) ;
2013-06-15 17:36:15 +04:00
atomic_dec ( & buf - > priority ) ;
mutex_unlock ( & buf - > lock ) ;
2008-10-13 13:36:58 +04:00
}
/**
2021-11-26 11:16:02 +03:00
* __tty_buffer_request_room - grow tty buffer if needed
* @ port : tty port
* @ size : size desired
* @ flags : buffer flags if new buffer allocated ( default = 0 )
*
* Make at least @ size bytes of linear space available for the tty buffer .
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* Will change over to a new buffer if the current buffer is encoded as
* % TTY_NORMAL ( so has no flags buffer ) and the new buffer requires a flags
* buffer .
2013-12-09 18:23:52 +04:00
*
2021-11-26 11:16:02 +03:00
* Returns : the size we managed to find .
2008-10-13 13:36:58 +04:00
*/
2013-12-09 18:23:52 +04:00
static int __tty_buffer_request_room ( struct tty_port * port , size_t size ,
int flags )
2008-10-13 13:36:58 +04:00
{
2012-10-19 00:26:47 +04:00
struct tty_bufhead * buf = & port - > buf ;
2008-10-13 13:36:58 +04:00
struct tty_buffer * b , * n ;
2013-12-09 18:23:52 +04:00
int left , change ;
2013-06-15 17:36:09 +04:00
2012-10-19 00:26:45 +04:00
b = buf - > tail ;
2013-12-09 18:23:52 +04:00
if ( b - > flags & TTYB_NORMAL )
left = 2 * b - > size - b - > used ;
else
left = b - > size - b - > used ;
2008-10-13 13:36:58 +04:00
2013-12-09 18:23:52 +04:00
change = ( b - > flags & TTYB_NORMAL ) & & ( ~ flags & TTYB_NORMAL ) ;
if ( change | | left < size ) {
2008-10-13 13:36:58 +04:00
/* This is the slow path - looking for new buffers to use */
2015-04-30 12:22:17 +03:00
n = tty_buffer_alloc ( port , size ) ;
if ( n ! = NULL ) {
2013-12-09 18:23:52 +04:00
n - > flags = flags ;
2012-10-19 00:26:45 +04:00
buf - > tail = n ;
2015-09-17 18:17:10 +03:00
/* paired w/ acquire in flush_to_ldisc(); ensures
* flush_to_ldisc ( ) sees buffer data .
*/
smp_store_release ( & b - > commit , b - > used ) ;
2015-07-13 03:50:49 +03:00
/* paired w/ acquire in flush_to_ldisc(); ensures the
2014-05-02 18:56:12 +04:00
* latest commit value can be read before the head is
* advanced to the next buffer
*/
2015-07-13 03:50:49 +03:00
smp_store_release ( & b - > next , n ) ;
2013-12-09 18:23:52 +04:00
} else if ( change )
size = 0 ;
else
2008-10-13 13:36:58 +04:00
size = left ;
}
return size ;
}
2013-12-09 18:23:52 +04:00
int tty_buffer_request_room ( struct tty_port * port , size_t size )
{
return __tty_buffer_request_room ( port , size , 0 ) ;
}
2008-10-13 13:36:58 +04:00
EXPORT_SYMBOL_GPL ( tty_buffer_request_room ) ;
/**
2021-11-26 11:16:02 +03:00
* tty_insert_flip_string_fixed_flag - add characters to the tty buffer
* @ port : tty port
* @ chars : characters
* @ flag : flag value for each character
* @ size : size
*
* Queue a series of bytes to the tty buffering . All the characters passed are
* marked with the supplied flag .
*
* Returns : the number added .
2008-10-13 13:36:58 +04:00
*/
2013-01-03 18:53:02 +04:00
int tty_insert_flip_string_fixed_flag ( struct tty_port * port ,
2010-02-18 19:43:54 +03:00
const unsigned char * chars , char flag , size_t size )
2008-10-13 13:36:58 +04:00
{
int copied = 0 ;
2021-05-12 12:26:11 +03:00
2008-10-13 13:36:58 +04:00
do {
2010-03-09 13:54:28 +03:00
int goal = min_t ( size_t , size - copied , TTY_BUFFER_PAGE ) ;
2013-12-09 18:23:52 +04:00
int flags = ( flag = = TTY_NORMAL ) ? TTYB_NORMAL : 0 ;
int space = __tty_buffer_request_room ( port , goal , flags ) ;
2013-01-19 18:16:20 +04:00
struct tty_buffer * tb = port - > buf . tail ;
2021-05-12 12:26:11 +03:00
2013-06-15 17:36:07 +04:00
if ( unlikely ( space = = 0 ) )
2008-10-13 13:36:58 +04:00
break ;
2013-06-15 17:36:01 +04:00
memcpy ( char_buf_ptr ( tb , tb - > used ) , chars , space ) ;
2013-12-09 18:23:52 +04:00
if ( ~ tb - > flags & TTYB_NORMAL )
memset ( flag_buf_ptr ( tb , tb - > used ) , flag , space ) ;
2008-10-13 13:36:58 +04:00
tb - > used + = space ;
copied + = space ;
chars + = space ;
/* There is a small chance that we need to split the data over
2021-05-12 12:26:13 +03:00
* several buffers . If this is the case we must loop .
*/
2008-10-13 13:36:58 +04:00
} while ( unlikely ( size > copied ) ) ;
return copied ;
}
2010-02-18 19:43:54 +03:00
EXPORT_SYMBOL ( tty_insert_flip_string_fixed_flag ) ;
2008-10-13 13:36:58 +04:00
/**
2021-11-26 11:16:02 +03:00
* tty_insert_flip_string_flags - add characters to the tty buffer
* @ port : tty port
* @ chars : characters
* @ flags : flag bytes
* @ size : size
*
* Queue a series of bytes to the tty buffering . For each character the flags
* array indicates the status of the character .
*
* Returns : the number added .
2008-10-13 13:36:58 +04:00
*/
2013-01-03 18:53:02 +04:00
int tty_insert_flip_string_flags ( struct tty_port * port ,
2008-10-13 13:36:58 +04:00
const unsigned char * chars , const char * flags , size_t size )
{
int copied = 0 ;
2021-05-12 12:26:11 +03:00
2008-10-13 13:36:58 +04:00
do {
2010-03-09 13:54:28 +03:00
int goal = min_t ( size_t , size - copied , TTY_BUFFER_PAGE ) ;
2013-01-19 18:16:20 +04:00
int space = tty_buffer_request_room ( port , goal ) ;
struct tty_buffer * tb = port - > buf . tail ;
2021-05-12 12:26:11 +03:00
2013-06-15 17:36:07 +04:00
if ( unlikely ( space = = 0 ) )
2008-10-13 13:36:58 +04:00
break ;
2013-06-15 17:36:01 +04:00
memcpy ( char_buf_ptr ( tb , tb - > used ) , chars , space ) ;
memcpy ( flag_buf_ptr ( tb , tb - > used ) , flags , space ) ;
2008-10-13 13:36:58 +04:00
tb - > used + = space ;
copied + = space ;
chars + = space ;
flags + = space ;
/* There is a small chance that we need to split the data over
2021-05-12 12:26:13 +03:00
* several buffers . If this is the case we must loop .
*/
2008-10-13 13:36:58 +04:00
} while ( unlikely ( size > copied ) ) ;
return copied ;
}
EXPORT_SYMBOL ( tty_insert_flip_string_flags ) ;
2017-06-21 00:10:41 +03:00
/**
2021-11-26 11:16:02 +03:00
* __tty_insert_flip_char - add one character to the tty buffer
* @ port : tty port
* @ ch : character
* @ flag : flag byte
2017-06-21 00:10:41 +03:00
*
2021-11-26 11:16:02 +03:00
* Queue a single byte @ ch to the tty buffering , with an optional flag . This is
* the slow path of tty_insert_flip_char ( ) .
2017-06-21 00:10:41 +03:00
*/
int __tty_insert_flip_char ( struct tty_port * port , unsigned char ch , char flag )
{
2017-08-02 14:11:39 +03:00
struct tty_buffer * tb ;
2017-06-21 00:10:41 +03:00
int flags = ( flag = = TTY_NORMAL ) ? TTYB_NORMAL : 0 ;
2017-06-21 00:10:42 +03:00
if ( ! __tty_buffer_request_room ( port , 1 , flags ) )
2017-06-21 00:10:41 +03:00
return 0 ;
2017-08-02 14:11:39 +03:00
tb = port - > buf . tail ;
2017-06-21 00:10:42 +03:00
if ( ~ tb - > flags & TTYB_NORMAL )
* flag_buf_ptr ( tb , tb - > used ) = flag ;
2017-06-21 00:10:41 +03:00
* char_buf_ptr ( tb , tb - > used + + ) = ch ;
return 1 ;
}
EXPORT_SYMBOL ( __tty_insert_flip_char ) ;
2008-10-13 13:36:58 +04:00
/**
2021-11-26 11:16:02 +03:00
* tty_prepare_flip_string - make room for characters
* @ port : tty port
* @ chars : return pointer for character write area
* @ size : desired size
*
* Prepare a block of space in the buffer for data .
*
* This is used for drivers that need their own block copy routines into the
* buffer . There is no guarantee the buffer is a DMA target !
*
* Returns : the length available and buffer pointer ( @ chars ) to the space which
* is now allocated and accounted for as ready for normal characters .
2008-10-13 13:36:58 +04:00
*/
2013-01-03 18:53:02 +04:00
int tty_prepare_flip_string ( struct tty_port * port , unsigned char * * chars ,
2012-10-19 00:26:47 +04:00
size_t size )
2008-10-13 13:36:58 +04:00
{
2013-12-09 18:23:52 +04:00
int space = __tty_buffer_request_room ( port , size , TTYB_NORMAL ) ;
2021-05-12 12:26:11 +03:00
2008-10-13 13:36:58 +04:00
if ( likely ( space ) ) {
2013-01-19 18:16:20 +04:00
struct tty_buffer * tb = port - > buf . tail ;
2021-05-12 12:26:11 +03:00
2013-06-15 17:36:01 +04:00
* chars = char_buf_ptr ( tb , tb - > used ) ;
2013-12-09 18:23:52 +04:00
if ( ~ tb - > flags & TTYB_NORMAL )
memset ( flag_buf_ptr ( tb , tb - > used ) , TTY_NORMAL , space ) ;
2008-10-13 13:36:58 +04:00
tb - > used + = space ;
}
return space ;
}
EXPORT_SYMBOL_GPL ( tty_prepare_flip_string ) ;
2016-01-11 07:36:13 +03:00
/**
2021-11-26 11:16:02 +03:00
* tty_ldisc_receive_buf - forward data to line discipline
* @ ld : line discipline to process input
* @ p : char buffer
* @ f : % TTY_NORMAL , % TTY_BREAK , etc . flags buffer
* @ count : number of bytes to process
2016-01-11 07:36:13 +03:00
*
2021-11-26 11:16:02 +03:00
* Callers other than flush_to_ldisc ( ) need to exclude the kworker from
* concurrent use of the line discipline , see paste_selection ( ) .
2016-01-11 07:36:13 +03:00
*
2021-11-26 11:16:02 +03:00
* Returns : the number of bytes processed .
2016-01-11 07:36:13 +03:00
*/
2017-01-17 01:54:31 +03:00
int tty_ldisc_receive_buf ( struct tty_ldisc * ld , const unsigned char * p ,
2021-05-05 12:19:04 +03:00
const char * f , int count )
2016-01-11 07:36:13 +03:00
{
if ( ld - > ops - > receive_buf2 )
count = ld - > ops - > receive_buf2 ( ld - > tty , p , f , count ) ;
else {
count = min_t ( int , count , ld - > tty - > receive_room ) ;
if ( count & & ld - > ops - > receive_buf )
ld - > ops - > receive_buf ( ld - > tty , p , f , count ) ;
}
return count ;
}
EXPORT_SYMBOL_GPL ( tty_ldisc_receive_buf ) ;
2008-10-13 13:36:58 +04:00
2013-06-15 17:14:14 +04:00
static int
2017-02-02 22:48:05 +03:00
receive_buf ( struct tty_port * port , struct tty_buffer * head , int count )
2013-06-15 17:14:14 +04:00
{
2013-06-15 17:36:01 +04:00
unsigned char * p = char_buf_ptr ( head , head - > read ) ;
2021-05-05 12:19:04 +03:00
const char * f = NULL ;
2018-10-04 21:06:13 +03:00
int n ;
2013-12-09 18:23:52 +04:00
if ( ~ head - > flags & TTYB_NORMAL )
f = flag_buf_ptr ( head , head - > read ) ;
2013-06-15 17:14:14 +04:00
2018-10-04 21:06:13 +03:00
n = port - > client_ops - > receive_buf ( port , p , f , count ) ;
if ( n > 0 )
memset ( p , 0 , n ) ;
return n ;
2013-06-15 17:14:14 +04:00
}
2008-10-13 13:36:58 +04:00
/**
2021-11-26 11:16:02 +03:00
* flush_to_ldisc - flush data from buffer to ldisc
* @ work : tty structure passed from work queue .
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* This routine is called out of the software interrupt to flush data from the
* buffer chain to the line discipline .
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* The receive_buf ( ) method is single threaded for each tty instance .
2013-06-15 17:36:10 +04:00
*
2021-11-26 11:16:02 +03:00
* Locking : takes buffer lock to ensure single - threaded flip buffer ' consumer ' .
2008-10-13 13:36:58 +04:00
*/
static void flush_to_ldisc ( struct work_struct * work )
{
2012-10-19 00:26:47 +04:00
struct tty_port * port = container_of ( work , struct tty_port , buf . work ) ;
struct tty_bufhead * buf = & port - > buf ;
2008-10-13 13:36:58 +04:00
2013-06-15 17:36:15 +04:00
mutex_lock ( & buf - > lock ) ;
2009-10-14 19:59:49 +04:00
2013-06-15 17:36:11 +04:00
while ( 1 ) {
struct tty_buffer * head = buf - > head ;
2014-05-02 18:56:12 +04:00
struct tty_buffer * next ;
2013-06-15 17:36:11 +04:00
int count ;
2013-06-15 17:36:15 +04:00
/* Ldisc or user is trying to gain exclusive access */
if ( atomic_read ( & buf - > priority ) )
2013-06-15 17:36:11 +04:00
break ;
2015-07-13 03:50:49 +03:00
/* paired w/ release in __tty_buffer_request_room();
2014-05-02 18:56:12 +04:00
* ensures commit value read is not stale if the head
* is advancing to the next buffer
*/
2015-07-13 03:50:49 +03:00
next = smp_load_acquire ( & head - > next ) ;
2015-09-17 18:17:10 +03:00
/* paired w/ release in __tty_buffer_request_room() or in
* tty_buffer_flush ( ) ; ensures we see the committed buffer data
*/
count = smp_load_acquire ( & head - > commit ) - head - > read ;
2013-06-15 17:36:11 +04:00
if ( ! count ) {
2016-03-07 00:16:30 +03:00
if ( next = = NULL )
2013-06-15 17:14:14 +04:00
break ;
2014-05-02 18:56:12 +04:00
buf - > head = next ;
2013-06-15 17:36:11 +04:00
tty_buffer_free ( port , head ) ;
continue ;
2008-10-13 13:36:58 +04:00
}
2013-06-15 17:36:11 +04:00
2017-02-02 22:48:05 +03:00
count = receive_buf ( port , head , count ) ;
2013-06-15 17:36:11 +04:00
if ( ! count )
break ;
2015-07-13 03:50:50 +03:00
head - > read + = count ;
2021-10-11 17:08:24 +03:00
if ( need_resched ( ) )
cond_resched ( ) ;
2008-10-13 13:36:58 +04:00
}
2009-10-14 19:59:49 +04:00
2013-06-15 17:36:15 +04:00
mutex_unlock ( & buf - > lock ) ;
2008-10-13 13:36:58 +04:00
}
/**
2021-11-26 11:16:02 +03:00
* tty_flip_buffer_push - push terminal buffers
* @ port : tty port to push
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* Queue a push of the terminal flip buffers to the line discipline . Can be
* called from IRQ / atomic context .
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* In the event of the queue being busy for flipping the work will be held off
* and retried later .
2008-10-13 13:36:58 +04:00
*/
2013-01-03 18:53:06 +04:00
void tty_flip_buffer_push ( struct tty_port * port )
2008-10-13 13:36:58 +04:00
{
2021-11-22 14:16:48 +03:00
struct tty_bufhead * buf = & port - > buf ;
/*
* Paired w / acquire in flush_to_ldisc ( ) ; ensures flush_to_ldisc ( ) sees
* buffer data .
*/
smp_store_release ( & buf - > tail - > commit , buf - > tail - > used ) ;
queue_work ( system_unbound_wq , & buf - > work ) ;
2008-10-13 13:36:58 +04:00
}
EXPORT_SYMBOL ( tty_flip_buffer_push ) ;
/**
2021-11-26 11:16:02 +03:00
* tty_buffer_init - prepare a tty buffer structure
* @ port : tty port to initialise
2008-10-13 13:36:58 +04:00
*
2021-11-26 11:16:02 +03:00
* Set up the initial state of the buffer management for a tty device . Must be
* called before the other tty buffer functions are used .
2008-10-13 13:36:58 +04:00
*/
2012-10-19 00:26:47 +04:00
void tty_buffer_init ( struct tty_port * port )
2008-10-13 13:36:58 +04:00
{
2012-10-19 00:26:47 +04:00
struct tty_bufhead * buf = & port - > buf ;
2012-10-19 00:26:45 +04:00
2013-06-15 17:36:15 +04:00
mutex_init ( & buf - > lock ) ;
2013-06-15 17:36:07 +04:00
tty_buffer_reset ( & buf - > sentinel , 0 ) ;
buf - > head = & buf - > sentinel ;
buf - > tail = & buf - > sentinel ;
2013-06-15 17:36:06 +04:00
init_llist_head ( & buf - > free ) ;
2013-11-22 21:09:56 +04:00
atomic_set ( & buf - > mem_used , 0 ) ;
2013-06-15 17:36:15 +04:00
atomic_set ( & buf - > priority , 0 ) ;
2012-10-19 00:26:45 +04:00
INIT_WORK ( & buf - > work , flush_to_ldisc ) ;
2013-11-22 21:09:55 +04:00
buf - > mem_limit = TTYB_DEFAULT_MEM_LIMIT ;
2008-10-13 13:36:58 +04:00
}
2013-11-22 21:09:55 +04:00
/**
2021-11-26 11:16:02 +03:00
* tty_buffer_set_limit - change the tty buffer memory limit
* @ port : tty port to change
* @ limit : memory limit to set
*
* Change the tty buffer memory limit .
2013-11-22 21:09:55 +04:00
*
2021-11-26 11:16:02 +03:00
* Must be called before the other tty buffer functions are used .
2013-11-22 21:09:55 +04:00
*/
int tty_buffer_set_limit ( struct tty_port * port , int limit )
{
if ( limit < MIN_TTYB_SIZE )
return - EINVAL ;
port - > buf . mem_limit = limit ;
return 0 ;
}
EXPORT_SYMBOL_GPL ( tty_buffer_set_limit ) ;
2015-01-17 23:42:05 +03:00
/* slave ptys can claim nested buffer lock when handling BRK and INTR */
void tty_buffer_set_lock_subclass ( struct tty_port * port )
{
lockdep_set_subclass ( & port - > buf . lock , TTY_LOCK_SLAVE ) ;
}
2015-10-17 23:36:23 +03:00
bool tty_buffer_restart_work ( struct tty_port * port )
{
return queue_work ( system_unbound_wq , & port - > buf . work ) ;
}
bool tty_buffer_cancel_work ( struct tty_port * port )
{
return cancel_work_sync ( & port - > buf . work ) ;
}
2016-03-07 00:16:30 +03:00
void tty_buffer_flush_work ( struct tty_port * port )
{
flush_work ( & port - > buf . work ) ;
}