// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC Tx data buffering.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
# define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
# include <linux/slab.h>
# include "ar-internal.h"
/* Monotonic counter handing out a unique debug ID per txbuf for tracing. */
static atomic_t rxrpc_txbuf_debug_ids ;
/* Number of txbufs currently allocated (incremented on alloc, decremented
 * on free); non-static so it can be inspected elsewhere in the module.
 */
atomic_t rxrpc_nr_txbuf ;
/*
 * Allocate and partially initialise a data transmission buffer.
 */
struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
					   size_t data_align, gfp_t gfp)
{
	struct rxrpc_wire_header *whdr;
	struct rxrpc_txbuf *txb;
	size_t alloc_size, head_off = 0;
	void *mem;

	txb = kmalloc(sizeof(*txb), gfp);
	if (!txb)
		return NULL;

	/* Pad in front of the wire header so that the payload that follows
	 * it lands on the requested alignment boundary.
	 */
	if (data_align)
		head_off = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
	alloc_size = head_off + sizeof(*whdr) + data_size;

	/* The connection's fragment allocator is shared, so serialise
	 * access to it.
	 */
	mutex_lock(&call->conn->tx_data_alloc_lock);
	mem = __page_frag_alloc_align(&call->conn->tx_data_alloc, alloc_size, gfp,
				      ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
	mutex_unlock(&call->conn->tx_data_alloc_lock);
	if (!mem) {
		kfree(txb);
		return NULL;
	}

	whdr = mem + head_off;

	INIT_LIST_HEAD(&txb->call_link);
	INIT_LIST_HEAD(&txb->tx_link);
	refcount_set(&txb->ref, 1);
	txb->last_sent		= KTIME_MIN;
	txb->call_debug_id	= call->debug_id;
	txb->debug_id		= atomic_inc_return(&rxrpc_txbuf_debug_ids);
	txb->space		= data_size;
	txb->len		= 0;
	txb->offset		= sizeof(*whdr);
	txb->flags		= call->conn->out_clientflag;
	txb->ack_why		= 0;
	txb->seq		= call->tx_prepared + 1;
	txb->serial		= 0;
	txb->cksum		= 0;
	txb->nr_kvec		= 1;
	txb->kvec[0].iov_base	= whdr;
	txb->kvec[0].iov_len	= sizeof(*whdr);

	/* Fill in as much of the wire header as is known at this point;
	 * flags, checksum and serial are set at transmission time.
	 */
	whdr->epoch		= htonl(call->conn->proto.epoch);
	whdr->cid		= htonl(call->cid);
	whdr->callNumber	= htonl(call->call_id);
	whdr->seq		= htonl(txb->seq);
	whdr->type		= RXRPC_PACKET_TYPE_DATA;
	whdr->flags		= 0;
	whdr->userStatus	= 0;
	whdr->securityIndex	= call->security_ix;
	whdr->_rsvd		= 0;
	whdr->serviceId		= htons(call->dest_srx.srx_service);

	trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
			  rxrpc_txbuf_alloc_data);
	atomic_inc(&rxrpc_nr_txbuf);
	return txb;
}
/*
 * Allocate and partially initialise an ACK packet.
 */
struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_size)
{
	struct rxrpc_wire_header *whdr;
	struct rxrpc_acktrailer *trailer;
	struct rxrpc_ackpacket *ack;
	struct rxrpc_txbuf *txb;
	gfp_t gfp = rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS;
	void *mem, *sack_buf = NULL;
	u8 *filler;

	txb = kmalloc(sizeof(*txb), gfp);
	if (!txb)
		return NULL;

	/* One fragment carries the wire header, the ack body, a spare byte,
	 * three bytes of padding and the trailer.
	 */
	mem = page_frag_alloc(&call->local->tx_alloc,
			      sizeof(*whdr) + sizeof(*ack) + 1 + 3 + sizeof(*trailer), gfp);
	if (!mem)
		goto err_free_txb;

	/* The SACK table, if there is one, gets a fragment of its own. */
	if (sack_size) {
		sack_buf = page_frag_alloc(&call->local->tx_alloc, sack_size, gfp);
		if (!sack_buf)
			goto err_free_frag;
	}

	whdr	= mem;
	ack	= mem + sizeof(*whdr);
	filler	= mem + sizeof(*whdr) + sizeof(*ack) + 1;
	trailer	= mem + sizeof(*whdr) + sizeof(*ack) + 1 + 3;

	INIT_LIST_HEAD(&txb->call_link);
	INIT_LIST_HEAD(&txb->tx_link);
	refcount_set(&txb->ref, 1);
	txb->call_debug_id	= call->debug_id;
	txb->debug_id		= atomic_inc_return(&rxrpc_txbuf_debug_ids);
	txb->space		= 0;
	txb->len		= sizeof(*whdr) + sizeof(*ack) + 3 + sizeof(*trailer);
	txb->offset		= 0;
	txb->flags		= call->conn->out_clientflag;
	txb->ack_rwind		= 0;
	txb->seq		= 0;
	txb->serial		= 0;
	txb->cksum		= 0;
	txb->nr_kvec		= 3;
	txb->kvec[0].iov_base	= whdr;
	txb->kvec[0].iov_len	= sizeof(*whdr) + sizeof(*ack);
	txb->kvec[1].iov_base	= sack_buf;	/* NULL when sack_size == 0 */
	txb->kvec[1].iov_len	= sack_size;
	txb->kvec[2].iov_base	= filler;
	txb->kvec[2].iov_len	= 3 + sizeof(*trailer);

	whdr->epoch		= htonl(call->conn->proto.epoch);
	whdr->cid		= htonl(call->cid);
	whdr->callNumber	= htonl(call->call_id);
	whdr->seq		= 0;
	whdr->type		= RXRPC_PACKET_TYPE_ACK;
	whdr->flags		= 0;
	whdr->userStatus	= 0;
	whdr->securityIndex	= call->security_ix;
	whdr->_rsvd		= 0;
	whdr->serviceId		= htons(call->dest_srx.srx_service);

	/* kvec[0] and kvec[2] point into the same fragment but are freed
	 * individually, so pin an extra reference on the page.
	 */
	get_page(virt_to_head_page(trailer));

	trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
			  rxrpc_txbuf_alloc_ack);
	atomic_inc(&rxrpc_nr_txbuf);
	return txb;

err_free_frag:
	page_frag_free(mem);
err_free_txb:
	kfree(txb);
	return NULL;
}
void rxrpc_get_txbuf ( struct rxrpc_txbuf * txb , enum rxrpc_txbuf_trace what )
{
int r ;
__refcount_inc ( & txb - > ref , & r ) ;
trace_rxrpc_txbuf ( txb - > debug_id , txb - > call_debug_id , txb - > seq , r + 1 , what ) ;
}
void rxrpc_see_txbuf ( struct rxrpc_txbuf * txb , enum rxrpc_txbuf_trace what )
{
int r = refcount_read ( & txb - > ref ) ;
trace_rxrpc_txbuf ( txb - > debug_id , txb - > call_debug_id , txb - > seq , r , what ) ;
}
2024-01-29 23:47:57 +00:00
static void rxrpc_free_txbuf ( struct rxrpc_txbuf * txb )
2022-04-05 21:16:32 +01:00
{
2024-01-29 23:47:57 +00:00
int i ;
2022-04-05 21:16:32 +01:00
trace_rxrpc_txbuf ( txb - > debug_id , txb - > call_debug_id , txb - > seq , 0 ,
rxrpc_txbuf_free ) ;
2024-01-29 23:47:57 +00:00
for ( i = 0 ; i < txb - > nr_kvec ; i + + )
if ( txb - > kvec [ i ] . iov_base )
page_frag_free ( txb - > kvec [ i ] . iov_base ) ;
2022-04-05 21:16:32 +01:00
kfree ( txb ) ;
atomic_dec ( & rxrpc_nr_txbuf ) ;
}
void rxrpc_put_txbuf ( struct rxrpc_txbuf * txb , enum rxrpc_txbuf_trace what )
{
unsigned int debug_id , call_debug_id ;
rxrpc_seq_t seq ;
bool dead ;
int r ;
if ( txb ) {
debug_id = txb - > debug_id ;
call_debug_id = txb - > call_debug_id ;
seq = txb - > seq ;
dead = __refcount_dec_and_test ( & txb - > ref , & r ) ;
trace_rxrpc_txbuf ( debug_id , call_debug_id , seq , r - 1 , what ) ;
if ( dead )
2024-01-29 23:47:57 +00:00
rxrpc_free_txbuf ( txb ) ;
2022-04-05 21:16:32 +01:00
}
}
/*
 * Shrink the transmit buffer.
 */
void rxrpc_shrink_call_tx_buffer ( struct rxrpc_call * call )
{
struct rxrpc_txbuf * txb ;
rxrpc_seq_t hard_ack = smp_load_acquire ( & call - > acks_hard_ack ) ;
2022-03-31 23:55:08 +01:00
bool wake = false ;
2022-03-31 23:55:08 +01:00
_enter ( " %x/%x/%x " , call - > tx_bottom , call - > acks_hard_ack , call - > tx_top ) ;
2022-10-17 22:48:58 +01:00
while ( ( txb = list_first_entry_or_null ( & call - > tx_buffer ,
struct rxrpc_txbuf , call_link ) ) ) {
2022-03-31 23:55:08 +01:00
hard_ack = smp_load_acquire ( & call - > acks_hard_ack ) ;
if ( before ( hard_ack , txb - > seq ) )
break ;
2022-11-25 09:00:55 +00:00
if ( txb - > seq ! = call - > tx_bottom + 1 )
rxrpc_see_txbuf ( txb , rxrpc_txbuf_see_out_of_step ) ;
2022-03-31 23:55:08 +01:00
ASSERTCMP ( txb - > seq , = = , call - > tx_bottom + 1 ) ;
2022-03-31 23:55:08 +01:00
smp_store_release ( & call - > tx_bottom , call - > tx_bottom + 1 ) ;
2022-03-31 23:55:08 +01:00
list_del_rcu ( & txb - > call_link ) ;
trace_rxrpc_txqueue ( call , rxrpc_txqueue_dequeue ) ;
rxrpc_put_txbuf ( txb , rxrpc_txbuf_put_rotated ) ;
2022-03-31 23:55:08 +01:00
if ( after ( call - > acks_hard_ack , call - > tx_bottom + 128 ) )
wake = true ;
2022-03-31 23:55:08 +01:00
}
2022-03-31 23:55:08 +01:00
if ( wake )
wake_up ( & call - > waitq ) ;
2022-03-31 23:55:08 +01:00
}