// SPDX-License-Identifier: GPL-2.0-or-later
/* ar-skbuff.c: socket buffer destruction handling
*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
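
/* Each skb is tracked in one of two global debugging counters, chosen by
 * whether its rx_flags marks it as a Tx buffer: rxrpc_n_tx_skbs for the
 * transmission path and rxrpc_n_rx_skbs for reception. Attributing each
 * skb to the right counter lets a leak be pinned to one direction.
 */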
#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)

/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(skb));

	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
			rxrpc_skb(skb)->rx_flags, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
*/
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);

	if (skb) {
		/* Seeing an skb doesn't take or drop a reference, so the
		 * relevant counter is only sampled, not modified.
		 */
		int n = atomic_read(select_skb_count(skb));

		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
				rxrpc_skb(skb)->rx_flags, here);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
*/
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(skb));

	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
			rxrpc_skb(skb)->rx_flags, here);
	skb_get(skb);
}
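
/* For illustration, a caller that stashes an skb on a queue and later
 * releases it would pair these helpers roughly like so (hypothetical
 * sketch, not a real call site in this file):
 *
 *	rxrpc_get_skb(skb, rxrpc_skb_got);
 *	skb_queue_tail(&call->recvmsg_queue, skb);
 *	...
 *	rxrpc_free_skb(skb, rxrpc_skb_freed);
 */
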
/*
 * Note the dropping of a ref on a socket buffer by the core.
*/
void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_dec_return(&rxrpc_n_rx_skbs);

	/* The core has consumed the skb, so it must not be dereferenced
	 * here: the Rx counter is decremented directly rather than via
	 * select_skb_count(), and zero is reported for the refcount and
	 * flags.
	 */
	trace_rxrpc_skb(skb, op, 0, n, 0, here);
}

/*
 * Note the destruction of a socket buffer.
*/
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);

	if (skb) {
		int n;

		CHECK_SLAB_OKAY(&skb->users);
		n = atomic_dec_return(select_skb_count(skb));
		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
				rxrpc_skb(skb)->rx_flags, here);
		kfree_skb(skb);
	}
}

/*
 * Clear a queue of socket buffers.
*/
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	const void *here = __builtin_return_address(0);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		int n = atomic_dec_return(select_skb_count(skb));

		trace_rxrpc_skb(skb, rxrpc_skb_purged,
				refcount_read(&skb->users), n,
				rxrpc_skb(skb)->rx_flags, here);
		kfree_skb(skb);
	}
}
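
/* These events all feed the rxrpc_skb tracepoint; as an example of how one
 * might watch the per-direction counts for imbalance during debugging
 * (assuming ftrace is mounted in the usual place):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/rxrpc/rxrpc_skb/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */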