// SPDX-License-Identifier: GPL-2.0-or-later
/* Socket buffer accounting
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
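
/* Each helper below pairs a socket buffer refcount operation with an update
 * of the rxrpc_n_rx_skbs debug counter and an rxrpc_skb trace event, so that
 * skb usage in rxrpc can be audited.
 */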

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
#define select_skb_count(skb) (&rxrpc_n_rx_skbs)

/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	int n = atomic_inc_return(select_skb_count(skb));
	trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	if (skb) {
		int n = atomic_read(select_skb_count(skb));
		trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	int n = atomic_inc_return(select_skb_count(skb));
	trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
	skb_get(skb);
}

/*
 * Note the dropping of a ref on a socket buffer by the core.
 */
void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	int n = atomic_inc_return(&rxrpc_n_rx_skbs);

	trace_rxrpc_skb(skb, 0, n, why);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	if (skb) {
		int n = atomic_dec_return(select_skb_count(skb));
		trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
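		/* consume_skb() so the free isn't traced as a packet drop */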
		consume_skb(skb);
	}
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue((list))) != NULL) {
		int n = atomic_dec_return(select_skb_count(skb));
		trace_rxrpc_skb(skb, refcount_read(&skb->users), n,
				rxrpc_skb_put_purge);
		consume_skb(skb);
	}
}