/* ar-skbuff.c: socket buffer destruction handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
2016-06-02 22:08:52 +03:00
# define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2007-04-27 02:48:28 +04:00
# include <linux/module.h>
# include <linux/net.h>
# include <linux/skbuff.h>
# include <net/sock.h>
# include <net/af_rxrpc.h>
# include "ar-internal.h"
2016-09-17 12:49:14 +03:00
/*
 * Pick the outstanding-skb counter for a trace ID: Tx-side IDs
 * (>= rxrpc_skb_tx_cleaned) are tallied in rxrpc_n_tx_skbs, all
 * others in rxrpc_n_rx_skbs.  The argument is parenthesised so that
 * expression arguments keep their intended precedence (CERT PRE01-C).
 */
#define select_skb_count(op) ((op) >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
2016-08-23 17:27:24 +03:00
/*
2016-09-17 12:49:14 +03:00
* Note the allocation or reception of a socket buffer .
2016-08-23 17:27:24 +03:00
*/
2016-09-17 12:49:14 +03:00
void rxrpc_new_skb ( struct sk_buff * skb , enum rxrpc_skb_trace op )
2016-08-23 17:27:24 +03:00
{
const void * here = __builtin_return_address ( 0 ) ;
2016-09-17 12:49:14 +03:00
int n = atomic_inc_return ( select_skb_count ( op ) ) ;
2017-06-30 13:07:58 +03:00
trace_rxrpc_skb ( skb , op , refcount_read ( & skb - > users ) , n , here ) ;
2016-08-23 17:27:24 +03:00
}
/*
* Note the re - emergence of a socket buffer from a queue or buffer .
*/
2016-09-17 12:49:14 +03:00
void rxrpc_see_skb ( struct sk_buff * skb , enum rxrpc_skb_trace op )
2016-08-23 17:27:24 +03:00
{
const void * here = __builtin_return_address ( 0 ) ;
if ( skb ) {
2016-09-17 12:49:14 +03:00
int n = atomic_read ( select_skb_count ( op ) ) ;
2017-06-30 13:07:58 +03:00
trace_rxrpc_skb ( skb , op , refcount_read ( & skb - > users ) , n , here ) ;
2016-08-23 17:27:24 +03:00
}
}
/*
* Note the addition of a ref on a socket buffer .
*/
2016-09-17 12:49:14 +03:00
void rxrpc_get_skb ( struct sk_buff * skb , enum rxrpc_skb_trace op )
2016-08-23 17:27:24 +03:00
{
const void * here = __builtin_return_address ( 0 ) ;
2016-09-17 12:49:14 +03:00
int n = atomic_inc_return ( select_skb_count ( op ) ) ;
2017-06-30 13:07:58 +03:00
trace_rxrpc_skb ( skb , op , refcount_read ( & skb - > users ) , n , here ) ;
2016-08-23 17:27:24 +03:00
skb_get ( skb ) ;
}
/*
* Note the destruction of a socket buffer .
*/
2016-09-17 12:49:14 +03:00
void rxrpc_free_skb ( struct sk_buff * skb , enum rxrpc_skb_trace op )
2016-08-23 17:27:24 +03:00
{
const void * here = __builtin_return_address ( 0 ) ;
if ( skb ) {
int n ;
CHECK_SLAB_OKAY ( & skb - > users ) ;
2016-09-17 12:49:14 +03:00
n = atomic_dec_return ( select_skb_count ( op ) ) ;
2017-06-30 13:07:58 +03:00
trace_rxrpc_skb ( skb , op , refcount_read ( & skb - > users ) , n , here ) ;
2016-08-23 17:27:24 +03:00
kfree_skb ( skb ) ;
}
}
2016-09-17 12:49:14 +03:00
/*
* Note the injected loss of a socket buffer .
*/
void rxrpc_lose_skb ( struct sk_buff * skb , enum rxrpc_skb_trace op )
{
const void * here = __builtin_return_address ( 0 ) ;
if ( skb ) {
int n ;
CHECK_SLAB_OKAY ( & skb - > users ) ;
2016-09-30 00:37:15 +03:00
n = atomic_dec_return ( select_skb_count ( op ) ) ;
2017-06-30 13:07:58 +03:00
trace_rxrpc_skb ( skb , op , refcount_read ( & skb - > users ) , n , here ) ;
2016-09-30 00:37:15 +03:00
kfree_skb ( skb ) ;
2016-09-17 12:49:14 +03:00
}
}
2016-08-23 17:27:24 +03:00
/*
* Clear a queue of socket buffers .
*/
void rxrpc_purge_queue ( struct sk_buff_head * list )
{
const void * here = __builtin_return_address ( 0 ) ;
struct sk_buff * skb ;
while ( ( skb = skb_dequeue ( ( list ) ) ) ! = NULL ) {
2016-09-17 12:49:14 +03:00
int n = atomic_dec_return ( select_skb_count ( rxrpc_skb_rx_purged ) ) ;
trace_rxrpc_skb ( skb , rxrpc_skb_rx_purged ,
2017-06-30 13:07:58 +03:00
refcount_read ( & skb - > users ) , n , here ) ;
2016-08-23 17:27:24 +03:00
kfree_skb ( skb ) ;
}
}