// SPDX-License-Identifier: GPL-2.0-or-later

#define pr_fmt(fmt) "ref_tracker: " fmt

#include <linux/export.h>
#include <linux/list_sort.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

#define REF_TRACKER_STACK_ENTRIES 16
#define STACK_BUF_SIZE 1024
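/*
 * One tracked reference. A live tracker sits on dir->list; once the
 * reference is released it is marked dead and parked on dir->quarantine
 * so a later double-free can still be reported with both stacks.
 */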
struct ref_tracker {
	struct list_head	head;   /* anchor into dir->list or dir->quarantine */
	bool			dead;
	depot_stack_handle_t	alloc_stack_handle;
	depot_stack_handle_t	free_stack_handle;
};

struct ref_tracker_dir_stats {
	int total;
	int count;
	struct {
		depot_stack_handle_t stack_handle;
		unsigned int count;
	} stacks[];
};
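
/*
 * Build a histogram of the allocation stacks of all live trackers in
 * @dir, keeping at most @limit distinct stacks. Must be called with
 * dir->lock held. Returns ERR_PTR(-ENOMEM) if the stats buffer cannot
 * be allocated.
 */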
static struct ref_tracker_dir_stats *
ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
{
	struct ref_tracker_dir_stats *stats;
	struct ref_tracker *tracker;

	stats = kmalloc(struct_size(stats, stacks, limit),
			GFP_NOWAIT | __GFP_NOWARN);
	if (!stats)
		return ERR_PTR(-ENOMEM);
	stats->total = 0;
	stats->count = 0;

	list_for_each_entry(tracker, &dir->list, head) {
		depot_stack_handle_t stack = tracker->alloc_stack_handle;
		int i;

		++stats->total;
		for (i = 0; i < stats->count; ++i)
			if (stats->stacks[i].stack_handle == stack)
				break;
		if (i >= limit)
			continue;
		if (i >= stats->count) {
			stats->stacks[i].stack_handle = stack;
			stats->stacks[i].count = 0;
			++stats->count;
		}
		++stats->stacks[i].count;
	}

	return stats;
}
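
/*
 * Tiny output abstraction: with a NULL buffer pr_ostream() logs via
 * pr_err(), otherwise it appends to the caller-supplied buffer, so the
 * same report code serves both the console and snprint users.
 */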
struct ostream {
	char *buf;
	int size, used;
};

#define pr_ostream(stream, fmt, args...) \
({ \
	struct ostream *_s = (stream); \
\
	if (!_s->buf) { \
		pr_err(fmt, ##args); \
	} else { \
		int ret, len = _s->size - _s->used; \
		ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
		_s->used += min(ret, len); \
	} \
})
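
/*
 * Emit a report for @dir: one entry per distinct allocation stack (up to
 * @display_limit), followed by a summary of any users that were skipped.
 * Caller must hold dir->lock.
 */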
static void
__ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
			     unsigned int display_limit, struct ostream *s)
{
	struct ref_tracker_dir_stats *stats;
	unsigned int i = 0, skipped;
	depot_stack_handle_t stack;
	char *sbuf;

	lockdep_assert_held(&dir->lock);

	if (list_empty(&dir->list))
		return;

	stats = ref_tracker_get_stats(dir, display_limit);
	if (IS_ERR(stats)) {
		pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
			   dir->name, dir, stats);
		return;
	}

	/* A failed allocation here only costs us the stack text. */
	sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);

	for (i = 0, skipped = stats->total; i < stats->count; ++i) {
		stack = stats->stacks[i].stack_handle;
		if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
			sbuf[0] = 0;
		pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
			   stats->stacks[i].count, stats->total, sbuf);
		skipped -= stats->stacks[i].count;
	}

	if (skipped)
		pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
			   dir->name, dir, skipped, stats->total);

	kfree(sbuf);
	kfree(stats);
}
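
/*
 * Print outstanding references to the kernel log. The _locked variant
 * expects dir->lock to already be held; ref_tracker_dir_print() takes
 * the lock itself.
 */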
void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
				  unsigned int display_limit)
{
	struct ostream os = {};

	__ref_tracker_dir_pr_ostream(dir, display_limit, &os);
}
EXPORT_SYMBOL(ref_tracker_dir_print_locked);

void ref_tracker_dir_print(struct ref_tracker_dir *dir,
			   unsigned int display_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	ref_tracker_dir_print_locked(dir, display_limit);
	spin_unlock_irqrestore(&dir->lock, flags);
}
EXPORT_SYMBOL(ref_tracker_dir_print);

int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
{
	struct ostream os = { .buf = buf, .size = size };
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	__ref_tracker_dir_pr_ostream(dir, 16, &os);
	spin_unlock_irqrestore(&dir->lock, flags);

	return os.used;
}
EXPORT_SYMBOL(ref_tracker_dir_snprint);
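
/*
 * Tear down @dir: drop the quarantined (already released) trackers,
 * then report and free any still-live ones, warning once about the
 * leak and about unbalanced untracked/no_tracker counts.
 */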
void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
	struct ref_tracker *tracker, *n;
	unsigned long flags;
	bool leak = false;

	dir->dead = true;
	spin_lock_irqsave(&dir->lock, flags);
	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
		list_del(&tracker->head);
		kfree(tracker);
		dir->quarantine_avail++;
	}
	if (!list_empty(&dir->list)) {
		ref_tracker_dir_print_locked(dir, 16);
		leak = true;
		list_for_each_entry_safe(tracker, n, &dir->list, head) {
			list_del(&tracker->head);
			kfree(tracker);
		}
	}
	spin_unlock_irqrestore(&dir->lock, flags);
	WARN_ON_ONCE(leak);
	WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
	WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
}
EXPORT_SYMBOL(ref_tracker_dir_exit);
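
/*
 * Take a tracked reference on @dir. With a NULL @trackerp the reference
 * is only counted (dir->no_tracker); otherwise a tracker recording the
 * current stack is allocated and linked into dir->list.
 */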
int ref_tracker_alloc(struct ref_tracker_dir *dir,
		      struct ref_tracker **trackerp,
		      gfp_t gfp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	gfp_t gfp_mask = gfp | __GFP_NOWARN;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_inc(&dir->no_tracker);
		return 0;
	}
	if (gfp & __GFP_DIRECT_RECLAIM)
		gfp_mask |= __GFP_NOFAIL;
	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
	if (unlikely(!tracker)) {
		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
		refcount_inc(&dir->untracked);
		return -ENOMEM;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);

	spin_lock_irqsave(&dir->lock, flags);
	list_add(&tracker->head, &dir->list);
	spin_unlock_irqrestore(&dir->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_alloc);
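
/*
 * Release a reference taken with ref_tracker_alloc(). A double release
 * is reported with both the allocation and the first-free stacks. Freed
 * trackers are kept in a fixed-size quarantine so such a report can
 * still be produced later.
 */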
int ref_tracker_free(struct ref_tracker_dir *dir,
		     struct ref_tracker **trackerp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	depot_stack_handle_t stack_handle;
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_dec(&dir->no_tracker);
		return 0;
	}
	tracker = *trackerp;
	if (!tracker) {
		refcount_dec(&dir->untracked);
		return -EEXIST;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	stack_handle = stack_depot_save(entries, nr_entries,
					GFP_NOWAIT | __GFP_NOWARN);

	spin_lock_irqsave(&dir->lock, flags);
	if (tracker->dead) {
		pr_err("reference already released.\n");
		if (tracker->alloc_stack_handle) {
			pr_err("allocated in:\n");
			stack_depot_print(tracker->alloc_stack_handle);
		}
		if (tracker->free_stack_handle) {
			pr_err("freed in:\n");
			stack_depot_print(tracker->free_stack_handle);
		}
		spin_unlock_irqrestore(&dir->lock, flags);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	tracker->dead = true;

	tracker->free_stack_handle = stack_handle;

	list_move_tail(&tracker->head, &dir->quarantine);
	if (!dir->quarantine_avail) {
		/* Quarantine is full: recycle its oldest entry. */
		tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
		list_del(&tracker->head);
	} else {
		dir->quarantine_avail--;
		tracker = NULL;
	}
	spin_unlock_irqrestore(&dir->lock, flags);

	kfree(tracker);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);
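
/*
 * A minimal usage sketch (hypothetical caller; assumes the directory was
 * set up with ref_tracker_dir_init() from <linux/ref_tracker.h>):
 *
 *	struct ref_tracker *tracker;
 *
 *	ref_tracker_alloc(&obj->refs, &tracker, GFP_KERNEL);
 *	...					// reference is in use
 *	ref_tracker_free(&obj->refs, &tracker);
 *	ref_tracker_dir_exit(&obj->refs);	// reports any leaked references
 */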