/* Helpers for allocating/freeing a hashed array of bucket spinlocks. */
# include <linux/export.h>
# include <linux/kernel.h>
# include <linux/mm.h>
# include <linux/slab.h>
# include <linux/vmalloc.h>
/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
* indicate the number of elements to allocate in the array . max_size
* gives the maximum number of elements to allocate . cpu_mult gives
* the number of locks per CPU to allocate . The size is rounded up
* to a power of 2 to be suitable as a hash table .
*/
2018-08-15 01:21:31 +03:00
int __alloc_bucket_spinlocks ( spinlock_t * * locks , unsigned int * locks_mask ,
size_t max_size , unsigned int cpu_mult , gfp_t gfp ,
const char * name , struct lock_class_key * key )
2017-12-04 21:31:44 +03:00
{
spinlock_t * tlocks = NULL ;
unsigned int i , size ;
# if defined(CONFIG_PROVE_LOCKING)
unsigned int nr_pcpus = 2 ;
# else
unsigned int nr_pcpus = num_possible_cpus ( ) ;
# endif
if ( cpu_mult ) {
nr_pcpus = min_t ( unsigned int , nr_pcpus , 64UL ) ;
size = min_t ( unsigned int , nr_pcpus * cpu_mult , max_size ) ;
} else {
size = max_size ;
}
if ( sizeof ( spinlock_t ) ! = 0 ) {
2018-06-08 03:09:40 +03:00
tlocks = kvmalloc_array ( size , sizeof ( spinlock_t ) , gfp ) ;
2017-12-04 21:31:44 +03:00
if ( ! tlocks )
return - ENOMEM ;
2018-08-15 01:21:31 +03:00
for ( i = 0 ; i < size ; i + + ) {
2017-12-04 21:31:44 +03:00
spin_lock_init ( & tlocks [ i ] ) ;
2018-08-15 01:21:31 +03:00
lockdep_init_map ( & tlocks [ i ] . dep_map , name , key , 0 ) ;
}
2017-12-04 21:31:44 +03:00
}
* locks = tlocks ;
* locks_mask = size - 1 ;
return 0 ;
}
2018-08-15 01:21:31 +03:00
EXPORT_SYMBOL ( __alloc_bucket_spinlocks ) ;
2017-12-04 21:31:44 +03:00
void free_bucket_spinlocks ( spinlock_t * locks )
{
kvfree ( locks ) ;
}
EXPORT_SYMBOL ( free_bucket_spinlocks ) ;