/* Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
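
/* Illustrative note (not part of this file): the lock array is a
 * power-of-two stripe over the buckets, so the lock guarding a given
 * hash is picked by masking, which mirrors what rht_bucket_lock() in
 * <linux/rhashtable.h> does:
 *
 *	spinlock_t *lock = &tbl->locks[hash & tbl->locks_mask];
 *
 * Capping size at tbl->size >> 1 above means at least two buckets
 * share each lock.
 */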

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl =
		rht_dereference(old_tbl->future_tbl, ht) ?: old_tbl;
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static void rhashtable_rehash(struct rhashtable *ht,
			      struct bucket_table *new_tbl)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhashtable_walker *walker;
	unsigned old_hash;

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
}
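
/* Illustrative sketch (not part of the kernel API): a lookup running
 * concurrently with rhashtable_rehash() must be prepared to chase the
 * table chain, because an entry may already have moved to the new
 * table. Under RCU protection this is roughly:
 *
 *	rcu_read_lock();
 *	tbl = rht_dereference_rcu(ht->tbl, ht);
 *	do {
 *		obj = search_one_table(tbl, key);	// hypothetical helper
 *		if (obj)
 *			break;
 *		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 *	} while (tbl);
 *	rcu_read_unlock();
 *
 * search_one_table() stands in for the per-bucket walk that the real
 * lookup helpers in <linux/rhashtable.h> perform.
 */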

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rhashtable_rehash(ht, new_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
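
/* Usage sketch (illustrative only): resizing is normally left to
 * rht_deferred_worker(), but a caller holding ht->mutex may force it:
 *
 *	mutex_lock(&ht->mutex);
 *	err = rhashtable_expand(ht);	// or rhashtable_shrink(ht)
 *	mutex_unlock(&ht->mutex);
 *
 * The -ENOMEM case simply leaves the old table in place.
 */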

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rhashtable_rehash(ht, new_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;

	ht = container_of(work, struct rhashtable, run_work);

	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned hash;
	int err = -EEXIST;

	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:
	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	mutex_lock(&ht->mutex);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	mutex_unlock(&ht->mutex);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
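
/* Usage sketch (illustrative, error handling trimmed): a typical walk
 * drives the iterator in a retry-tolerant loop, since the start and
 * next calls may report -EAGAIN when a resize happens underneath:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;			// hypothetical entry type
 *
 *	if (rhashtable_walk_init(ht, &iter))
 *		return;
 *	if (rhashtable_walk_start(&iter) == -EAGAIN)
 *		;				// fine: iterator rewound to start
 *	while ((obj = rhashtable_walk_next(&iter))) {
 *		if (IS_ERR(obj))		// -EAGAIN: table resized, rewound
 *			continue;
 *		visit(obj);			// hypothetical per-object handler
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */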

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
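
/* Worked example (illustrative): with nelem_hint = 600 and min_size = 16,
 * 600 * 4 / 3 = 800, roundup_pow_of_two(800) = 1024, and max(1024, 16)
 * yields an initial table of 1024 buckets, i.e. at most ~75% load for
 * the hinted element count.
 */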

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!(params->key_len && params->hashfn) && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
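
/* Usage sketch (illustrative, assuming the inline fast-path helpers
 * declared in <linux/rhashtable.h>): setting up a table keyed by
 * test_obj.key and inserting one object might look like:
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.hashfn = jhash,
 *	};
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (!err)
 *		err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *
 * Lookups then go through rhashtable_lookup_fast(&ht, &key, test_params)
 * under rcu_read_lock().
 */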

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);