rhashtable: Drop raw RCU deref in nested_table_free

This patch replaces some unnecessary uses of rcu_dereference_raw
in the rhashtable code with rcu_dereference_protected.

The top-level nested table entry is only marked as RCU because it
shares the same type as the tree entries underneath it.  So it
doesn't need any RCU protection.

We also don't need RCU protection when we're freeing a nested RCU
table, because by that stage we have long since passed the memory
barrier after which nobody else can change the nested table.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Commit author: Herbert Xu, 2020-06-03 18:12:43 +10:00
Committed by: David S. Miller
parent 7f89cc07d2
commit 4a3084aaa8

View File

@ -63,13 +63,22 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#define ASSERT_RHT_MUTEX(HT) #define ASSERT_RHT_MUTEX(HT)
#endif #endif
/*
 * nested_table_top - fetch the first-level table of a nested bucket table
 * @tbl: bucket table whose buckets[0] holds the top-level nested table
 *
 * buckets[0] is declared with RCU annotations only because it shares a
 * type with the lower-level nested-table entries, so dereferencing it
 * here uses rcu_dereference_protected(..., 1) to satisfy sparse/lockdep
 * without requiring an RCU read-side critical section.
 */
static inline union nested_table *nested_table_top(
const struct bucket_table *tbl)
{
/* The top-level bucket entry does not need RCU protection
 * because it's set at the same time as tbl->nest.
 */
return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
}
static void nested_table_free(union nested_table *ntbl, unsigned int size) static void nested_table_free(union nested_table *ntbl, unsigned int size)
{ {
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
const unsigned int len = 1 << shift; const unsigned int len = 1 << shift;
unsigned int i; unsigned int i;
ntbl = rcu_dereference_raw(ntbl->table); ntbl = rcu_dereference_protected(ntbl->table, 1);
if (!ntbl) if (!ntbl)
return; return;
@ -89,7 +98,7 @@ static void nested_bucket_table_free(const struct bucket_table *tbl)
union nested_table *ntbl; union nested_table *ntbl;
unsigned int i; unsigned int i;
ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); ntbl = nested_table_top(tbl);
for (i = 0; i < len; i++) for (i = 0; i < len; i++)
nested_table_free(ntbl + i, size); nested_table_free(ntbl + i, size);
@ -1173,7 +1182,7 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
unsigned int subhash = hash; unsigned int subhash = hash;
union nested_table *ntbl; union nested_table *ntbl;
ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); ntbl = nested_table_top(tbl);
ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
subhash >>= tbl->nest; subhash >>= tbl->nest;
@ -1213,7 +1222,7 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
unsigned int size = tbl->size >> tbl->nest; unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl; union nested_table *ntbl;
ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); ntbl = nested_table_top(tbl);
hash >>= tbl->nest; hash >>= tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table, ntbl = nested_table_alloc(ht, &ntbl[index].table,
size <= (1 << shift)); size <= (1 << shift));