rcu/kvfree: Add debug to check grace periods
This commit adds debugging checks to verify that the required RCU grace
period has elapsed for each kvfree_rcu_bulk_data structure that arrives
at the kvfree_rcu_bulk() function. These checks make use of that
structure's ->gp_snap field, which has been upgraded from an unsigned
long to an rcu_gp_oldstate structure. This upgrade reduces the chances
of false positives to nearly zero, even on 32-bit systems, for which
this structure carries 64 bits of state.

Cc: Ziwei Dai <ziwei.dai@unisoc.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
commit cdfa0f6fa6
parent 7e3f926bf4
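For orientation before the diff: the patch moves kvfree_rcu's per-page
grace-period tracking onto RCU's full-state polling API. A minimal
sketch of that pattern follows; struct rcu_gp_oldstate and the two
_full() functions are the upstream kernel's APIs, while the wrapper
functions here are illustrative only, not taken from the patch:

#include <linux/rcupdate.h>

static struct rcu_gp_oldstate snap;

/* Record a cookie describing the current RCU grace-period state. */
static void record_gp_snapshot(void)
{
	get_state_synchronize_rcu_full(&snap);
}

/*
 * Poll later: returns true once a full grace period has elapsed since
 * the snapshot, meaning all RCU readers that were running at snapshot
 * time have finished.
 */
static bool gp_has_elapsed(void)
{
	return poll_state_synchronize_rcu_full(&snap);
}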
@@ -2756,7 +2756,7 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 struct kvfree_rcu_bulk_data {
 	struct list_head list;
-	unsigned long gp_snap;
+	struct rcu_gp_oldstate gp_snap;
 	unsigned long nr_records;
 	void *records[];
 };
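For reference, the replacement cookie bundles both grace-period
sequence counters, which is where the commit message's 64 bits of
state on 32-bit systems comes from (layout assumed to match
include/linux/rcupdate.h of this era):

struct rcu_gp_oldstate {
	unsigned long rgos_norm;	/* normal grace-period sequence count */
	unsigned long rgos_exp;		/* expedited grace-period sequence count */
};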
@@ -2921,23 +2921,24 @@ kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
 	int i;
 
 	debug_rcu_bhead_unqueue(bnode);
-
-	rcu_lock_acquire(&rcu_callback_map);
-	if (idx == 0) { // kmalloc() / kfree().
-		trace_rcu_invoke_kfree_bulk_callback(
-			rcu_state.name, bnode->nr_records,
-			bnode->records);
-
-		kfree_bulk(bnode->nr_records, bnode->records);
-	} else { // vmalloc() / vfree().
-		for (i = 0; i < bnode->nr_records; i++) {
-			trace_rcu_invoke_kvfree_callback(
-				rcu_state.name, bnode->records[i], 0);
-
-			vfree(bnode->records[i]);
+	if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
+		rcu_lock_acquire(&rcu_callback_map);
+		if (idx == 0) { // kmalloc() / kfree().
+			trace_rcu_invoke_kfree_bulk_callback(
+				rcu_state.name, bnode->nr_records,
+				bnode->records);
+
+			kfree_bulk(bnode->nr_records, bnode->records);
+		} else { // vmalloc() / vfree().
+			for (i = 0; i < bnode->nr_records; i++) {
+				trace_rcu_invoke_kvfree_callback(
+					rcu_state.name, bnode->records[i], 0);
+
+				vfree(bnode->records[i]);
+			}
 		}
+		rcu_lock_release(&rcu_callback_map);
 	}
-	rcu_lock_release(&rcu_callback_map);
 
 	raw_spin_lock_irqsave(&krcp->lock, flags);
 	if (put_cached_bnode(krcp, bnode))
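A subtlety in the hunk above: WARN_ON_ONCE() evaluates to its
condition, so the new wrapper both splats (once) when the required
grace period has not yet elapsed and skips the frees on that path,
preferring a memory leak to a possible use-after-free. Reduced to its
skeleton (a sketch, not the kernel code):

/* WARN_ON_ONCE(cond) returns cond, warning once when it is true. */
if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
	/* Grace period verified: proceed with kfree_bulk()/vfree(). */
} else {
	/* Debug check failed: leave the records unfreed. */
}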
@@ -3081,7 +3082,7 @@ kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
 		INIT_LIST_HEAD(&bulk_ready[i]);
 
 		list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
-			if (!poll_state_synchronize_rcu(bnode->gp_snap))
+			if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
 				break;
 
 			atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
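For contrast, the signatures of the two polling variants (as in the
upstream kernel):

/* Old: the cookie is a single unsigned long, passed by value. */
unsigned long get_state_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate);

/* New: the cookie is a two-counter structure, passed by reference. */
void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);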
@@ -3285,7 +3286,7 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 
 	// Finally insert and update the GP for this page.
 	bnode->records[bnode->nr_records++] = ptr;
-	bnode->gp_snap = get_state_synchronize_rcu();
+	get_state_synchronize_rcu_full(&bnode->gp_snap);
 	atomic_inc(&(*krcp)->bulk_count[idx]);
 
 	return true;
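Note that the snapshot is refreshed on every insertion, so a page's
single ->gp_snap cookie is always at least as new as its newest
record; one successful poll in kvfree_rcu_drain_ready() therefore
covers every pointer stored on that page.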