Merge branch 'net-sched-optimizations-around-action-binding-and-init'
Pedro Tammela says:

====================
net/sched: optimizations around action binding and init

Scaling optimizations for action binding in rtnl-less filters.
We saw noticeable lock contention around idrinfo->lock when testing
on a 56-core system, which disappeared after the patches.
====================

Link: https://lore.kernel.org/r/20231211181807.96028-1-pctammela@mojatatu.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit a25ebbf332
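The contention described in the cover letter comes from the binding fast path: before this series, every filter that bound to an existing action had to take idrinfo->lock (a mutex) just to look the action up by index. The sketch below condenses the before/after lookup paths from the tcf_idr_check_alloc() diff further down; it is illustrative only, and the helper names bind_lookup_old()/bind_lookup_new() are not part of the patch.

        /* Schematic only: condensed from the tcf_idr_check_alloc() hunks below. */

        /* Before: every bind serializes on idrinfo->lock. */
        static struct tc_action *bind_lookup_old(struct tcf_idrinfo *idrinfo, u32 index)
        {
                struct tc_action *p;

                mutex_lock(&idrinfo->lock);     /* contended on large SMP systems */
                p = idr_find(&idrinfo->action_idr, index);
                if (p && !IS_ERR(p))
                        refcount_inc(&p->tcfa_refcnt);
                mutex_unlock(&idrinfo->lock);
                return p;
        }

        /* After: lock-free lookup; the mutex is only taken when a new index
         * has to be reserved (see the "new:" label in the real function).
         */
        static struct tc_action *bind_lookup_new(struct tcf_idrinfo *idrinfo, u32 index)
        {
                struct tc_action *p;

                rcu_read_lock();
                p = idr_find(&idrinfo->action_idr, index);
                if (p && !IS_ERR(p) && !refcount_inc_not_zero(&p->tcfa_refcnt))
                        p = ERR_PTR(-EAGAIN);   /* deleted in parallel, caller retries */
                rcu_read_unlock();
                return p;
        }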
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -191,7 +191,7 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
 			      struct nlattr *est, struct tc_action **a,
 			      const struct tc_action_ops *ops, int bind,
 			      u32 flags);
-void tcf_idr_insert_many(struct tc_action *actions[]);
+void tcf_idr_insert_many(struct tc_action *actions[], int init_res[]);
 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
 			struct tc_action **a, int bind);
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -816,6 +816,9 @@ EXPORT_SYMBOL(tcf_idr_cleanup);
  * its reference and bind counters, and return 1. Otherwise insert temporary
  * error pointer (to prevent concurrent users from inserting actions with same
  * index) and return 0.
+ *
+ * May return -EAGAIN for binding actions in case of a parallel add/delete on
+ * the requested index.
  */
 
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
@@ -824,43 +827,61 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 	struct tc_action *p;
 	int ret;
+	u32 max;
 
-again:
-	mutex_lock(&idrinfo->lock);
 	if (*index) {
+again:
+		rcu_read_lock();
 		p = idr_find(&idrinfo->action_idr, *index);
+
 		if (IS_ERR(p)) {
 			/* This means that another process allocated
 			 * index but did not assign the pointer yet.
 			 */
-			mutex_unlock(&idrinfo->lock);
+			rcu_read_unlock();
 			goto again;
 		}
 
-		if (p) {
-			refcount_inc(&p->tcfa_refcnt);
-			if (bind)
-				atomic_inc(&p->tcfa_bindcnt);
-			*a = p;
-			ret = 1;
-		} else {
-			*a = NULL;
-			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
-					    *index, GFP_KERNEL);
-			if (!ret)
-				idr_replace(&idrinfo->action_idr,
-					    ERR_PTR(-EBUSY), *index);
+		if (!p) {
+			/* Empty slot, try to allocate it */
+			max = *index;
+			rcu_read_unlock();
+			goto new;
 		}
+
+		if (!refcount_inc_not_zero(&p->tcfa_refcnt)) {
+			/* Action was deleted in parallel */
+			rcu_read_unlock();
+			return -EAGAIN;
+		}
+
+		if (bind)
+			atomic_inc(&p->tcfa_bindcnt);
+		*a = p;
+
+		rcu_read_unlock();
+
+		return 1;
 	} else {
+		/* Find a slot */
 		*index = 1;
-		*a = NULL;
-		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
-				    UINT_MAX, GFP_KERNEL);
-		if (!ret)
-			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
-				    *index);
+		max = UINT_MAX;
 	}
 
+new:
+	*a = NULL;
+
+	mutex_lock(&idrinfo->lock);
+	ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max,
+			    GFP_KERNEL);
 	mutex_unlock(&idrinfo->lock);
+
+	/* N binds raced for action allocation,
+	 * retry for all the ones that failed.
+	 */
+	if (ret == -ENOSPC && *index == max)
+		ret = -EAGAIN;
+
 	return ret;
 }
 EXPORT_SYMBOL(tcf_idr_check_alloc);
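For context, the return-value contract documented above is consumed by each action's ->init() callback. The fragment below is a hedged sketch of that typical caller pattern, modeled on existing actions such as act_gact; the surrounding variables (tn, index, est, a, ops, bind, flags) stand for the caller's usual parameters and are not part of this patch.

        /* Sketch of a typical tcf_idr_check_alloc() caller in an action ->init() */
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)            /* includes -EAGAIN from a parallel add/delete
                                 * observed while trying to bind
                                 */
                return err;
        if (err) {              /* 1: existing action found and bound */
                if (bind)
                        return 0;
                if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        } else {                /* 0: index reserved with an ERR_PTR(-EBUSY) placeholder */
                err = tcf_idr_create_from_flags(tn, index, est, a,
                                                ops, bind, flags);
                if (err) {
                        tcf_idr_cleanup(tn, index);
                        return err;
                }
                /* tcf_idr_insert_many() later replaces the placeholder */
        }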
@@ -1283,7 +1304,7 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
 	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
 };
 
-void tcf_idr_insert_many(struct tc_action *actions[])
+void tcf_idr_insert_many(struct tc_action *actions[], int init_res[])
 {
 	struct tc_action *a;
 	int i;
@@ -1291,11 +1312,12 @@ void tcf_idr_insert_many(struct tc_action *actions[])
 	tcf_act_for_each_action(i, a, actions) {
 		struct tcf_idrinfo *idrinfo;
 
+		if (init_res[i] == 0) /* Bound */
+			continue;
+
 		idrinfo = a->idrinfo;
 		mutex_lock(&idrinfo->lock);
-		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
-		 * it is just created, otherwise this is just a nop.
-		 */
+		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
 		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
 		mutex_unlock(&idrinfo->lock);
 	}
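The init_res[] values let tcf_idr_insert_many() tell newly created actions apart from bound ones: only a newly created action still has the temporary ERR_PTR(-EBUSY) placeholder from tcf_idr_check_alloc() in the IDR, so only those need idr_replace() under idrinfo->lock, while a bound action is already live in the IDR and can be skipped without taking the mutex. A schematic of the placeholder lifecycle follows; the numbered comments are explanatory annotations, while the calls themselves are the ones visible in the hunks above.

        /* 1. Reserve: tcf_idr_check_alloc() parks ERR_PTR(-EBUSY) at *index so
         *    concurrent users see the slot as taken. The action's ->init()
         *    reports this as a freshly created action (ACT_P_CREATED).
         */
        ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max,
                            GFP_KERNEL);

        /* 2. Create: the action's ->init() allocates and fills the tc_action. */

        /* 3. Publish: tcf_idr_insert_many() swaps the placeholder for the real
         *    pointer. Bound actions (init_res[i] == 0) never reach this step.
         */
        idr_replace(&idrinfo->action_idr, a, a->tcfa_index);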
@@ -1495,7 +1517,7 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
 	/* We have to commit them all together, because if any error happened in
 	 * between, we could not handle the failure gracefully.
 	 */
-	tcf_idr_insert_many(actions);
+	tcf_idr_insert_many(actions, init_res);
 
 	*attr_size = tcf_action_full_attrs_size(sz);
 	err = i - 1;
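For reference, init_res[] is filled earlier in this same function: each entry receives the result of the corresponding action's ->init() via tcf_action_init_1(), so by the time tcf_idr_insert_many() runs it already encodes created versus bound. The loop below is a condensed sketch of that flow, with the argument list and error handling simplified relative to the real code.

        /* Simplified sketch of the loop earlier in tcf_action_init() */
        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
                                        &init_res[i - 1], flags, extack);
                if (IS_ERR(act)) {
                        err = PTR_ERR(act);
                        goto err;
                }
                sz += tcf_action_fill_size(act);
                /* init_res[i - 1] is now ACT_P_CREATED or 0 (bound/replaced) */
                actions[i - 1] = act;
        }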
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3313,7 +3313,7 @@ int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **
 		act->type = exts->type = TCA_OLD_COMPAT;
 		exts->actions[0] = act;
 		exts->nr_actions = 1;
-		tcf_idr_insert_many(exts->actions);
+		tcf_idr_insert_many(exts->actions, init_res);
 	} else if (exts->action && tb[exts->action]) {
 		int err;
 