apparmor: Convert secid mapping to XArrays instead of IDR

XArrays are a better match than IDR for how AppArmor is mapping
secids.  Specifically, AppArmor is trying to keep the allocation
dense.  XArrays also have the advantage of avoiding the complexity
of IDR preallocation.
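
As an aside, and purely for illustration (not part of this patch), the
two allocation patterns look roughly like this; the names my_idr,
my_idr_lock, my_ids, my_obj and MY_FIRST_ID are made up for the sketch:

/*
 * Sketch only: the same "allocate an ID under an IRQ-safe lock"
 * operation written with an IDR and with an allocating XArray.
 */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>

#define MY_FIRST_ID 2

/* IDR: a separate lock, plus preload to avoid allocating under it. */
static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_idr_lock);

static int alloc_id_idr(void *my_obj, gfp_t gfp)
{
	unsigned long flags;
	int id;

	idr_preload(gfp);
	spin_lock_irqsave(&my_idr_lock, flags);
	id = idr_alloc(&my_idr, my_obj, MY_FIRST_ID, 0, GFP_ATOMIC);
	spin_unlock_irqrestore(&my_idr_lock, flags);
	idr_preload_end();
	return id;	/* >= MY_FIRST_ID on success, negative errno on failure */
}

/* XArray: the array carries its own IRQ-safe lock and free-slot tracking. */
static DEFINE_XARRAY_FLAGS(my_ids, XA_FLAGS_LOCK_IRQ | XA_FLAGS_TRACK_FREE);

static int alloc_id_xa(void *my_obj, u32 *id, gfp_t gfp)
{
	unsigned long flags;
	int err;

	xa_lock_irqsave(&my_ids, flags);
	err = __xa_alloc(&my_ids, id, my_obj,
			 XA_LIMIT(MY_FIRST_ID, INT_MAX), gfp);
	xa_unlock_irqrestore(&my_ids, flags);
	return err;	/* 0 on success, negative errno on failure */
}

The separate spinlock and the preload/GFP_ATOMIC dance go away because
the XArray carries its own IRQ-safe lock (XA_FLAGS_LOCK_IRQ), tracks
free slots itself (XA_FLAGS_TRACK_FREE), and __xa_alloc() can allocate
with the caller's gfp flags.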

In addition, this avoids/fixes a lockdep issue raised in the LKML thread
  "Linux 5.18-rc4"

where an interaction between apparmor and IPC was reported.  That
warning may have been spurious, as the reported issue is in a per-cpu
local lock taken by the IDR, with one side in the IPC id allocation
and the other in AppArmor's secid allocation.

Description by John Johansen <john.johansen@canonical.com>

Message-Id: <226cee6a-6ca1-b603-db08-8500cd8f77b7@gnuweeb.org>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: John Johansen <john.johansen@canonical.com>

diff --git a/security/apparmor/include/secid.h b/security/apparmor/include/secid.h
--- a/security/apparmor/include/secid.h
+++ b/security/apparmor/include/secid.h

@@ -31,6 +31,4 @@ int aa_alloc_secid(struct aa_label *label, gfp_t gfp);
 void aa_free_secid(u32 secid);
 void aa_secid_update(u32 secid, struct aa_label *label);
 
-void aa_secids_init(void);
-
 #endif /* __AA_SECID_H */

diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c

@@ -1857,8 +1857,6 @@ static int __init apparmor_init(void)
 {
 	int error;
 
-	aa_secids_init();
-
 	error = aa_setup_dfa_engine();
 	if (error) {
 		AA_ERROR("Unable to setup dfa engine\n");

diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
--- a/security/apparmor/secid.c
+++ b/security/apparmor/secid.c

@@ -13,9 +13,9 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/gfp.h>
-#include <linux/idr.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/xarray.h>
 
 #include "include/cred.h"
 #include "include/lib.h"
@@ -29,8 +29,7 @@
  */
 #define AA_FIRST_SECID 2
 
-static DEFINE_IDR(aa_secids);
-static DEFINE_SPINLOCK(secid_lock);
+static DEFINE_XARRAY_FLAGS(aa_secids, XA_FLAGS_LOCK_IRQ | XA_FLAGS_TRACK_FREE);
 
 /*
  * TODO: allow policy to reserve a secid range?
@@ -47,9 +46,9 @@ void aa_secid_update(u32 secid, struct aa_label *label)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&secid_lock, flags);
-	idr_replace(&aa_secids, label, secid);
-	spin_unlock_irqrestore(&secid_lock, flags);
+	xa_lock_irqsave(&aa_secids, flags);
+	__xa_store(&aa_secids, secid, label, 0);
+	xa_unlock_irqrestore(&aa_secids, flags);
 }
 
 /**
@@ -58,13 +57,7 @@ void aa_secid_update(u32 secid, struct aa_label *label)
  */
 struct aa_label *aa_secid_to_label(u32 secid)
 {
-	struct aa_label *label;
-
-	rcu_read_lock();
-	label = idr_find(&aa_secids, secid);
-	rcu_read_unlock();
-
-	return label;
+	return xa_load(&aa_secids, secid);
 }
 
 int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
@@ -126,19 +119,16 @@ int aa_alloc_secid(struct aa_label *label, gfp_t gfp)
 	unsigned long flags;
 	int ret;
 
-	idr_preload(gfp);
-	spin_lock_irqsave(&secid_lock, flags);
-	ret = idr_alloc(&aa_secids, label, AA_FIRST_SECID, 0, GFP_ATOMIC);
-	spin_unlock_irqrestore(&secid_lock, flags);
-	idr_preload_end();
+	xa_lock_irqsave(&aa_secids, flags);
+	ret = __xa_alloc(&aa_secids, &label->secid, label,
+			XA_LIMIT(AA_FIRST_SECID, INT_MAX), gfp);
+	xa_unlock_irqrestore(&aa_secids, flags);
 
 	if (ret < 0) {
 		label->secid = AA_SECID_INVALID;
 		return ret;
 	}
 
-	AA_BUG(ret == AA_SECID_INVALID);
-	label->secid = ret;
 	return 0;
 }
 
@@ -150,12 +140,7 @@ void aa_free_secid(u32 secid)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&secid_lock, flags);
-	idr_remove(&aa_secids, secid);
-	spin_unlock_irqrestore(&secid_lock, flags);
-}
-
-void aa_secids_init(void)
-{
-	idr_init_base(&aa_secids, AA_FIRST_SECID);
+	xa_lock_irqsave(&aa_secids, flags);
+	__xa_erase(&aa_secids, secid);
+	xa_unlock_irqrestore(&aa_secids, flags);
 }
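
Again purely as an illustration (not part of the patch), the lookup,
update and free operations against an allocating XArray like the one
defined above reduce to the following; my_ids is the same made-up name
used in the earlier sketch:

/*
 * Sketch only: lookup, update and free on an allocating XArray.
 * my_ids is hypothetical and stands in for AppArmor's secid map.
 */
#include <linux/types.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(my_ids, XA_FLAGS_LOCK_IRQ | XA_FLAGS_TRACK_FREE);

/* Lookup: xa_load() is RCU-safe, no caller-side locking is needed. */
static void *lookup_id(u32 id)
{
	return xa_load(&my_ids, id);
}

/* Update in place: the slot already exists, so a gfp of 0 is fine. */
static void update_id(u32 id, void *new_obj)
{
	unsigned long flags;

	xa_lock_irqsave(&my_ids, flags);
	__xa_store(&my_ids, id, new_obj, 0);
	xa_unlock_irqrestore(&my_ids, flags);
}

/* Free: erasing under the xa_lock also marks the index free again. */
static void free_id(u32 id)
{
	unsigned long flags;

	xa_lock_irqsave(&my_ids, flags);
	__xa_erase(&my_ids, id);
	xa_unlock_irqrestore(&my_ids, flags);
}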