Merge tag 'core-debugobjects-2023-04-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core debugobjects update from Thomas Gleixner:
 "A single update to debugobjects:

  Prevent a race vs. statically initialized objects. Such objects are
  usually not initialized via an init() function. They are special cased
  and detected on first use under the assumption that they are already
  correctly initialized via the static initializer.

  This works correctly unless there are two concurrent debug object
  operations on such an object.

  The first one detects that the object is not yet tracked and tries to
  establish a tracking object after dropping the debug objects hash
  bucket lock. The concurrent operation does the same. The one which
  wins the race ends up modifying the state of the object, which makes
  the other one fail, resulting in a bogus debug objects warning.

  Prevent this by making the detection of a static object and the
  allocation of a tracking object atomic under the hash bucket lock, so
  the first one to acquire the hash bucket lock will succeed and the
  second one will observe the correct tracking state.

  This race existed forever but was only exposed when the timer wheel
  code added a debug_object_assert_init() call outside of the timer base
  locked region. This replaced the previous warning about
  timer::function being NULL, which had to be removed when the
  timer_shutdown() mechanics were added"

* tag 'core-debugobjects-2023-04-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  debugobject: Prevent init race with static objects
commit 29e95a4b26
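The locking pattern the patch adopts is easier to see in a small standalone model. The sketch below is illustrative only: it is not kernel code, pthread mutexes stand in for the raw spinlock on the debug objects hash bucket, and a single hypothetical "slot" stands in for the bucket's hash list. The point it demonstrates is that the "is this object tracked yet?" lookup and the creation of the tracking entry happen in one critical section, so whichever thread takes the lock first creates the entry and the loser simply observes it.

/*
 * Standalone model of the race-free lookup-or-create pattern.
 * Illustrative only; not the kernel implementation.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tracking_entry {
        void *addr;
        int state;                      /* stands in for ODEBUG_STATE_* */
};

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tracking_entry *slot;     /* one-entry "bucket" for the demo */

/* Atomic lookup-or-create, analogous in spirit to lookup_object_or_alloc() */
static struct tracking_entry *lookup_or_create(void *addr, int init_state)
{
        struct tracking_entry *e;

        pthread_mutex_lock(&bucket_lock);
        e = slot;
        if (!e || e->addr != addr) {
                /* Not tracked yet: create the entry under the same lock */
                e = malloc(sizeof(*e));
                if (e) {
                        e->addr = addr;
                        e->state = init_state;
                        slot = e;
                }
        }
        pthread_mutex_unlock(&bucket_lock);
        return e;
}

static int dummy_object;        /* plays the statically initialized object */

static void *racer(void *arg)
{
        /* Both threads hit "first use" of the same object concurrently */
        struct tracking_entry *e = lookup_or_create(&dummy_object, 1);

        if (e)
                printf("thread %ld sees entry %p, state %d\n",
                       (long)arg, (void *)e, e->state);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, racer, (void *)1L);
        pthread_create(&t2, NULL, racer, (void *)2L);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}

With the pre-fix ordering (look up under the lock, drop it, then allocate and re-acquire) both threads can pass the "not tracked" check and both try to establish the tracking object, which is exactly the scenario behind the bogus warning. In the patch below this role is played by lookup_object_or_alloc(), which is called with the hash bucket lock held.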
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -216,10 +216,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
 	return obj;
 }
 
-/*
- * Allocate a new object. If the pool is empty, switch off the debugger.
- * Must be called with interrupts disabled.
- */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
 {
@@ -552,11 +548,49 @@ static void debug_object_is_on_stack(void *addr, int onstack)
 	WARN_ON(1);
 }
 
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+						const struct debug_obj_descr *descr,
+						bool onstack, bool alloc_ifstatic)
+{
+	struct debug_obj *obj = lookup_object(addr, b);
+	enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+	if (likely(obj))
+		return obj;
+
+	/*
+	 * debug_object_init() unconditionally allocates untracked
+	 * objects. It does not matter whether it is a static object or
+	 * not.
+	 *
+	 * debug_object_assert_init() and debug_object_activate() allow
+	 * allocation only if the descriptor callback confirms that the
+	 * object is static and considered initialized. For non-static
+	 * objects the allocation needs to be done from the fixup callback.
+	 */
+	if (unlikely(alloc_ifstatic)) {
+		if (!descr->is_static_object || !descr->is_static_object(addr))
+			return ERR_PTR(-ENOENT);
+		/* Statically allocated objects are considered initialized */
+		state = ODEBUG_STATE_INIT;
+	}
+
+	obj = alloc_object(addr, b, descr);
+	if (likely(obj)) {
+		obj->state = state;
+		debug_object_is_on_stack(addr, onstack);
+		return obj;
+	}
+
+	/* Out of memory. Do the cleanup outside of the locked region */
+	debug_objects_enabled = 0;
+	return NULL;
+}
+
 static void
 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
 {
 	enum debug_obj_state state;
-	bool check_stack = false;
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
@@ -572,16 +606,11 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
 
 	raw_spin_lock_irqsave(&db->lock, flags);
 
-	obj = lookup_object(addr, db);
-	if (!obj) {
-		obj = alloc_object(addr, db, descr);
-		if (!obj) {
-			debug_objects_enabled = 0;
-			raw_spin_unlock_irqrestore(&db->lock, flags);
-			debug_objects_oom();
-			return;
-		}
-		check_stack = true;
+	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+	if (unlikely(!obj)) {
+		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_objects_oom();
+		return;
 	}
 
 	switch (obj->state) {
@@ -607,8 +636,6 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
-	if (check_stack)
-		debug_object_is_on_stack(addr, onstack);
 }
 
 /**
@@ -648,14 +675,12 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
  */
 int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
 {
+	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 	enum debug_obj_state state;
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
 	int ret;
-	struct debug_obj o = { .object = addr,
-			       .state = ODEBUG_STATE_NOTAVAILABLE,
-			       .descr = descr };
 
 	if (!debug_objects_enabled)
 		return 0;
@@ -664,8 +689,8 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
 
 	raw_spin_lock_irqsave(&db->lock, flags);
 
-	obj = lookup_object(addr, db);
-	if (obj) {
+	obj = lookup_object_or_alloc(addr, db, descr, false, true);
+	if (likely(!IS_ERR_OR_NULL(obj))) {
 		bool print_object = false;
 
 		switch (obj->state) {
@@ -698,24 +723,16 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 
-	/*
-	 * We are here when a static object is activated. We
-	 * let the type specific code confirm whether this is
-	 * true or not. if true, we just make sure that the
-	 * static object is tracked in the object tracker. If
-	 * not, this must be a bug, so we try to fix it up.
-	 */
-	if (descr->is_static_object && descr->is_static_object(addr)) {
-		/* track this static object */
-		debug_object_init(addr, descr);
-		debug_object_activate(addr, descr);
-	} else {
-		debug_print_object(&o, "activate");
-		ret = debug_object_fixup(descr->fixup_activate, addr,
-					ODEBUG_STATE_NOTAVAILABLE);
-		return ret ? 0 : -EINVAL;
+	/* If NULL the allocation has hit OOM */
+	if (!obj) {
+		debug_objects_oom();
+		return 0;
 	}
-	return 0;
+
+	/* Object is neither static nor tracked. It's not initialized */
+	debug_print_object(&o, "activate");
+	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+	return ret ? 0 : -EINVAL;
 }
 EXPORT_SYMBOL_GPL(debug_object_activate);
 
@@ -869,6 +886,7 @@ EXPORT_SYMBOL_GPL(debug_object_free)
  */
 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
 {
+	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
@@ -879,31 +897,20 @@ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
 	db = get_bucket((unsigned long) addr);
 
 	raw_spin_lock_irqsave(&db->lock, flags);
+	obj = lookup_object_or_alloc(addr, db, descr, false, true);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (likely(!IS_ERR_OR_NULL(obj)))
+		return;
 
-	obj = lookup_object(addr, db);
+	/* If NULL the allocation has hit OOM */
 	if (!obj) {
-		struct debug_obj o = { .object = addr,
-				       .state = ODEBUG_STATE_NOTAVAILABLE,
-				       .descr = descr };
-
-		raw_spin_unlock_irqrestore(&db->lock, flags);
-		/*
-		 * Maybe the object is static, and we let the type specific
-		 * code confirm. Track this static object if true, else invoke
-		 * fixup.
-		 */
-		if (descr->is_static_object && descr->is_static_object(addr)) {
-			/* Track this static object */
-			debug_object_init(addr, descr);
-		} else {
-			debug_print_object(&o, "assert_init");
-			debug_object_fixup(descr->fixup_assert_init, addr,
-					   ODEBUG_STATE_NOTAVAILABLE);
-		}
+		debug_objects_oom();
 		return;
 	}
 
-	raw_spin_unlock_irqrestore(&db->lock, flags);
+	/* Object is neither tracked nor static. It's not initialized. */
+	debug_print_object(&o, "assert_init");
+	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
 }
 EXPORT_SYMBOL_GPL(debug_object_assert_init);
 
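For context on how the is_static_object() detection used by lookup_object_or_alloc() is wired up: each tracked type supplies a struct debug_obj_descr whose callbacks let the debugobjects core decide whether an untracked object is a statically initialized instance. The sketch below is hedged: the descriptor fields and the debug_object_*() calls are the existing debugobjects API, but the "foo" type, foo_is_static_object(), FOO_STATIC_MAGIC and FOO_STATIC_INIT are invented for illustration and do not exist in the tree.

#include <linux/debugobjects.h>

struct foo {
        unsigned long state;
};

/* Hypothetical static initializer whose pattern the callback can recognize */
#define FOO_STATIC_MAGIC        0x4f4fUL
#define FOO_STATIC_INIT         { .state = FOO_STATIC_MAGIC }

static struct foo static_foo = FOO_STATIC_INIT;

/*
 * Called by the debugobjects core for an untracked object to decide
 * whether it is a statically initialized instance.
 */
static bool foo_is_static_object(void *addr)
{
        struct foo *f = addr;

        return f->state == FOO_STATIC_MAGIC;
}

static const struct debug_obj_descr foo_debug_descr = {
        .name                   = "foo",
        .is_static_object       = foo_is_static_object,
};

/* First use of the statically initialized object, possibly from two CPUs */
static void foo_first_use(void)
{
        debug_object_assert_init(&static_foo, &foo_debug_descr);
        debug_object_activate(&static_foo, &foo_debug_descr);
}

With the patch applied, two CPUs reaching foo_first_use() concurrently both end up with the single tracking entry created under the hash bucket lock instead of racing to establish it and tripping the bogus warning.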