locking/spinlock/rt: Prepare for RT local_lock
Add the static and runtime initializer mechanics to support the RT variant
of local_lock, which requires the lock type in the lockdep map to be set
to LD_LOCK_PERCPU.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211305.967526724@linutronix.de
This commit is contained in:
parent
992caf7f17
commit
31552385f8
@@ -8,10 +8,10 @@
|
||||
|
||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||
extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
|
||||
struct lock_class_key *key);
|
||||
struct lock_class_key *key, bool percpu);
|
||||
#else
|
||||
static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
|
||||
struct lock_class_key *key)
|
||||
struct lock_class_key *key, bool percpu)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
@@ -21,7 +21,15 @@ do { \
|
||||
static struct lock_class_key __key; \
|
||||
\
|
||||
rt_mutex_base_init(&(slock)->lock); \
|
||||
__rt_spin_lock_init(slock, #slock, &__key); \
|
||||
__rt_spin_lock_init(slock, #slock, &__key, false); \
|
||||
} while (0)
|
||||
|
||||
#define local_spin_lock_init(slock) \
|
||||
do { \
|
||||
static struct lock_class_key __key; \
|
||||
\
|
||||
rt_mutex_base_init(&(slock)->lock); \
|
||||
__rt_spin_lock_init(slock, #slock, &__key, true); \
|
||||
} while (0)
|
||||
|
||||
extern void rt_spin_lock(spinlock_t *lock);
|
||||
|
@@ -60,6 +60,12 @@ typedef struct spinlock {
|
||||
SPIN_DEP_MAP_INIT(name) \
|
||||
}
|
||||
|
||||
#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \
|
||||
{ \
|
||||
.lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
|
||||
LOCAL_SPIN_DEP_MAP_INIT(name) \
|
||||
}
|
||||
|
||||
#define DEFINE_SPINLOCK(name) \
|
||||
spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
|
||||
|
||||
|
@@ -37,9 +37,17 @@ typedef struct raw_spinlock {
|
||||
.name = #lockname, \
|
||||
.wait_type_inner = LD_WAIT_CONFIG, \
|
||||
}
|
||||
|
||||
# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \
|
||||
.dep_map = { \
|
||||
.name = #lockname, \
|
||||
.wait_type_inner = LD_WAIT_CONFIG, \
|
||||
.lock_type = LD_LOCK_PERCPU, \
|
||||
}
|
||||
#else
|
||||
# define RAW_SPIN_DEP_MAP_INIT(lockname)
|
||||
# define SPIN_DEP_MAP_INIT(lockname)
|
||||
# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DEBUG_SPINLOCK
|
||||
|
@@ -120,10 +120,13 @@ EXPORT_SYMBOL(rt_spin_trylock_bh);
|
||||
|
||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
|
||||
struct lock_class_key *key)
|
||||
struct lock_class_key *key, bool percpu)
|
||||
{
|
||||
u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
|
||||
|
||||
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
|
||||
lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
|
||||
lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
|
||||
LD_WAIT_INV, type);
|
||||
}
|
||||
EXPORT_SYMBOL(__rt_spin_lock_init);
|
||||
#endif
|
||||
|
Loading…
Reference in New Issue
Block a user