[PATCH] sem2mutex: kprobes
Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 7a7d1cf954
parent 2c68ee754c
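
For reference, a minimal sketch of the conversion pattern applied throughout this patch: a semaphore declared with DECLARE_MUTEX() and used as a binary lock via down()/up() becomes a real mutex declared with DEFINE_MUTEX() and locked via mutex_lock()/mutex_unlock(). The names example_mutex and example_table_op below are hypothetical, not taken from the patch.

/*
 * Minimal sketch of the sem2mutex conversion pattern (illustration only;
 * example_mutex and example_table_op are hypothetical names).
 *
 * Before:   static DECLARE_MUTEX(example_mutex);
 *           down(&example_mutex);   ...   up(&example_mutex);
 *
 * After:
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);		/* protects the example data */

static void example_table_op(void)
{
	mutex_lock(&example_mutex);		/* may sleep, like down() did */
	/* ... touch the data protected by example_mutex ... */
	mutex_unlock(&example_mutex);
}
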
@@ -84,9 +84,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 }
 
 static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -81,9 +81,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 }
 
 static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -222,9 +222,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 }
 
 static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -36,6 +36,7 @@
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_KPROBES
 #include <asm/kprobes.h>
@@ -152,7 +153,7 @@ struct kretprobe_instance {
 };
 
 extern spinlock_t kretprobe_lock;
-extern struct semaphore kprobe_mutex;
+extern struct mutex kprobe_mutex;
 extern int arch_prepare_kprobe(struct kprobe *p);
 extern void arch_arm_kprobe(struct kprobe *p);
 extern void arch_disarm_kprobe(struct kprobe *p);
@@ -48,7 +48,7 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-DECLARE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
+DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
@@ -460,7 +460,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	}
 
 	p->nmissed = 0;
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
@@ -477,7 +477,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	arch_arm_kprobe(p);
 
 out:
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 
 	if (ret && probed_mod)
 		module_put(probed_mod);
@@ -496,10 +496,10 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 	struct kprobe *old_p, *list_p;
 	int cleanup_p;
 
-	down(&kprobe_mutex);
+	mutex_lock(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (unlikely(!old_p)) {
-		up(&kprobe_mutex);
+		mutex_unlock(&kprobe_mutex);
 		return;
 	}
 	if (p != old_p) {
@@ -507,7 +507,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 		if (list_p == p)
 			/* kprobe p is a valid probe */
 			goto valid_p;
-		up(&kprobe_mutex);
+		mutex_unlock(&kprobe_mutex);
 		return;
 	}
 valid_p:
@@ -523,7 +523,7 @@ valid_p:
 		cleanup_p = 0;
 	}
 
-	up(&kprobe_mutex);
+	mutex_unlock(&kprobe_mutex);
 
 	synchronize_sched();
 	if (p->mod_refcounted &&