author    Thomas Gleixner <tglx@linutronix.de>  2009-07-25 16:09:17 +0200
committer Ingo Molnar <mingo@elte.hu>           2011-09-13 11:11:45 +0200
commit    ec484608c5885931c432e99ecfd2772288cd993c (patch)
tree      1e62f1ac1e79241d092bef4c7b9f051c9cf9b014  /kernel/kprobes.c
parent    76bf6877fed4f59826a72e0ab209badfc594008a (diff)
locking, kprobes: Annotate the hash locks and kretprobe.lock as raw
The kprobe locks can be taken in atomic context and therefore cannot be preempted on -rt - annotate them as raw. In mainline this change documents the low-level nature of the locks - otherwise there is no functional difference. Lockdep and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c  |  34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)
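
For context: spinlock_t and raw_spinlock_t behave identically in mainline, but on PREEMPT_RT spinlock_t is substituted with a sleeping lock, while raw_spinlock_t remains a true busy-waiting lock. Code that may run in atomic context - as the kprobe and kretprobe handlers below do - therefore needs the raw variant. The following is a minimal illustrative sketch of the resulting pattern; the lock and function names are hypothetical and not taken from this patch.

#include <linux/spinlock.h>

/* Hypothetical lock protecting data touched from atomic context. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_atomic_path(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() never sleeps, even on -rt, so this
	 * is safe from IRQ or exception context - which is exactly why
	 * the kretprobe hash locks are annotated as raw in the diff
	 * that follows.
	 */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}
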
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b30fd54eb985..2f193d0ba7f2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
- spinlock_t lock ____cacheline_aligned_in_smp;
+ raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
return &(kretprobe_table_locks[hash].lock);
}
@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
hlist_del(&ri->hlist);
INIT_HLIST_NODE(&ri->hlist);
if (likely(rp)) {
- spin_lock(&rp->lock);
+ raw_spin_lock(&rp->lock);
hlist_add_head(&ri->hlist, &rp->free_instances);
- spin_unlock(&rp->lock);
+ raw_spin_unlock(&rp->lock);
} else
/* Unregistering */
hlist_add_head(&ri->hlist, head);
@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
__acquires(hlist_lock)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- spinlock_t *hlist_lock;
+ raw_spinlock_t *hlist_lock;
*head = &kretprobe_inst_table[hash];
hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_lock_irqsave(hlist_lock, *flags);
+ raw_spin_lock_irqsave(hlist_lock, *flags);
}
static void __kprobes kretprobe_table_lock(unsigned long hash,
unsigned long *flags)
__acquires(hlist_lock)
{
- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_lock_irqsave(hlist_lock, *flags);
+ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+ raw_spin_lock_irqsave(hlist_lock, *flags);
}
void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
__releases(hlist_lock)
{
unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- spinlock_t *hlist_lock;
+ raw_spinlock_t *hlist_lock;
hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_unlock_irqrestore(hlist_lock, *flags);
+ raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
static void __kprobes kretprobe_table_unlock(unsigned long hash,
unsigned long *flags)
__releases(hlist_lock)
{
- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- spin_unlock_irqrestore(hlist_lock, *flags);
+ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+ raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
/*
@@ -1663,12 +1663,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
/*TODO: consider to only swap the RA after the last pre_handler fired */
hash = hash_ptr(current, KPROBE_HASH_BITS);
- spin_lock_irqsave(&rp->lock, flags);
+ raw_spin_lock_irqsave(&rp->lock, flags);
if (!hlist_empty(&rp->free_instances)) {
ri = hlist_entry(rp->free_instances.first,
struct kretprobe_instance, hlist);
hlist_del(&ri->hlist);
- spin_unlock_irqrestore(&rp->lock, flags);
+ raw_spin_unlock_irqrestore(&rp->lock, flags);
ri->rp = rp;
ri->task = current;
@@ -1685,7 +1685,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
kretprobe_table_unlock(hash, &flags);
} else {
rp->nmissed++;
- spin_unlock_irqrestore(&rp->lock, flags);
+ raw_spin_unlock_irqrestore(&rp->lock, flags);
}
return 0;
}
@@ -1721,7 +1721,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
rp->maxactive = num_possible_cpus();
#endif
}
- spin_lock_init(&rp->lock);
+ raw_spin_lock_init(&rp->lock);
INIT_HLIST_HEAD(&rp->free_instances);
for (i = 0; i < rp->maxactive; i++) {
inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1959,7 +1959,7 @@ static int __init init_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
INIT_HLIST_HEAD(&kprobe_table[i]);
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
- spin_lock_init(&(kretprobe_table_locks[i].lock));
+ raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
}
/*