author     Christoph Hellwig <hch@lst.de>    2007-10-16 01:24:07 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-16 09:42:50 -0700
commit     74a0b5762713a26496db72eac34fbbed46f20fce
tree       4a14df7c07ebc16283454f33713519a0e10b5c43 /kernel/kprobes.c
parent     d5a7430ddcdb598261d70f7eb1bf450b5be52085
x86: optimize page faults like all other architectures and kill notifier cruft
x86(-64) are the last architectures still using the page fault notifier cruft for the kprobes page fault hook. This patch converts them to proper direct calls, and removes the now-unused page fault notifier bits as well as the cruft in kprobes.c that was related to this mess.

I know Andi didn't really like this, but all other architecture maintainers agreed that the direct calls are much better; besides the obvious cruft removal, a common way of dealing with kprobes across architectures is important as well.

[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc64]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Andi Kleen <ak@suse.de>
Cc: <linux-arch@vger.kernel.org>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
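The arch-side half of the conversion is outside this kprobes.c-limited view. As a rough sketch of the direct-call shape the patch moves to, the x86 fault path gains an inline check along these lines (the helper name, the user_mode() test, and the trap number 14 are illustrative assumptions, not quoted from the patch):

/*
 * Approximate shape of the direct kprobes hook in the x86 page fault
 * path after this patch; the real arch/* change is not shown in this
 * diffstat-limited view.
 */
static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;

	/*
	 * kprobe_running() relies on smp_processor_id(), so preemption
	 * must stay off across the check; user-mode faults can never be
	 * kprobe traps.
	 */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;	/* the probe consumed the fault */
		preempt_enable();
	}
	return ret;
#else
	return 0;
#endif
}

do_page_fault() can then call such a helper first and bail out when it returns nonzero, instead of walking a priority-ordered notifier chain on every fault; that is the optimization the subject line refers to.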
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c | 39
1 file changed, 3 insertions(+), 36 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 4b8a4493c541..f9798ff7899f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -64,7 +64,6 @@
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-static atomic_t kprobe_count;
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;
@@ -73,11 +72,6 @@ DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
-static struct notifier_block kprobe_page_fault_nb = {
- .notifier_call = kprobe_exceptions_notify,
- .priority = 0x7fffffff /* we need to notified first */
-};
-
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -556,8 +550,6 @@ static int __kprobes __register_kprobe(struct kprobe *p,
old_p = get_kprobe(p->addr);
if (old_p) {
ret = register_aggr_kprobe(old_p, p);
- if (!ret)
- atomic_inc(&kprobe_count);
goto out;
}
@@ -569,13 +561,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
- if (kprobe_enabled) {
- if (atomic_add_return(1, &kprobe_count) == \
- (ARCH_INACTIVE_KPROBE_COUNT + 1))
- register_page_fault_notifier(&kprobe_page_fault_nb);
-
+ if (kprobe_enabled)
arch_arm_kprobe(p);
- }
+
out:
mutex_unlock(&kprobe_mutex);
@@ -658,16 +646,6 @@ valid_p:
}
mutex_unlock(&kprobe_mutex);
}
-
- /* Call unregister_page_fault_notifier()
- * if no probes are active
- */
- mutex_lock(&kprobe_mutex);
- if (atomic_add_return(-1, &kprobe_count) == \
- ARCH_INACTIVE_KPROBE_COUNT)
- unregister_page_fault_notifier(&kprobe_page_fault_nb);
- mutex_unlock(&kprobe_mutex);
- return;
}
static struct notifier_block kprobe_exceptions_nb = {
@@ -815,7 +793,6 @@ static int __init init_kprobes(void)
INIT_HLIST_HEAD(&kprobe_table[i]);
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
}
- atomic_set(&kprobe_count, 0);
/* By default, kprobes are enabled */
kprobe_enabled = true;
@@ -921,13 +898,6 @@ static void __kprobes enable_all_kprobes(void)
if (kprobe_enabled)
goto already_enabled;
- /*
- * Re-register the page fault notifier only if there are any
- * active probes at the time of enabling kprobes globally
- */
- if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
- register_page_fault_notifier(&kprobe_page_fault_nb);
-
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
@@ -968,10 +938,7 @@ static void __kprobes disable_all_kprobes(void)
mutex_unlock(&kprobe_mutex);
/* Allow all currently running kprobes to complete */
synchronize_sched();
-
- mutex_lock(&kprobe_mutex);
- /* Unconditionally unregister the page_fault notifier */
- unregister_page_fault_notifier(&kprobe_page_fault_nb);
+ return;
already_disabled:
mutex_unlock(&kprobe_mutex);