author    Jeremy Fitzhardinge <jeremy@goop.org>  2008-08-20 17:02:18 -0700
committer Ingo Molnar <mingo@elte.hu>            2008-08-21 13:52:57 +0200
commit    168d2f464ab9860f0d1e66cf1f9684973222f1c6 (patch)
tree      c3a4199cf2aa4c6322b7860c431fd5bfdc4220ef /arch/x86
parent    7708ad64a24a674f7905aa7a5099a50f055debec (diff)
xen: save previous spinlock when blocking
A spinlock can be interrupted while spinning, so make sure we preserve
the previous lock of interest if we're taking a lock from within an
interrupt handler.

We also need to deal with the case where the blocking path gets
interrupted between testing to see if the lock is free and actually
blocking.  If we get interrupted there and end up in the state where
the lock is free but the irq isn't pending, then we'll block
indefinitely in the hypervisor.  The fix is to make sure that any
nested lock-takers will always leave the irq pending if there's any
chance the outer lock became free.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
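To make the failure window concrete, here is a small hypothetical timeline
and userspace model of the interleaving; the flag and helper names below are
invented for illustration and only mirror, not reproduce, the
hypercall-backed primitives used in the patch.

	#include <stdbool.h>

	/* Stand-in for the per-cpu kicker irq's pending bit (assumption:
	 * reduced to a plain flag so the interleaving is easy to follow). */
	static volatile bool irq_pending;

	/*
	 * The window the patch closes:
	 *
	 *   outer lock-taker                nested taker (in irq handler)
	 *   ----------------                -----------------------------
	 *   xen_clear_irq_pending()
	 *   xen_spin_trylock() fails
	 *                     <interrupt>
	 *                                   takes and releases its own lock;
	 *                                   the outer lock may become free
	 *                                   here, with no kick ever sent
	 *                     </interrupt>
	 *   xen_poll_irq()  <- blocks forever: lock free, irq not pending
	 *
	 * The fix: a nested taker that acquired its lock re-arms the irq
	 * before returning, so the outer xen_poll_irq() returns at once
	 * and the outer do/while loop retries its trylock.
	 */
	static void nested_taker_got_lock(bool interrupted_an_outer_spinner)
	{
		if (interrupted_an_outer_spinner)
			irq_pending = true;	/* mirrors xen_set_irq_pending(irq) */
	}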
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/xen/spinlock.c | 65
1 file changed, 50 insertions(+), 15 deletions(-)
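Before reading the diff, it may help to see the save/restore discipline in
isolation; this is a minimal hypothetical sketch (userspace __thread standing
in for per-cpu data, names invented), not the kernel code itself:

	struct xen_spinlock;

	/* models DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners) */
	static __thread struct xen_spinlock *lock_of_interest;

	static struct xen_spinlock *start_spinning(struct xen_spinlock *xl)
	{
		struct xen_spinlock *prev = lock_of_interest;

		lock_of_interest = xl;	/* an irq handler may re-enter and overwrite this... */
		return prev;		/* ...so the old value is handed back to the caller */
	}

	static void stop_spinning(struct xen_spinlock *prev)
	{
		lock_of_interest = prev;	/* restore the outer lock of interest, or NULL */
	}

The diff below applies exactly this pattern to spinning_lock()/unspinning_lock().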
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 8dc4d31da67f..4884bc603aa7 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -47,25 +47,41 @@ static int xen_spin_trylock(struct raw_spinlock *lock)
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-static inline void spinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as interested in a lock. Returns the CPU's previous
+ * lock of interest, in case we got preempted by an interrupt.
+ */
+static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
+ struct xen_spinlock *prev;
+
+ prev = __get_cpu_var(lock_spinners);
__get_cpu_var(lock_spinners) = xl;
+
wmb(); /* set lock of interest before count */
+
asm(LOCK_PREFIX " incw %0"
: "+m" (xl->spinners) : : "memory");
+
+ return prev;
}
-static inline void unspinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as no longer interested in a lock. Restores previous
+ * lock of interest (NULL for none).
+ */
+static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
asm(LOCK_PREFIX " decw %0"
: "+m" (xl->spinners) : : "memory");
- wmb(); /* decrement count before clearing lock */
- __get_cpu_var(lock_spinners) = NULL;
+ wmb(); /* decrement count before restoring lock */
+ __get_cpu_var(lock_spinners) = prev;
}
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+ struct xen_spinlock *prev;
int irq = __get_cpu_var(lock_kicker_irq);
int ret;
@@ -74,23 +90,42 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
return 0;
/* announce we're spinning */
- spinning_lock(xl);
+ prev = spinning_lock(xl);
- /* clear pending */
- xen_clear_irq_pending(irq);
+ do {
+ /* clear pending */
+ xen_clear_irq_pending(irq);
+
+ /* check again make sure it didn't become free while
+ we weren't looking */
+ ret = xen_spin_trylock(lock);
+ if (ret) {
+ /*
+ * If we interrupted another spinlock while it
+ * was blocking, make sure it doesn't block
+ * without rechecking the lock.
+ */
+ if (prev != NULL)
+ xen_set_irq_pending(irq);
+ goto out;
+ }
- /* check again make sure it didn't become free while
- we weren't looking */
- ret = xen_spin_trylock(lock);
- if (ret)
- goto out;
+ /*
+ * Block until irq becomes pending. If we're
+ * interrupted at this point (after the trylock but
+ * before entering the block), then the nested lock
+ * handler guarantees that the irq will be left
+ * pending if there's any chance the lock became free;
+ * xen_poll_irq() returns immediately if the irq is
+ * pending.
+ */
+ xen_poll_irq(irq);
+ } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
- /* block until irq becomes pending */
- xen_poll_irq(irq);
kstat_this_cpu.irqs[irq]++;
out:
- unspinning_lock(xl);
+ unspinning_lock(xl, prev);
return ret;
}
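For reference, here is the slow path as it reads with the patch applied,
assembled from the hunks above; the irq == -1 early-out is a context line
filled in from the pre-patch file, so treat this as a best-effort
reconstruction rather than an authoritative listing:

	static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
	{
		struct xen_spinlock *xl = (struct xen_spinlock *)lock;
		struct xen_spinlock *prev;
		int irq = __get_cpu_var(lock_kicker_irq);
		int ret;

		/* If kicker interrupts not initialized yet, just spin */
		if (irq == -1)
			return 0;

		/* announce we're spinning; remember any lock we interrupted */
		prev = spinning_lock(xl);

		do {
			/* clear pending */
			xen_clear_irq_pending(irq);

			/* check again to make sure the lock didn't become
			   free while we weren't looking */
			ret = xen_spin_trylock(lock);
			if (ret) {
				/* we interrupted another spinner: leave the
				   irq pending so it rechecks its lock */
				if (prev != NULL)
					xen_set_irq_pending(irq);
				goto out;
			}

			/* block until the irq becomes pending; returns
			   immediately if it already is */
			xen_poll_irq(irq);
		} while (!xen_test_irq_pending(irq));	/* spurious wakeups */

		kstat_this_cpu.irqs[irq]++;

	out:
		unspinning_lock(xl, prev);
		return ret;
	}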