author     Nick Piggin <nickpiggin@yahoo.com.au>       2005-06-25 14:57:23 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-25 16:24:43 -0700
commit     4866cde064afbb6c2a488c265e696879de616daa (patch)
tree       6effad1ab6271129fc607b98273086409876563a /include/asm-sparc64
parent     48c08d3f8ff94fa118187e4d8d4a5707bb85e59d (diff)
[PATCH] sched: cleanup context switch locking
Instead of requiring architecture code to interact with the scheduler's locking implementation, provide a couple of defines that can be used by the architecture to request runqueue unlocked context switches, and ask for interrupts to be enabled over the context switch.

Also replaces the "switch_lock" used by these architectures with an oncpu flag (note, not a potentially slow bitflag). This eliminates one bus locked memory operation when context switching, and simplifies the task_running function.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
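For context, the scheduler side of this interface works roughly as sketched below. This is a simplified illustration rather than the literal kernel/sched.c change: the helper names (prepare_lock_switch(), finish_lock_switch()), the runqueue_t/task_t types and the placement of the oncpu field are assumed here from the description above, and memory barriers as well as the separate __ARCH_WANT_INTERRUPTS_ON_CTXSW case are left out. The idea is that an architecture defining __ARCH_WANT_UNLOCKED_CTXSW has the runqueue lock dropped before the low-level switch runs, and task_running() becomes a plain read of the oncpu flag instead of a probe of switch_lock.

/*
 * Sketch of the generic hooks, assuming task_struct carries an "oncpu"
 * flag as described in the changelog above.  Not the literal patch.
 */
#ifdef __ARCH_WANT_UNLOCKED_CTXSW

static inline int task_running(runqueue_t *rq, task_t *p)
{
	/* No switch_lock probe: the flag is set for as long as p owns a CPU. */
	return p->oncpu;
}

static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
{
	/* Mark the incoming task as on-CPU, then drop the runqueue lock so
	 * the low-level switch (e.g. sparc64's flushw_all()) runs unlocked. */
	next->oncpu = 1;
	spin_unlock(&rq->lock);
}

static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
{
	/* A plain store clears the flag; this replaces the bus-locked
	 * spin_unlock_irq(&prev->switch_lock) that architectures did before. */
	prev->oncpu = 0;
	local_irq_enable();
}

#else /* !__ARCH_WANT_UNLOCKED_CTXSW */

static inline int task_running(runqueue_t *rq, task_t *p)
{
	return rq->curr == p;
}

#endif

With that in place, the sparc64 hunk below only has to flush register windows in prepare_arch_switch(); the locking around the context switch is handled by the generic code.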
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/system.h  14
1 files changed, 4 insertions, 10 deletions
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index fd12ca386f48..f9be2c5b4dc9 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -139,19 +139,13 @@ extern void __flushw_user(void);
#define flush_user_windows flushw_user
#define flush_register_windows flushw_all
-#define prepare_arch_switch(rq, next)		\
-do {	spin_lock(&(next)->switch_lock);	\
-	spin_unlock(&(rq)->lock);		\
+/* Don't hold the runqueue lock over context switch */
+#define __ARCH_WANT_UNLOCKED_CTXSW
+#define prepare_arch_switch(next)		\
+do {						\
 	flushw_all();				\
 } while (0)
-#define finish_arch_switch(rq, prev)		\
-do {	spin_unlock_irq(&(prev)->switch_lock);	\
-} while (0)
-
-#define task_running(rq, p) \
-	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-
/* See what happens when you design the chip correctly?
*
* We tell gcc we clobber all non-fixed-usage registers except