path: root/kernel/sched/core.c
author		H. Peter Anvin <hpa@linux.intel.com>	2014-05-21 15:23:19 -0700
committer	H. Peter Anvin <hpa@linux.intel.com>	2014-05-21 15:23:19 -0700
commit		e6ab9a20e73e790d47e6aa231fcf66f27b6ce3d4 (patch)
tree		6a41361e1de2a6693ccae96b99307e79a467236f /kernel/sched/core.c
parent		34273f41d57ee8d854dcd2a1d754cbb546cb548f (diff)
parent		7ed6fb9b5a5510e4ef78ab27419184741169978a (diff)
download	linux-e6ab9a20e73e790d47e6aa231fcf66f27b6ce3d4.tar.gz
linux-e6ab9a20e73e790d47e6aa231fcf66f27b6ce3d4.tar.bz2
linux-e6ab9a20e73e790d47e6aa231fcf66f27b6ce3d4.zip
Merge commit '7ed6fb9b5a5510e4ef78ab27419184741169978a' into x86/espfix
Merge in Linus' tree with:

	fa81511bb0bb x86-64, modify_ldt: Make support for 16-bit segments a runtime option

... reverted, to avoid a conflict.  This commit is no longer necessary
with the proper fix in place.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 268a45ea238c..d9d8ece46a15 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2741,7 +2741,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
@@ -2751,7 +2751,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
@@ -2783,7 +2783,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2813,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
 
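
Background for the change above (an illustrative sketch, not part of this commit): __visible is the kernel's wrapper around GCC's externally_visible attribute, defined in include/linux/compiler*.h. The scheduler entry points touched here are called from the assembly entry code rather than from C, so under link-time optimization the compiler could otherwise conclude they are unreferenced and drop or localize their symbols. The function name below is hypothetical and the macro definition is paraphrased from the compiler headers of that era:

/* Sketch only: the authoritative definition is in include/linux/compiler-gcc*.h. */
#ifndef __visible
# define __visible __attribute__((externally_visible))
#endif

/*
 * Hypothetical entry point with no C callers, reached only from .S code.
 * Without the attribute, an LTO build that sees no references may drop
 * the function or give it internal linkage; externally_visible keeps the
 * symbol emitted and externally reachable so the assembly side can still
 * call it by name.
 */
__visible void called_only_from_asm(void)
{
	/* body elided for the sketch */
}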