author     Linus Torvalds <torvalds@linux-foundation.org>   2015-08-31 20:26:22 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-08-31 20:26:22 -0700
commit     a1d8561172f369ba56d636df49a6b4d6d77e2123 (patch)
tree       746e4666ec62e057887ee9c884eb613c03f9ff28 /arch
parent     3959df1dfb9538498ec3372a2d390bc7fbdbfac2 (diff)
parent     ff277d4250fe715b6666219b1a3423b863418794 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The biggest change in this cycle is the rewrite of the main SMP load
  balancing metric: the CPU load/utilization.  The main goal was to make
  the metric more precise and more representative - see the changelog of
  this commit for the gory details:

    9d89c257dfb9 ("sched/fair: Rewrite runnable load and utilization average tracking")

  It is done in a way that significantly reduces complexity of the code:

    5 files changed, 249 insertions(+), 494 deletions(-)

  and the performance testing results are encouraging.  Nevertheless we
  need to keep an eye on potential regressions, since this potentially
  affects every SMP workload in existence.

  This work comes from Yuyang Du.

  Other changes:

   - SCHED_DL updates.  (Andrea Parri)

   - Simplify architecture callbacks by removing finish_arch_switch().
     (Peter Zijlstra et al)

   - cputime accounting: guarantee stime + utime == rtime.  (Peter Zijlstra)

   - optimize idle CPU wakeups some more - inspired by Facebook server
     loads.  (Mike Galbraith)

   - stop_machine fixes and updates.  (Oleg Nesterov)

   - Introduce the 'trace_sched_waking' tracepoint.  (Peter Zijlstra)

   - sched/numa tweaks.  (Srikar Dronamraju)

   - misc fixes and small cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (44 commits)
  sched/deadline: Fix comment in enqueue_task_dl()
  sched/deadline: Fix comment in push_dl_tasks()
  sched: Change the sched_class::set_cpus_allowed() calling context
  sched: Make sched_class::set_cpus_allowed() unconditional
  sched: Fix a race between __kthread_bind() and sched_setaffinity()
  sched: Ensure a task has a non-normalized vruntime when returning back to CFS
  sched/numa: Fix NUMA_DIRECT topology identification
  tile: Reorganize _switch_to()
  sched, sparc32: Update scheduler comments in copy_thread()
  sched: Remove finish_arch_switch()
  sched, tile: Remove finish_arch_switch
  sched, sh: Fold finish_arch_switch() into switch_to()
  sched, score: Remove finish_arch_switch()
  sched, avr32: Remove finish_arch_switch()
  sched, MIPS: Get rid of finish_arch_switch()
  sched, arm: Remove finish_arch_switch()
  sched/fair: Clean up load average references
  sched/fair: Provide runnable_load_avg back to cfs_rq
  sched/fair: Remove task and group entity load when they are dead
  sched/fair: Init cfs_rq's sched_entity load average
  ...
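A recurring pattern in this pull, visible in the per-architecture hunks below, is the removal of the finish_arch_switch(prev) callback: instead of the core scheduler calling back into the architecture after the context switch, each architecture now does that work itself, either at the start of its own switch_to() (ARM's __complete_pending_tlbi(), avr32's ocd_switch(), the MIPS COP2/DSP handling, sh's DSP restore) or, when the work must run after the runqueue lock is dropped, from the existing finish_arch_post_lock_switch() hook (tile). The following is a minimal, hypothetical sketch of that shape only; __arch_switch_extra_state() and __resume() are illustrative names, not kernel symbols:

/*
 * Hypothetical architecture header, sketching the post-series shape.
 * The work that previously ran in finish_arch_switch(prev), i.e. after
 * the low-level switch, now runs inside switch_to() itself, before the
 * low-level switch, while the runqueue lock is still held.
 */
#define switch_to(prev, next, last)					\
do {									\
	__arch_switch_extra_state(prev, next);				\
	(last) = __resume(prev, next);					\
} while (0)

Work that genuinely has to run after the runqueue lock is released (such as tile's homecache migration) instead hooks finish_arch_post_lock_switch(), which the core scheduler already provides.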
Diffstat (limited to 'arch')
-rw-r--r--   arch/arm/include/asm/switch_to.h     5
-rw-r--r--   arch/avr32/include/asm/switch_to.h   7
-rw-r--r--   arch/mips/include/asm/switch_to.h   48
-rw-r--r--   arch/powerpc/kvm/book3s_hv.c         2
-rw-r--r--   arch/score/include/asm/switch_to.h   2
-rw-r--r--   arch/sh/include/asm/switch_to_32.h   8
-rw-r--r--   arch/sparc/kernel/process_32.c      10
-rw-r--r--   arch/tile/include/asm/switch_to.h    8
-rw-r--r--   arch/tile/kernel/process.c           5
-rw-r--r--   arch/x86/include/asm/preempt.h       4
10 files changed, 50 insertions(+), 49 deletions(-)
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index c99e259469f7..12ebfcc1d539 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -10,7 +10,9 @@
* CPU.
*/
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
-#define finish_arch_switch(prev) dsb(ish)
+#define __complete_pending_tlbi() dsb(ish)
+#else
+#define __complete_pending_tlbi()
#endif
/*
@@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
+ __complete_pending_tlbi(); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
diff --git a/arch/avr32/include/asm/switch_to.h b/arch/avr32/include/asm/switch_to.h
index 9a8e9d5208d4..6f00581c3d4f 100644
--- a/arch/avr32/include/asm/switch_to.h
+++ b/arch/avr32/include/asm/switch_to.h
@@ -15,11 +15,13 @@
*/
#ifdef CONFIG_OWNERSHIP_TRACE
#include <asm/ocd.h>
-#define finish_arch_switch(prev) \
+#define ocd_switch(prev, next) \
do { \
ocd_write(PID, prev->pid); \
- ocd_write(PID, current->pid); \
+ ocd_write(PID, next->pid); \
} while(0)
+#else
+#define ocd_switch(prev, next)
#endif
/*
@@ -38,6 +40,7 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct cpu_context *);
#define switch_to(prev, next, last) \
do { \
+ ocd_switch(prev, next); \
last = __switch_to(prev, &prev->thread.cpu_context + 1, \
&next->thread.cpu_context); \
} while (0)
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 7163cd7fdd69..9733cd0266e4 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -83,45 +83,43 @@ do { if (cpu_has_rw_llb) { \
} \
} while (0)
+/*
+ * For newly created kernel threads switch_to() will return to
+ * ret_from_kernel_thread, newly created user threads to ret_from_fork.
+ * That is, everything following resume() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before resume().
+ */
#define switch_to(prev, next, last) \
do { \
- u32 __c0_stat; \
s32 __fpsave = FP_SAVE_NONE; \
__mips_mt_fpaff_switch_to(prev); \
- if (cpu_has_dsp) \
+ if (cpu_has_dsp) { \
__save_dsp(prev); \
- if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \
- if (cop2_lazy_restore) \
- KSTK_STATUS(prev) &= ~ST0_CU2; \
- __c0_stat = read_c0_status(); \
- write_c0_status(__c0_stat | ST0_CU2); \
- cop2_save(prev); \
- write_c0_status(__c0_stat & ~ST0_CU2); \
+ __restore_dsp(next); \
+ } \
+ if (cop2_present) { \
+ set_c0_status(ST0_CU2); \
+ if ((KSTK_STATUS(prev) & ST0_CU2)) { \
+ if (cop2_lazy_restore) \
+ KSTK_STATUS(prev) &= ~ST0_CU2; \
+ cop2_save(prev); \
+ } \
+ if (KSTK_STATUS(next) & ST0_CU2 && \
+ !cop2_lazy_restore) { \
+ cop2_restore(next); \
+ } \
+ clear_c0_status(ST0_CU2); \
} \
__clear_software_ll_bit(); \
if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \
__fpsave = FP_SAVE_SCALAR; \
if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \
__fpsave = FP_SAVE_VECTOR; \
- (last) = resume(prev, next, task_thread_info(next), __fpsave); \
-} while (0)
-
-#define finish_arch_switch(prev) \
-do { \
- u32 __c0_stat; \
- if (cop2_present && !cop2_lazy_restore && \
- (KSTK_STATUS(current) & ST0_CU2)) { \
- __c0_stat = read_c0_status(); \
- write_c0_status(__c0_stat | ST0_CU2); \
- cop2_restore(current); \
- write_c0_status(__c0_stat & ~ST0_CU2); \
- } \
- if (cpu_has_dsp) \
- __restore_dsp(current); \
if (cpu_has_userlocal) \
- write_c0_userlocal(current_thread_info()->tp_value); \
+ write_c0_userlocal(task_thread_info(next)->tp_value); \
__restore_watch(); \
disable_msa(); \
+ (last) = resume(prev, next, task_thread_info(next), __fpsave); \
} while (0)
#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 68d067ad4222..a9f753fb73a8 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vc->runner = vcpu;
if (n_ceded == vc->n_runnable) {
kvmppc_vcore_blocked(vc);
- } else if (should_resched()) {
+ } else if (need_resched()) {
vc->vcore_state = VCORE_PREEMPT;
/* Let something else run */
cond_resched_lock(&vc->lock);
diff --git a/arch/score/include/asm/switch_to.h b/arch/score/include/asm/switch_to.h
index 031756b59ece..fda3f83308d2 100644
--- a/arch/score/include/asm/switch_to.h
+++ b/arch/score/include/asm/switch_to.h
@@ -8,6 +8,4 @@ do { \
(last) = resume(prev, next, task_thread_info(next)); \
} while (0)
-#define finish_arch_switch(prev) do {} while (0)
-
#endif /* _ASM_SCORE_SWITCH_TO_H */
diff --git a/arch/sh/include/asm/switch_to_32.h b/arch/sh/include/asm/switch_to_32.h
index 0c065513e7ac..7661b4ba8259 100644
--- a/arch/sh/include/asm/switch_to_32.h
+++ b/arch/sh/include/asm/switch_to_32.h
@@ -78,6 +78,8 @@ do { \
\
if (is_dsp_enabled(prev)) \
__save_dsp(prev); \
+ if (is_dsp_enabled(next)) \
+ __restore_dsp(next); \
\
__ts1 = (u32 *)&prev->thread.sp; \
__ts2 = (u32 *)&prev->thread.pc; \
@@ -125,10 +127,4 @@ do { \
last = __last; \
} while (0)
-#define finish_arch_switch(prev) \
-do { \
- if (is_dsp_enabled(prev)) \
- __restore_dsp(prev); \
-} while (0)
-
#endif /* __ASM_SH_SWITCH_TO_32_H */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 50e7b626afe8..c5113c7ce2fd 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -333,11 +333,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);
/*
- * A new process must start with interrupts closed in 2.5,
- * because this is how Mingo's scheduler works (see schedule_tail
- * and finish_arch_switch). If we do not do it, a timer interrupt hits
- * before we unlock, attempts to re-take the rq->lock, and then we die.
- * Thus, kpsr|=PSR_PIL.
+ * A new process must start with interrupts disabled, see schedule_tail()
+ * and finish_task_switch(). (If we do not do it and if a timer interrupt
+ * hits before we unlock and attempts to take the rq->lock, we deadlock.)
+ *
+ * Thus, kpsr |= PSR_PIL.
*/
ti->ksp = (unsigned long) new_stack;
p->thread.kregs = childregs;
diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h
index b8f888cbe6b0..34ee72705521 100644
--- a/arch/tile/include/asm/switch_to.h
+++ b/arch/tile/include/asm/switch_to.h
@@ -53,15 +53,13 @@ extern unsigned long get_switch_to_pc(void);
* Kernel threads can check to see if they need to migrate their
* stack whenever they return from a context switch; for user
* threads, we defer until they are returning to user-space.
+ * We defer homecache migration until the runqueue lock is released.
*/
-#define finish_arch_switch(prev) do { \
- if (unlikely((prev)->state == TASK_DEAD)) \
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \
- ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \
+#define finish_arch_post_lock_switch() do { \
__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \
(current->pid << _SIM_CONTROL_OPERATOR_BITS)); \
if (current->mm == NULL && !kstack_hash && \
- current_thread_info()->homecache_cpu != smp_processor_id()) \
+ current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
homecache_migrate_kthread(); \
} while (0)
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index a45213781ad0..7d5769310bef 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -446,6 +446,11 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
hardwall_switch_tasks(prev, next);
#endif
+ /* Notify the simulator of task exit. */
+ if (unlikely(prev->state == TASK_DEAD))
+ __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
+ (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
+
/*
* Switch kernel SP, PC, and callee-saved registers.
* In the context of the new task, return the old task pointer
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index dca71714f860..b12f81022a6b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(!raw_cpu_read_4(__preempt_count));
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
#ifdef CONFIG_PREEMPT
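The x86 hunk above belongs to the same cleanup series: should_resched() now compares the per-CPU preempt count against a caller-supplied expected offset instead of against zero, so a caller that legitimately holds a lock (and therefore has a non-zero preempt count on PREEMPT kernels) can still ask whether rescheduling is both needed and possible. That is also why the powerpc KVM call site earlier in this diff switches to plain need_resched(), which only asks whether a reschedule is pending. Below is a rough sketch of the generic (non-x86) idea, assuming the kernel's preempt_count() and tif_need_resched() helpers; it illustrates the comparison, it is not the exact asm-generic implementation:

/*
 * Sketch: "can we reschedule right now?" for a caller whose expected
 * preempt count is 'preempt_offset' - 0 for a bare cond_resched(),
 * the held lock's contribution (PREEMPT_LOCK_OFFSET) for a
 * cond_resched_lock()-style caller.
 */
static inline bool should_resched_sketch(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}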