| field | value | date |
|---|---|---|
| author | Paul Burton <paul.burton@imgtec.com> | 2017-06-02 14:48:50 -0700 |
| committer | Ralf Baechle <ralf@linux-mips.org> | 2017-06-29 02:42:28 +0200 |
| commit | 516db1c61f3fd4328361699a2c74781ab1dbf84c (patch) | |
| tree | d7081b6226e45a468823885d998b1434a4c1cc3e /arch | |
| parent | 9b03d8abe06d92195712fd489b2e8983de27fa68 (diff) | |
MIPS: CM: Avoid per-core locking with CM3 & higher
CM3 provides a GCR_CL_OTHER register per VP, rather than only per core.
This means that we don't need to prevent other VPs within a core from
racing with code that makes use of the core-other register region.
Reduce locking overhead by demoting the per-core spinlock providing
protection for CM2.5 & lower to a per-CPU/per-VP spinlock for CM3 &
higher.
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16193/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
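
The locking scheme this commit arrives at can be condensed into the sketch below. It mirrors the post-patch logic of mips_cm_lock_other() in arch/mips/kernel/mips-cm.c, but it is a sketch, not the verbatim kernel code: the MIPS-specific context (mips_cm_revision(), current_cpu_data, CM_REV_CM3) is assumed from asm/mips-cm.h and friends, and the actual write to the redirect register is elided.

```c
#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Per-CPU lock and saved IRQ flags, as in mips-cm.c. */
static DEFINE_PER_CPU(spinlock_t, cm_core_lock);
static DEFINE_PER_CPU(unsigned long, cm_core_lock_flags);

void mips_cm_lock_other(unsigned int core, unsigned int vp)
{
	preempt_disable();

	if (mips_cm_revision() >= CM_REV_CM3) {
		/*
		 * CM3+: there is one GCR_CL_OTHER per VP, so taking only
		 * the local VP's lock suffices. The lock still disables
		 * interrupts, and lets lockdep flag any nested
		 * mips_cm_lock_other() call.
		 */
		spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
				  *this_cpu_ptr(&cm_core_lock_flags));
	} else {
		/*
		 * CM 2.5 & lower: one GCR_CL_OTHER per core, so every
		 * VP(E) of the current core must serialise on that
		 * core's lock.
		 */
		unsigned int curr_core = current_cpu_data.core;

		spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
				  per_cpu(cm_core_lock_flags, curr_core));
	}

	/* ... program the core-other (redirect) register here ... */
}
```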
Diffstat (limited to 'arch')
-rw-r--r-- | arch/mips/kernel/mips-cm.c | 38 |
1 file changed, 32 insertions(+), 6 deletions(-)
```diff
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 659e6d3ae335..99bb74dd12ce 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -265,15 +265,34 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
 	u32 val;
 
 	preempt_disable();
-	curr_core = current_cpu_data.core;
-	spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
-			  per_cpu(cm_core_lock_flags, curr_core));
 
 	if (mips_cm_revision() >= CM_REV_CM3) {
 		val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
 		val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
+
+		/*
+		 * We need to disable interrupts in SMP systems in order to
+		 * ensure that we don't interrupt the caller with code which
+		 * may modify the redirect register. We do so here in a
+		 * slightly obscure way by using a spin lock, since this has
+		 * the neat property of also catching any nested uses of
+		 * mips_cm_lock_other() leading to a deadlock or a nice warning
+		 * with lockdep enabled.
+		 */
+		spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
+				  *this_cpu_ptr(&cm_core_lock_flags));
 	} else {
 		BUG_ON(vp != 0);
+
+		/*
+		 * We only have a GCR_CL_OTHER per core in systems with
+		 * CM 2.5 & older, so have to ensure other VP(E)s don't
+		 * race with us.
+		 */
+		curr_core = current_cpu_data.core;
+		spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+				  per_cpu(cm_core_lock_flags, curr_core));
+
 		val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
 	}
 
@@ -288,10 +307,17 @@ void mips_cm_lock_other(unsigned int core, unsigned int vp)
 
 void mips_cm_unlock_other(void)
 {
-	unsigned curr_core = current_cpu_data.core;
+	unsigned int curr_core;
+
+	if (mips_cm_revision() < CM_REV_CM3) {
+		curr_core = current_cpu_data.core;
+		spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+				       per_cpu(cm_core_lock_flags, curr_core));
+	} else {
+		spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock),
+				       *this_cpu_ptr(&cm_core_lock_flags));
+	}
 
-	spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
-			       per_cpu(cm_core_lock_flags, curr_core));
 	preempt_enable();
 }
```
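
For context, callers bracket their accesses to the core-other region with this lock/unlock pair. The fragment below is a hypothetical illustration of that usage, not code from the patch: read_gcr_co_config() stands in for whichever redirect-region accessor from asm/mips-cm.h a real caller would use.

```c
#include <asm/mips-cm.h>

/*
 * Hypothetical caller: read a register from another core's GCR block
 * via the core-other (redirect) region. The accessor name is
 * illustrative only; the lock/unlock bracketing is the point.
 */
static u32 read_other_core_reg(unsigned int core, unsigned int vp)
{
	u32 val;

	mips_cm_lock_other(core, vp);	/* select core/vp; IRQs off via the lock */
	val = read_gcr_co_config();	/* illustrative redirect-region accessor */
	mips_cm_unlock_other();		/* drop the lock, re-enable preemption */

	return val;
}
```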