path: root/arch/arm64/mm/context.c
author		Will Deacon <will.deacon@arm.com>	2015-05-29 18:28:44 +0100
committer	Will Deacon <will.deacon@arm.com>	2015-07-27 11:08:40 +0100
commit		4b3dc9679cf779339d9049800803dfc3c83433d1 (patch)
tree		8c04c26a614240f3142b7afafc071719fc3ea0f5 /arch/arm64/mm/context.c
parent		52da443ec4d0a807b720527eb474f9c2878cd671 (diff)
download	linux-4b3dc9679cf779339d9049800803dfc3c83433d1.tar.gz
		linux-4b3dc9679cf779339d9049800803dfc3c83433d1.tar.bz2
		linux-4b3dc9679cf779339d9049800803dfc3c83433d1.zip
arm64: force CONFIG_SMP=y and remove redundant #ifdefs
Nobody seems to be producing !SMP systems anymore, so this is just becoming a source of kernel bugs, particularly if people want to use coherent DMA with non-shared pages.

This patch forces CONFIG_SMP=y for arm64, removing a modest amount of code in the process.

Signed-off-by: Will Deacon <will.deacon@arm.com>
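For context, forcing the option on means the architecture Kconfig selects SMP unconditionally rather than offering it as a user-visible choice. A minimal sketch of such an entry follows (illustrative only; the arch/arm64/Kconfig hunk itself is outside this file's diffstat):

config SMP
	# Always build the kernel SMP-capable; the uniprocessor (!SMP)
	# fallbacks, like the ones removed below, then become dead code.
	def_bool y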
Diffstat (limited to 'arch/arm64/mm/context.c')
-rw-r--r--	arch/arm64/mm/context.c	| 16
1 file changed, 0 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 76c1e6cd36fc..d70ff14dbdbd 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -53,8 +53,6 @@ static void flush_context(void)
 	__flush_icache_all();
 }
 
-#ifdef CONFIG_SMP
-
 static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 {
 	unsigned long flags;
@@ -110,23 +108,12 @@ static void reset_context(void *info)
 	cpu_switch_mm(mm->pgd, mm);
 }
 
-#else
-
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-	mm->context.id = asid;
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
-
-#endif
-
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 	unsigned int bits = asid_bits();
 
 	raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
 	/*
 	 * Check the ASID again, in case the change was broadcast from another
 	 * CPU before we acquired the lock.
@@ -136,7 +123,6 @@ void __new_context(struct mm_struct *mm)
 		raw_spin_unlock(&cpu_asid_lock);
 		return;
 	}
-#endif
 	/*
 	 * At this point, it is guaranteed that the current mm (with an old
 	 * ASID) isn't active on any other CPU since the ASIDs are changed
@@ -155,10 +141,8 @@ void __new_context(struct mm_struct *mm)
 			cpu_last_asid = ASID_FIRST_VERSION;
 		asid = cpu_last_asid + smp_processor_id();
 		flush_context();
-#ifdef CONFIG_SMP
 		smp_wmb();
 		smp_call_function(reset_context, NULL, 1);
-#endif
 		cpu_last_asid += NR_CPUS - 1;
 	}