author	Peter Zijlstra <peterz@infradead.org>	2017-12-05 13:34:46 +0100
committer	Ingo Molnar <mingo@kernel.org>	2017-12-22 20:13:03 +0100
commit	b5fc6d943808b570bdfbec80f40c6b3855f1c48b (patch)
tree	2e6ddbeb5ad7a205c6c97c8ba0ca877051187ddb
parent	a501686b2923ce6f2ff2b1d0d50682c6411baf72 (diff)
x86/mm: Remove superfluous barriers
atomic64_inc_return() already implies smp_mb() before and after.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/include/asm/tlbflush.h | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c2e45da4e540..3e2227386abe 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -60,19 +60,13 @@ static inline void invpcid_flush_all_nonglobals(void)
 
 static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 {
-	u64 new_tlb_gen;
-
 	/*
 	 * Bump the generation count. This also serves as a full barrier
 	 * that synchronizes with switch_mm(): callers are required to order
 	 * their read of mm_cpumask after their writes to the paging
 	 * structures.
 	 */
-	smp_mb__before_atomic();
-	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
-	smp_mb__after_atomic();
-
-	return new_tlb_gen;
+	return atomic64_inc_return(&mm->context.tlb_gen);
 }
 
 #ifdef CONFIG_PARAVIRT
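
For readers checking the ordering claim: the kernel's memory model (see Documentation/atomic_t.txt) specifies that value-returning atomic RMW operations such as atomic64_inc_return() are fully ordered, i.e. they already behave as if an smp_mb() were issued before and after, which is what makes the dropped barriers redundant. Below is a rough userspace sketch of the same property, an illustration only, not kernel code; the names tlb_gen and inc_tlb_gen are made up for this example. C11 seq_cst RMW operations carry the equivalent full ordering:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for mm->context.tlb_gen (hypothetical example). */
static _Atomic uint64_t tlb_gen;

/*
 * Userspace analogue of inc_mm_tlb_gen(): a value-returning RMW
 * with the default memory_order_seq_cst is fully ordered on its
 * own, so no separate fences (atomic_thread_fence() here, the
 * smp_mb__before_atomic()/smp_mb__after_atomic() pair in the
 * kernel) are needed around it.
 */
static uint64_t inc_tlb_gen(void)
{
	/* fetch_add returns the old value; add 1 for the new one. */
	return atomic_fetch_add(&tlb_gen, 1) + 1;
}

int main(void)
{
	printf("new tlb_gen = %llu\n", (unsigned long long)inc_tlb_gen());
	return 0;
}

The sketch mirrors the patched inc_mm_tlb_gen(): the increment and the full ordering come from the single RMW operation, so wrapping it in explicit fences would only add redundant barriers.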