-rw-r--r--  arch/x86/lib/copy_mc.c     32
-rw-r--r--  arch/x86/lib/copy_mc_64.S  36
-rw-r--r--  tools/objtool/check.c       1
3 files changed, 9 insertions, 60 deletions
diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
index c13e8c9ee926..2633635530b7 100644
--- a/arch/x86/lib/copy_mc.c
+++ b/arch/x86/lib/copy_mc.c
@@ -45,8 +45,6 @@ void enable_copy_mc_fragile(void)
#define copy_mc_fragile_enabled (0)
#endif
-unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);
-
/**
* copy_mc_to_kernel - memory copy that handles source exceptions
*
@@ -54,11 +52,9 @@ unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned
* @src: source address
* @len: number of bytes to copy
*
- * Call into the 'fragile' version on systems that benefit from avoiding
- * corner case poison consumption scenarios, For example, accessing
- * poison across 2 cachelines with a single instruction. Almost all
- * other uses case can use copy_mc_enhanced_fast_string() for a fast
- * recoverable copy, or fallback to plain memcpy.
+ * Call into the 'fragile' version on systems that have trouble
+ * actually doing machine check recovery. Everyone else can just
+ * use memcpy().
*
* Return 0 for success, or number of bytes not copied if there was an
* exception.
@@ -67,8 +63,6 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
 {
 	if (copy_mc_fragile_enabled)
 		return copy_mc_fragile(dst, src, len);
-	if (static_cpu_has(X86_FEATURE_ERMS))
-		return copy_mc_enhanced_fast_string(dst, src, len);
 	memcpy(dst, src, len);
 	return 0;
 }
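
The kernel-doc above is the entire contract: zero on success, otherwise the number of bytes not copied. As a minimal caller sketch (hypothetical helper, not part of this patch), a read path might use that return value to zero-fill the tail the faulting copy never reached:

/*
 * Hypothetical caller, for illustration only: the first 'len - rem'
 * bytes of 'dst' were copied before the exception, so only the tail
 * needs clearing before the error is reported.
 */
static int read_with_mc_recovery(void *dst, const void *src, size_t len)
{
	unsigned long rem = copy_mc_to_kernel(dst, src, len);

	if (rem) {
		memset(dst + (len - rem), 0, rem);
		return -EIO;
	}
	return 0;
}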
@@ -78,19 +72,11 @@ unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned
 {
 	unsigned long ret;
 
-	if (copy_mc_fragile_enabled) {
-		__uaccess_begin();
-		ret = copy_mc_fragile(dst, src, len);
-		__uaccess_end();
-		return ret;
-	}
-
-	if (static_cpu_has(X86_FEATURE_ERMS)) {
-		__uaccess_begin();
-		ret = copy_mc_enhanced_fast_string(dst, src, len);
-		__uaccess_end();
-		return ret;
-	}
+	if (!copy_mc_fragile_enabled)
+		return copy_user_generic(dst, src, len);
 
-	return copy_user_generic(dst, src, len);
+	__uaccess_begin();
+	ret = copy_mc_fragile(dst, src, len);
+	__uaccess_end();
+	return ret;
 }
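
Net effect of this hunk, read straight off the diff: copy_mc_to_user() now makes a single decision. The non-fragile path relies on copy_user_generic(), which already returns the number of bytes not copied, so the calling convention is unchanged:

unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
{
	unsigned long ret;

	if (!copy_mc_fragile_enabled)
		return copy_user_generic(dst, src, len);

	__uaccess_begin();
	ret = copy_mc_fragile(dst, src, len);
	__uaccess_end();
	return ret;
}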
diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S
index 892d8915f609..c3b613c4544a 100644
--- a/arch/x86/lib/copy_mc_64.S
+++ b/arch/x86/lib/copy_mc_64.S
@@ -124,40 +124,4 @@ EXPORT_SYMBOL_GPL(copy_mc_fragile)
 	_ASM_EXTABLE(.L_write_words, .E_write_words)
 	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
#endif /* CONFIG_X86_MCE */
-
-/*
- * copy_mc_enhanced_fast_string - memory copy with exception handling
- *
- * Fast string copy + fault / exception handling. If the CPU does
- * support machine check exception recovery, but does not support
- * recovering from fast-string exceptions then this CPU needs to be
- * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any
- * machine check recovery support this version should be no slower than
- * standard memcpy.
- */
-SYM_FUNC_START(copy_mc_enhanced_fast_string)
-	movq %rdi, %rax
-	movq %rdx, %rcx
-.L_copy:
-	rep movsb
-	/* Copy successful. Return zero */
-	xorl %eax, %eax
-	ret
-SYM_FUNC_END(copy_mc_enhanced_fast_string)
-
-	.section .fixup, "ax"
-.E_copy:
-	/*
-	 * On fault %rcx is updated such that the copy instruction could
-	 * optionally be restarted at the fault position, i.e. it
-	 * contains 'bytes remaining'. A non-zero return indicates error
-	 * to copy_mc_generic() users, or indicate short transfers to
-	 * user-copy routines.
-	 */
-	movq %rcx, %rax
-	ret
-
-	.previous
-
-	_ASM_EXTABLE_FAULT(.L_copy, .E_copy)
#endif /* !CONFIG_UML */
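
The deleted routine leaned entirely on the rep movsb register convention: %rcx counts down as bytes move, which is why the .E_copy fixup could hand it back as 'bytes remaining'. A user-space sketch of just that register contract (illustrative only; without the kernel's exception table it cannot actually recover from a fault):

#include <stddef.h>

/*
 * 'rep movsb' consumes %rdi/%rsi/%rcx and decrements %rcx once per
 * byte copied, so an interrupted copy leaves the remaining count
 * in %rcx.
 */
static size_t rep_movsb_copy(void *dst, const void *src, size_t len)
{
	size_t rem = len;

	asm volatile("rep movsb"
		     : "+D" (dst), "+S" (src), "+c" (rem)
		     : : "memory");
	return rem;	/* 0 on completion, like the .L_copy success path */
}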
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 42ac19e0299c..cf2d076f6ba5 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -550,7 +550,6 @@ static const char *uaccess_safe_builtin[] = {
"csum_partial_copy_generic",
"copy_mc_fragile",
"copy_mc_fragile_handle_tail",
- "copy_mc_enhanced_fast_string",
"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
NULL
};