author     Al Viro <viro@zeniv.linux.org.uk>   2016-01-11 11:04:34 -0500
committer  Al Viro <viro@zeniv.linux.org.uk>   2016-08-07 23:47:15 -0400
commit     784d5699eddc55878627da20d3fe0c8542e2f1a2 (patch)
tree       c7fac7583c9730fb0c0e73ea905a97a487ec34bf /arch/x86/lib
parent     22823ab419d8ed884195cfa75483fd3a99bb1462 (diff)
x86: move exports to actual definitions
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
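The change is mechanical: rather than collecting EXPORT_SYMBOL() calls for assembly routines in a separate C "ksyms" file, each export now sits directly after the routine's ENDPROC() in the .S source, using the EXPORT_SYMBOL macro provided by <asm/export.h>. A minimal sketch of the before/after pattern, using memcpy as the example (the old ksyms file name is illustrative, not part of this diff):

    /* Before (illustrative): export kept in a C file away from the definition,
     * e.g. arch/x86/kernel/x8664_ksyms_64.c */
    #include <linux/export.h>
    EXPORT_SYMBOL(memcpy);

    /* After: export emitted next to the definition in memcpy_64.S */
    #include <asm/export.h>

    ENTRY(memcpy)
            ...
            ret
    ENDPROC(memcpy)
    EXPORT_SYMBOL(memcpy)

GPL-only exports keep their variant, e.g. EXPORT_SYMBOL_GPL(memcpy_mcsafe) further down in this diff.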
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/checksum_32.S      3
-rw-r--r--  arch/x86/lib/clear_page_64.S    2
-rw-r--r--  arch/x86/lib/cmpxchg8b_emu.S    2
-rw-r--r--  arch/x86/lib/copy_page_64.S     2
-rw-r--r--  arch/x86/lib/copy_user_64.S     8
-rw-r--r--  arch/x86/lib/csum-partial_64.c  1
-rw-r--r--  arch/x86/lib/getuser.S          5
-rw-r--r--  arch/x86/lib/hweight.S          3
-rw-r--r--  arch/x86/lib/memcpy_64.S        4
-rw-r--r--  arch/x86/lib/memmove_64.S       3
-rw-r--r--  arch/x86/lib/memset_64.S        3
-rw-r--r--  arch/x86/lib/putuser.S          5
-rw-r--r--  arch/x86/lib/strstr_32.c        3
13 files changed, 43 insertions(+), 1 deletion(-)
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index c1e623209853..4d34bb548b41 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -28,6 +28,7 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
+#include <asm/export.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
@@ -251,6 +252,7 @@ ENTRY(csum_partial)
ENDPROC(csum_partial)
#endif
+EXPORT_SYMBOL(csum_partial)
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
@@ -490,3 +492,4 @@ ENDPROC(csum_partial_copy_generic)
#undef ROUND1
#endif
+EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index 65be7cfaf947..5e2af3a88cf5 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,6 +1,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
/*
* Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
@@ -23,6 +24,7 @@ ENTRY(clear_page)
rep stosq
ret
ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)
ENTRY(clear_page_orig)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index ad5349778490..03a186fc06ea 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,6 +7,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
@@ -48,3 +49,4 @@ ENTRY(cmpxchg8b_emu)
ret
ENDPROC(cmpxchg8b_emu)
+EXPORT_SYMBOL(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 24ef1c2104d4..e8508156c99d 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
/*
* Some CPUs run faster using the string copy instructions (sane microcode).
@@ -17,6 +18,7 @@ ENTRY(copy_page)
rep movsq
ret
ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
ENTRY(copy_page_regs)
subq $2*8, %rsp
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index bf603ebbfd8e..d376e4b48f88 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -14,6 +14,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
@@ -29,6 +30,7 @@ ENTRY(_copy_to_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
+EXPORT_SYMBOL(_copy_to_user)
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
@@ -44,6 +46,8 @@ ENTRY(_copy_from_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
+EXPORT_SYMBOL(_copy_from_user)
+
.section .fixup,"ax"
/* must zero dest */
@@ -155,6 +159,7 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
+EXPORT_SYMBOL(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions.
* This is also a lot simpler. Use them when possible.
@@ -200,6 +205,7 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
+EXPORT_SYMBOL(copy_user_generic_string)
/*
* Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
@@ -229,6 +235,7 @@ ENTRY(copy_user_enhanced_fast_string)
_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
+EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
* copy_user_nocache - Uncached memory copy with exception handling
@@ -379,3 +386,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(40b,.L_fixup_1b_copy)
_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
+EXPORT_SYMBOL(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 9a7fe6a70491..378e5d5bf9b1 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -135,6 +135,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
return (__force __wsum)add32_with_carry(do_csum(buff, len),
(__force u32)sum);
}
+EXPORT_SYMBOL(csum_partial);
/*
* this routine is used for miscellaneous IP-like checksums, mainly
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 0ef5128c2de8..37b62d412148 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -32,6 +32,7 @@
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>
.text
ENTRY(__get_user_1)
@@ -44,6 +45,7 @@ ENTRY(__get_user_1)
ASM_CLAC
ret
ENDPROC(__get_user_1)
+EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
add $1,%_ASM_AX
@@ -57,6 +59,7 @@ ENTRY(__get_user_2)
ASM_CLAC
ret
ENDPROC(__get_user_2)
+EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
add $3,%_ASM_AX
@@ -70,6 +73,7 @@ ENTRY(__get_user_4)
ASM_CLAC
ret
ENDPROC(__get_user_4)
+EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
#ifdef CONFIG_X86_64
@@ -97,6 +101,7 @@ ENTRY(__get_user_8)
ret
#endif
ENDPROC(__get_user_8)
+EXPORT_SYMBOL(__get_user_8)
bad_get_user:
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index 02de3d74d2c5..9d4ca9207427 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -1,4 +1,5 @@
#include <linux/linkage.h>
+#include <asm/export.h>
#include <asm/asm.h>
@@ -32,6 +33,7 @@ ENTRY(__sw_hweight32)
__ASM_SIZE(pop,) %__ASM_REG(dx)
ret
ENDPROC(__sw_hweight32)
+EXPORT_SYMBOL(__sw_hweight32)
ENTRY(__sw_hweight64)
#ifdef CONFIG_X86_64
@@ -75,3 +77,4 @@ ENTRY(__sw_hweight64)
ret
#endif
ENDPROC(__sw_hweight64)
+EXPORT_SYMBOL(__sw_hweight64)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 2ec0b0abbfaa..94c917af9688 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -4,6 +4,7 @@
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
@@ -40,6 +41,8 @@ ENTRY(memcpy)
ret
ENDPROC(memcpy)
ENDPROC(__memcpy)
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(__memcpy)
/*
* memcpy_erms() - enhanced fast string memcpy. This is faster and
@@ -274,6 +277,7 @@ ENTRY(memcpy_mcsafe)
xorq %rax, %rax
ret
ENDPROC(memcpy_mcsafe)
+EXPORT_SYMBOL_GPL(memcpy_mcsafe)
.section .fixup, "ax"
/* Return -EFAULT for any failure */
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 90ce01bee00c..15de86cd15b0 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
#undef memmove
@@ -207,3 +208,5 @@ ENTRY(__memmove)
retq
ENDPROC(__memmove)
ENDPROC(memmove)
+EXPORT_SYMBOL(__memmove)
+EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index e1229ecd2a82..55b95db30a61 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
.weak memset
@@ -43,6 +44,8 @@ ENTRY(__memset)
ret
ENDPROC(memset)
ENDPROC(__memset)
+EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(__memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index c891ece81e5b..cd5d716d2897 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -15,6 +15,7 @@
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>
/*
@@ -43,6 +44,7 @@ ENTRY(__put_user_1)
xor %eax,%eax
EXIT
ENDPROC(__put_user_1)
+EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
ENTER
@@ -55,6 +57,7 @@ ENTRY(__put_user_2)
xor %eax,%eax
EXIT
ENDPROC(__put_user_2)
+EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
ENTER
@@ -67,6 +70,7 @@ ENTRY(__put_user_4)
xor %eax,%eax
EXIT
ENDPROC(__put_user_4)
+EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
ENTER
@@ -82,6 +86,7 @@ ENTRY(__put_user_8)
xor %eax,%eax
EXIT
ENDPROC(__put_user_8)
+EXPORT_SYMBOL(__put_user_8)
bad_put_user:
movl $-EFAULT,%eax
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index 8e2d55f754bf..a03b1c750bfe 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -1,4 +1,5 @@
#include <linux/string.h>
+#include <linux/export.h>
char *strstr(const char *cs, const char *ct)
{
@@ -28,4 +29,4 @@ __asm__ __volatile__(
: "dx", "di");
return __res;
}
-
+EXPORT_SYMBOL(strstr);
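For the two C files touched here the same idea applies with the generic header: <linux/export.h> is included and EXPORT_SYMBOL() follows the function definition it exports. A rough sketch of the resulting shape of strstr_32.c (abridged; the function body is unchanged by this patch):

    #include <linux/string.h>
    #include <linux/export.h>

    char *strstr(const char *cs, const char *ct)
    {
            /* ... inline-asm scan loop, unchanged by this patch ... */
    }
    EXPORT_SYMBOL(strstr);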